Clean up unnecessary namespace specifiers in TensorBlock.h.

Rasmus Munk Larsen 2019-08-07 12:12:52 -07:00
parent 0050644b23
commit 0987126165

@@ -147,8 +147,8 @@ struct TensorBlockCopyOp {
   typedef typename packet_traits<Scalar>::type Packet;
   enum {
-    Vectorizable = internal::packet_traits<Scalar>::Vectorizable,
-    PacketSize = internal::packet_traits<Scalar>::size
+    Vectorizable = packet_traits<Scalar>::Vectorizable,
+    PacketSize = packet_traits<Scalar>::size
   };
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
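Note: the code touched here already lives inside namespace Eigen::internal, which is what makes the internal:: qualifiers above redundant. For context, packet_traits is Eigen's per-scalar SIMD descriptor; a minimal sketch of the two members the enum reads (the reported size is architecture-dependent, e.g. 4 floats with SSE, 8 with AVX):

#include <Eigen/Core>
#include <iostream>

int main() {
  using Eigen::internal::packet_traits;
  // Whether float has a SIMD packet type on this target,
  // and how many floats fit in one packet.
  std::cout << "Vectorizable: " << int(packet_traits<float>::Vectorizable) << "\n"
            << "PacketSize:   " << int(packet_traits<float>::size) << "\n";
}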
@@ -171,8 +171,8 @@ struct TensorBlockCopyOp {
       if (dst_stride == 1) {
         // LINEAR
         for (StorageIndex i = 0; i < vectorized_size; i += PacketSize) {
-          Packet p = internal::ploadu<Packet>(src + i);
-          internal::pstoreu<Scalar, Packet>(dst + i, p);
+          Packet p = ploadu<Packet>(src + i);
+          pstoreu<Scalar, Packet>(dst + i, p);
         }
         for (StorageIndex i = vectorized_size; i < num_coeff_to_copy; ++i) {
           dst[i] = src[i];
@@ -180,8 +180,8 @@ struct TensorBlockCopyOp {
       } else {
         // SCATTER
         for (StorageIndex i = 0; i < vectorized_size; i += PacketSize) {
-          Packet p = internal::ploadu<Packet>(src + i);
-          internal::pscatter<Scalar, Packet>(dst + i * dst_stride, p, dst_stride);
+          Packet p = ploadu<Packet>(src + i);
+          pscatter<Scalar, Packet>(dst + i * dst_stride, p, dst_stride);
         }
         for (StorageIndex i = vectorized_size; i < num_coeff_to_copy; ++i) {
           dst[i * dst_stride] = src[i];
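Note: the SCATTER path writes each packet to a strided destination; pscatter stores lane j of the packet at dst[(i + j) * stride]. A sketch under the same illustrative assumptions as the previous one:

#include <Eigen/Core>

void packet_scatter(float* dst, const float* src, Eigen::Index n,
                    Eigen::Index stride) {
  using namespace Eigen::internal;
  typedef packet_traits<float>::type Packet;
  const Eigen::Index kPacketSize = packet_traits<float>::size;
  const Eigen::Index vectorized_size = (n / kPacketSize) * kPacketSize;
  for (Eigen::Index i = 0; i < vectorized_size; i += kPacketSize) {
    Packet p = ploadu<Packet>(src + i);                    // contiguous load
    pscatter<float, Packet>(dst + i * stride, p, stride);  // strided store
  }
  for (Eigen::Index i = vectorized_size; i < n; ++i) dst[i * stride] = src[i];
}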
@@ -192,8 +192,8 @@ struct TensorBlockCopyOp {
       if (dst_stride == 1) {
         // LINEAR
         for (StorageIndex i = 0; i < vectorized_size; i += PacketSize) {
-          Packet p = internal::pload1<Packet>(src);
-          internal::pstoreu<Scalar, Packet>(dst + i, p);
+          Packet p = pload1<Packet>(src);
+          pstoreu<Scalar, Packet>(dst + i, p);
         }
         for (StorageIndex i = vectorized_size; i < num_coeff_to_copy; ++i) {
           dst[i] = *src;
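Note: the pload1 variants handle a broadcast copy (the source does not advance), replicating a single scalar across all packet lanes. A sketch of the linear case; hoisting pload1 out of the loop is a simplification relative to the code above:

#include <Eigen/Core>

void packet_fill(float* dst, const float* src, Eigen::Index n) {
  using namespace Eigen::internal;
  typedef packet_traits<float>::type Packet;
  const Eigen::Index kPacketSize = packet_traits<float>::size;
  const Eigen::Index vectorized_size = (n / kPacketSize) * kPacketSize;
  Packet p = pload1<Packet>(src);  // broadcast *src into every lane
  for (Eigen::Index i = 0; i < vectorized_size; i += kPacketSize)
    pstoreu<float, Packet>(dst + i, p);
  for (Eigen::Index i = vectorized_size; i < n; ++i) dst[i] = *src;
}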
@@ -201,8 +201,8 @@ struct TensorBlockCopyOp {
      } else {
         // SCATTER
         for (StorageIndex i = 0; i < vectorized_size; i += PacketSize) {
-          Packet p = internal::pload1<Packet>(src);
-          internal::pscatter<Scalar, Packet>(dst + i * dst_stride, p, dst_stride);
+          Packet p = pload1<Packet>(src);
+          pscatter<Scalar, Packet>(dst + i * dst_stride, p, dst_stride);
         }
         for (StorageIndex i = vectorized_size; i < num_coeff_to_copy; ++i) {
           dst[i * dst_stride] = *src;
@@ -213,8 +213,8 @@ struct TensorBlockCopyOp {
       // GATHER
       const StorageIndex vectorized_size = (num_coeff_to_copy / PacketSize) * PacketSize;
       for (StorageIndex i = 0; i < vectorized_size; i += PacketSize) {
-        Packet p = internal::pgather<Scalar, Packet>(src + i * src_stride, src_stride);
-        internal::pstoreu<Scalar, Packet>(dst + i, p);
+        Packet p = pgather<Scalar, Packet>(src + i * src_stride, src_stride);
+        pstoreu<Scalar, Packet>(dst + i, p);
       }
       for (StorageIndex i = vectorized_size; i < num_coeff_to_copy; ++i) {
         dst[i] = src[i * src_stride];
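Note: the GATHER path is the mirror image of SCATTER; pgather assembles lane j of a packet from src[(i + j) * stride], then stores the packet contiguously. A sketch under the same illustrative assumptions:

#include <Eigen/Core>

void packet_gather(float* dst, const float* src, Eigen::Index n,
                   Eigen::Index stride) {
  using namespace Eigen::internal;
  typedef packet_traits<float>::type Packet;
  const Eigen::Index kPacketSize = packet_traits<float>::size;
  const Eigen::Index vectorized_size = (n / kPacketSize) * kPacketSize;
  for (Eigen::Index i = 0; i < vectorized_size; i += kPacketSize) {
    Packet p = pgather<float, Packet>(src + i * stride, stride);  // strided load
    pstoreu<float, Packet>(dst + i, p);                           // contiguous store
  }
  for (Eigen::Index i = vectorized_size; i < n; ++i) dst[i] = src[i * stride];
}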
@@ -491,11 +491,11 @@ struct TensorBlockCwiseUnaryOp {
       const StorageIndex output_index, const StorageIndex output_stride,
       OutputScalar* output_data, const StorageIndex input_index,
       const StorageIndex input_stride, const InputScalar* input_data) {
-    typedef const Eigen::Array<InputScalar, Dynamic, 1> Input;
-    typedef Eigen::Array<OutputScalar, Dynamic, 1> Output;
-    typedef Eigen::Map<Input, 0, InnerStride<> > InputMap;
-    typedef Eigen::Map<Output, 0, InnerStride<> > OutputMap;
+    typedef const Array<InputScalar, Dynamic, 1> Input;
+    typedef Array<OutputScalar, Dynamic, 1> Output;
+    typedef Map<Input, 0, InnerStride<> > InputMap;
+    typedef Map<Output, 0, InnerStride<> > OutputMap;
     const InputScalar* input_base = &input_data[input_index];
     OutputScalar* output_base = &output_data[output_index];
@@ -503,7 +503,7 @@ struct TensorBlockCwiseUnaryOp {
     const InputMap input(input_base, num_coeff, InnerStride<>(input_stride));
     OutputMap output(output_base, num_coeff, InnerStride<>(output_stride));
-    output = Eigen::CwiseUnaryOp<UnaryFunctor, InputMap>(input, functor);
+    output = CwiseUnaryOp<UnaryFunctor, InputMap>(input, functor);
   }
 };
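Note: the two hunks above rely on Map with a runtime InnerStride to view raw strided buffers as 1-D Eigen arrays. A standalone sketch of that pattern (buffer contents, sizes, and strides are illustrative; sqrt() builds the same kind of CwiseUnaryOp expression that the kernel constructs explicitly from its functor):

#include <Eigen/Core>

int main() {
  float in[8] = {1, -1, 4, -1, 9, -1, 16, -1};  // logical input at stride 2
  float out[4] = {};
  typedef Eigen::Map<const Eigen::ArrayXf, 0, Eigen::InnerStride<> > InputMap;
  typedef Eigen::Map<Eigen::ArrayXf, 0, Eigen::InnerStride<> > OutputMap;
  InputMap input(in, 4, Eigen::InnerStride<>(2));    // views in[0], in[2], in[4], in[6]
  OutputMap output(out, 4, Eigen::InnerStride<>(1));
  output = input.sqrt();  // out becomes {1, 2, 3, 4}
}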
@@ -518,7 +518,7 @@ struct TensorBlockCwiseUnaryOp {
 template <typename UnaryFunctor, typename StorageIndex, typename OutputScalar,
           int NumDims, int Layout>
 struct TensorBlockCwiseUnaryIO {
-  typedef typename internal::TensorBlock<OutputScalar, StorageIndex, NumDims,
-                                         Layout>::Dimensions Dimensions;
+  typedef typename TensorBlock<OutputScalar, StorageIndex, NumDims,
+                               Layout>::Dimensions Dimensions;

   struct BlockIteratorState {
@@ -865,7 +865,7 @@ class TensorBlockMapper {
                    const TensorBlockShapeType block_shape,
                    Index min_target_size)
       : m_dimensions(dims),
-        m_block_dim_sizes(BlockDimensions(dims, block_shape, internal::convert_index<StorageIndex>(min_target_size))) {
+        m_block_dim_sizes(BlockDimensions(dims, block_shape, convert_index<StorageIndex>(min_target_size))) {
     // Calculate block counts by dimension and total block count.
     DSizes<StorageIndex, NumDims> block_count;
     for (Index i = 0; i < block_count.rank(); ++i) {
@@ -974,7 +974,7 @@ class TensorBlockMapper {
     if (block_shape == kUniformAllDims) {
       // Tensor will not fit within 'min_target_size' budget: calculate tensor
       // block dimension sizes based on "square" dimension size target.
-      const StorageIndex dim_size_target = internal::convert_index<StorageIndex>(
+      const StorageIndex dim_size_target = convert_index<StorageIndex>(
           std::pow(static_cast<float>(min_target_size),
                    1.0f / static_cast<float>(block_dim_sizes.rank())));
       for (Index i = 0; i < block_dim_sizes.rank(); ++i) {
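Note: a worked instance of the kUniformAllDims sizing rule above; the budget and rank are made-up values. Each block dimension targets the rank-th root of the coefficient budget, giving roughly cubic blocks for a rank-3 tensor:

#include <cmath>
#include <iostream>

int main() {
  const float min_target_size = 1024.0f;  // illustrative coefficient budget
  const int rank = 3;                     // illustrative block rank
  const long dim_size_target = static_cast<long>(
      std::pow(min_target_size, 1.0f / static_cast<float>(rank)));
  std::cout << dim_size_target << "\n";  // 1024^(1/3) ~= 10.08, truncated to 10
}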