Mirror of https://gitlab.com/libeigen/eigen.git
Updated the regression tests that cover full reductions
commit 0d15ad8019
parent c75a19f815
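This commit migrates full (all-dimension) reductions from returning a rank-1 tensor of size 1 to returning a true rank-0 scalar tensor, so results are read as t() instead of t(0). A minimal sketch of the new access pattern, assuming Eigen's unsupported CXX11 Tensor module (the sizes here are illustrative):

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<float, 2> t(2, 3);
      t.setRandom();

      // A full reduction now yields a rank-0 tensor rather than a
      // one-element rank-1 tensor.
      Eigen::Tensor<float, 0> sum = t.sum();

      // Rank-0 tensors report rank() == 0 and are read with an empty
      // argument list; the old tests read the result as sum(0).
      float value = sum();
      return (sum.rank() == 0 && value == sum()) ? 0 : 1;
    }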
@@ -61,14 +61,14 @@ static void test_argmax_tuple_reducer()
   Tensor<Tuple<DenseIndex, float>, 4, DataLayout> index_tuples(2,3,5,7);
   index_tuples = tensor.index_tuples();
 
-  Tensor<Tuple<DenseIndex, float>, 1, DataLayout> reduced(1);
+  Tensor<Tuple<DenseIndex, float>, 0, DataLayout> reduced;
   DimensionList<DenseIndex, 4> dims;
   reduced = index_tuples.reduce(
       dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float>>());
 
-  Tensor<float, 1, DataLayout> maxi = tensor.maximum();
+  Tensor<float, 0, DataLayout> maxi = tensor.maximum();
 
-  VERIFY_IS_EQUAL(maxi(0), reduced(0).second);
+  VERIFY_IS_EQUAL(maxi(), reduced(0).second);
 
   array<DenseIndex, 3> reduce_dims;
   for (int d = 0; d < 3; ++d) reduce_dims[d] = d;
@@ -93,14 +93,14 @@ static void test_argmin_tuple_reducer()
   Tensor<Tuple<DenseIndex, float>, 4, DataLayout> index_tuples(2,3,5,7);
   index_tuples = tensor.index_tuples();
 
-  Tensor<Tuple<DenseIndex, float>, 1, DataLayout> reduced(1);
+  Tensor<Tuple<DenseIndex, float>, 0, DataLayout> reduced;
   DimensionList<DenseIndex, 4> dims;
   reduced = index_tuples.reduce(
       dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float>>());
 
-  Tensor<float, 1, DataLayout> mini = tensor.minimum();
+  Tensor<float, 0, DataLayout> mini = tensor.minimum();
 
-  VERIFY_IS_EQUAL(mini(0), reduced(0).second);
+  VERIFY_IS_EQUAL(mini(), reduced(0).second);
 
   array<DenseIndex, 3> reduce_dims;
   for (int d = 0; d < 3; ++d) reduce_dims[d] = d;
@@ -123,7 +123,7 @@ static void test_simple_argmax()
   tensor = (tensor + tensor.constant(0.5)).log();
   tensor(0,0,0,0) = 10.0;
 
-  Tensor<DenseIndex, 1, DataLayout> tensor_argmax(1);
+  Tensor<DenseIndex, 0, DataLayout> tensor_argmax;
 
   tensor_argmax = tensor.argmax();
 
@@ -144,7 +144,7 @@ static void test_simple_argmin()
   tensor = (tensor + tensor.constant(0.5)).log();
   tensor(0,0,0,0) = -10.0;
 
-  Tensor<DenseIndex, 1, DataLayout> tensor_argmin(1);
+  Tensor<DenseIndex, 0, DataLayout> tensor_argmin;
 
   tensor_argmin = tensor.argmin();
 
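The four hunks above update the argmax/argmin tests accordingly: a full argmax or argmin now produces a rank-0 index tensor. A hedged usage sketch in the same style (variable names are illustrative):

    Eigen::Tensor<float, 4> tensor(2, 3, 5, 7);
    tensor.setRandom();
    // Full argmax over all dimensions: a rank-0 tensor holding the
    // (flattened) index of the largest coefficient.
    Eigen::Tensor<Eigen::DenseIndex, 0> idx = tensor.argmax();
    Eigen::DenseIndex flat = idx();  // empty parentheses for rank-0 access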
@@ -142,7 +142,7 @@ static void test_type2index_list()
   }
 
   const Dims4 reduction_axis4;
-  Tensor<float, 1> result4 = tensor.sum(reduction_axis4);
+  Tensor<float, 0> result4 = tensor.sum(reduction_axis4);
   float expected = 0.0f;
   for (int m = 0; m < 11; ++m) {
     for (int l = 0; l < 7; ++l) {
@@ -155,7 +155,7 @@ static void test_type2index_list()
       }
     }
   }
-  VERIFY_IS_APPROX(result4(0), expected);
+  VERIFY_IS_APPROX(result4(), expected);
 }
 
 
@@ -236,9 +236,9 @@ static void test_mixed_index_list()
   EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase<ReductionList>()() == true), YOU_MADE_A_PROGRAMMING_MISTAKE);
 #endif
 
-  Tensor<float, 1> result1 = tensor.sum(reduction_axis);
-  Tensor<float, 1> result2 = tensor.sum(reduction_indices);
-  Tensor<float, 1> result3 = tensor.sum(reduction_list);
+  Tensor<float, 0> result1 = tensor.sum(reduction_axis);
+  Tensor<float, 0> result2 = tensor.sum(reduction_indices);
+  Tensor<float, 0> result3 = tensor.sum(reduction_list);
 
   float expected = 0.0f;
   for (int i = 0; i < 2; ++i) {
@@ -250,9 +250,9 @@ static void test_mixed_index_list()
       }
     }
   }
-  VERIFY_IS_APPROX(result1(0), expected);
-  VERIFY_IS_APPROX(result2(0), expected);
-  VERIFY_IS_APPROX(result3(0), expected);
+  VERIFY_IS_APPROX(result1(), expected);
+  VERIFY_IS_APPROX(result2(), expected);
+  VERIFY_IS_APPROX(result3(), expected);
 }
 
 
@@ -232,8 +232,11 @@ static void test_from_tensor()
 
 
 static int f(const TensorMap<Tensor<int, 3> >& tensor) {
-  Tensor<int, 1> result = tensor.sum();
-  return result(0);
+  // Size<0> empty;
+  EIGEN_STATIC_ASSERT((internal::array_size<Sizes<>>::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+  EIGEN_STATIC_ASSERT((internal::array_size<DSizes<int, 0>>::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+  Tensor<int, 0> result = tensor.sum();
+  return result();
 }
 
 static void test_casting()
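The static asserts added above pin down why the rank-0 declarations work: an empty Sizes<> and a DSizes<int, 0> both have compile-time array size zero, so a rank-0 tensor is genuinely dimensionless. The same check restated as a standalone sketch (assuming the same headers):

    static_assert(Eigen::internal::array_size<Eigen::Sizes<>>::value == 0,
                  "an empty Sizes<> has no dimensions");
    static_assert(Eigen::internal::array_size<Eigen::DSizes<int, 0>>::value == 0,
                  "a rank-0 DSizes has no dimensions");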
@@ -13,6 +13,45 @@
 
 using Eigen::Tensor;
 
+template <int DataLayout>
+static void test_trivial_reductions() {
+  {
+    Tensor<float, 0, DataLayout> tensor;
+    tensor.setRandom();
+    array<ptrdiff_t, 0> reduction_axis;
+
+    Tensor<float, 0, DataLayout> result = tensor.sum(reduction_axis);
+    VERIFY_IS_EQUAL(result(), tensor());
+  }
+
+  {
+    Tensor<float, 1, DataLayout> tensor(7);
+    tensor.setRandom();
+    array<ptrdiff_t, 0> reduction_axis;
+
+    Tensor<float, 1, DataLayout> result = tensor.sum(reduction_axis);
+    VERIFY_IS_EQUAL(result.dimension(0), 7);
+    for (int i = 0; i < 7; ++i) {
+      VERIFY_IS_EQUAL(result(i), tensor(i));
+    }
+  }
+
+  {
+    Tensor<float, 2, DataLayout> tensor(2, 3);
+    tensor.setRandom();
+    array<ptrdiff_t, 0> reduction_axis;
+
+    Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis);
+    VERIFY_IS_EQUAL(result.dimension(0), 2);
+    VERIFY_IS_EQUAL(result.dimension(1), 3);
+    for (int i = 0; i < 2; ++i) {
+      for (int j = 0; j < 3; ++j) {
+        VERIFY_IS_EQUAL(result(i, j), tensor(i, j));
+      }
+    }
+  }
+}
+
 template <int DataLayout>
 static void test_simple_reductions() {
   Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
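The new test_trivial_reductions covers the degenerate case: reducing over an empty list of axes is the identity, preserving rank, shape, and every coefficient. A condensed usage sketch:

    Eigen::Tensor<float, 2> t(2, 3);
    t.setRandom();
    Eigen::array<ptrdiff_t, 0> no_axes;  // reduce over no dimensions
    Eigen::Tensor<float, 2> same = t.sum(no_axes);
    // same(i, j) == t(i, j) for all i, j.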
@@ -37,18 +76,18 @@ static void test_simple_reductions() {
   }
 
   {
-    Tensor<float, 1, DataLayout> sum1 = tensor.sum();
-    VERIFY_IS_EQUAL(sum1.dimension(0), 1);
+    Tensor<float, 0, DataLayout> sum1 = tensor.sum();
+    VERIFY_IS_EQUAL(sum1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
     reduction_axis4[0] = 0;
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 1, DataLayout> sum2 = tensor.sum(reduction_axis4);
-    VERIFY_IS_EQUAL(sum2.dimension(0), 1);
+    Tensor<float, 0, DataLayout> sum2 = tensor.sum(reduction_axis4);
+    VERIFY_IS_EQUAL(sum2.rank(), 0);
 
-    VERIFY_IS_APPROX(sum1(0), sum2(0));
+    VERIFY_IS_APPROX(sum1(), sum2());
   }
 
   reduction_axis2[0] = 0;
@@ -69,18 +108,18 @@ static void test_simple_reductions() {
   }
 
   {
-    Tensor<float, 1, DataLayout> prod1 = tensor.prod();
-    VERIFY_IS_EQUAL(prod1.dimension(0), 1);
+    Tensor<float, 0, DataLayout> prod1 = tensor.prod();
+    VERIFY_IS_EQUAL(prod1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
     reduction_axis4[0] = 0;
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 1, DataLayout> prod2 = tensor.prod(reduction_axis4);
-    VERIFY_IS_EQUAL(prod2.dimension(0), 1);
+    Tensor<float, 0, DataLayout> prod2 = tensor.prod(reduction_axis4);
+    VERIFY_IS_EQUAL(prod2.rank(), 0);
 
-    VERIFY_IS_APPROX(prod1(0), prod2(0));
+    VERIFY_IS_APPROX(prod1(), prod2());
   }
 
   reduction_axis2[0] = 0;
@@ -101,18 +140,18 @@ static void test_simple_reductions() {
   }
 
   {
-    Tensor<float, 1, DataLayout> max1 = tensor.maximum();
-    VERIFY_IS_EQUAL(max1.dimension(0), 1);
+    Tensor<float, 0, DataLayout> max1 = tensor.maximum();
+    VERIFY_IS_EQUAL(max1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
     reduction_axis4[0] = 0;
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 1, DataLayout> max2 = tensor.maximum(reduction_axis4);
-    VERIFY_IS_EQUAL(max2.dimension(0), 1);
+    Tensor<float, 0, DataLayout> max2 = tensor.maximum(reduction_axis4);
+    VERIFY_IS_EQUAL(max2.rank(), 0);
 
-    VERIFY_IS_APPROX(max1(0), max2(0));
+    VERIFY_IS_APPROX(max1(), max2());
   }
 
   reduction_axis2[0] = 0;
@@ -133,18 +172,18 @@ static void test_simple_reductions() {
   }
 
   {
-    Tensor<float, 1, DataLayout> min1 = tensor.minimum();
-    VERIFY_IS_EQUAL(min1.dimension(0), 1);
+    Tensor<float, 0, DataLayout> min1 = tensor.minimum();
+    VERIFY_IS_EQUAL(min1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
     reduction_axis4[0] = 0;
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 1, DataLayout> min2 = tensor.minimum(reduction_axis4);
-    VERIFY_IS_EQUAL(min2.dimension(0), 1);
+    Tensor<float, 0, DataLayout> min2 = tensor.minimum(reduction_axis4);
+    VERIFY_IS_EQUAL(min2.rank(), 0);
 
-    VERIFY_IS_APPROX(min1(0), min2(0));
+    VERIFY_IS_APPROX(min1(), min2());
   }
 
   reduction_axis2[0] = 0;
@@ -167,35 +206,35 @@ static void test_simple_reductions() {
   }
 
   {
-    Tensor<float, 1, DataLayout> mean1 = tensor.mean();
-    VERIFY_IS_EQUAL(mean1.dimension(0), 1);
+    Tensor<float, 0, DataLayout> mean1 = tensor.mean();
+    VERIFY_IS_EQUAL(mean1.rank(), 0);
 
     array<ptrdiff_t, 4> reduction_axis4;
     reduction_axis4[0] = 0;
     reduction_axis4[1] = 1;
     reduction_axis4[2] = 2;
     reduction_axis4[3] = 3;
-    Tensor<float, 1, DataLayout> mean2 = tensor.mean(reduction_axis4);
-    VERIFY_IS_EQUAL(mean2.dimension(0), 1);
+    Tensor<float, 0, DataLayout> mean2 = tensor.mean(reduction_axis4);
+    VERIFY_IS_EQUAL(mean2.rank(), 0);
 
-    VERIFY_IS_APPROX(mean1(0), mean2(0));
+    VERIFY_IS_APPROX(mean1(), mean2());
   }
 
   {
     Tensor<int, 1> ints(10);
     std::iota(ints.data(), ints.data() + ints.dimension(0), 0);
 
-    TensorFixedSize<bool, Sizes<1> > all;
+    TensorFixedSize<bool, Sizes<> > all;
     all = ints.all();
-    VERIFY(!all(0));
+    VERIFY(!all());
     all = (ints >= ints.constant(0)).all();
-    VERIFY(all(0));
+    VERIFY(all());
 
-    TensorFixedSize<bool, Sizes<1> > any;
+    TensorFixedSize<bool, Sizes<> > any;
     any = (ints > ints.constant(10)).any();
-    VERIFY(!any(0));
+    VERIFY(!any());
     any = (ints < ints.constant(1)).any();
-    VERIFY(any(0));
+    VERIFY(any());
   }
 }
 
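Boolean reductions follow the same convention: all() and any() over a whole tensor now produce a rank-0 boolean, declared in the test as TensorFixedSize<bool, Sizes<> >. A short sketch (needs <numeric> for std::iota):

    Eigen::Tensor<int, 1> ints(10);
    std::iota(ints.data(), ints.data() + ints.dimension(0), 0);  // 0..9
    Eigen::TensorFixedSize<bool, Eigen::Sizes<> > all_nonneg;
    all_nonneg = (ints >= ints.constant(0)).all();
    bool ok = all_nonneg();  // true: every element is >= 0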
@@ -207,8 +246,8 @@ static void test_full_reductions() {
   reduction_axis[0] = 0;
   reduction_axis[1] = 1;
 
-  Tensor<float, 1, DataLayout> result = tensor.sum(reduction_axis);
-  VERIFY_IS_EQUAL(result.dimension(0), 1);
+  Tensor<float, 0, DataLayout> result = tensor.sum(reduction_axis);
+  VERIFY_IS_EQUAL(result.rank(), 0);
 
   float sum = 0.0f;
   for (int i = 0; i < 2; ++i) {
@@ -219,7 +258,7 @@ static void test_full_reductions() {
   VERIFY_IS_APPROX(result(0), sum);
 
   result = tensor.square().sum(reduction_axis).sqrt();
-  VERIFY_IS_EQUAL(result.dimension(0), 1);
+  VERIFY_IS_EQUAL(result.rank(), 0);
 
   sum = 0.0f;
   for (int i = 0; i < 2; ++i) {
@@ -227,7 +266,7 @@ static void test_full_reductions() {
       sum += tensor(i, j) * tensor(i, j);
     }
   }
-  VERIFY_IS_APPROX(result(0), sqrtf(sum));
+  VERIFY_IS_APPROX(result(), sqrtf(sum));
 }
 
 struct UserReducer {
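Note the composed expression in the hunks above: square(), a full sum(), then sqrt(), still collapsing to a rank-0 result. Assuming the no-argument sum() composes the same way as the explicit-axes form used in the test, the idiom reads:

    Eigen::Tensor<float, 2> t(2, 3);
    t.setRandom();
    Eigen::Tensor<float, 0> norm = t.square().sum().sqrt();  // Frobenius norm
    float n = norm();  // scalar result via empty parentheses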
@@ -418,6 +457,8 @@ static void test_reduce_middle_dims() {
 }
 
 void test_cxx11_tensor_reduction() {
+  CALL_SUBTEST(test_trivial_reductions<ColMajor>());
+  CALL_SUBTEST(test_trivial_reductions<RowMajor>());
   CALL_SUBTEST(test_simple_reductions<ColMajor>());
   CALL_SUBTEST(test_simple_reductions<RowMajor>());
   CALL_SUBTEST(test_full_reductions<ColMajor>());
@@ -28,7 +28,7 @@ static void test_full_reductions() {
   Tensor<float, 2, DataLayout> in(num_rows, num_cols);
   in.setRandom();
 
-  Tensor<float, 1, DataLayout> full_redux(1);
+  Tensor<float, 0, DataLayout> full_redux;
   full_redux = in.sum();
 
   std::size_t in_bytes = in.size() * sizeof(float);
@@ -38,16 +38,16 @@ static void test_full_reductions() {
   gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
 
   TensorMap<Tensor<float, 2, DataLayout> > in_gpu(gpu_in_ptr, num_rows, num_cols);
-  TensorMap<Tensor<float, 1, DataLayout> > out_gpu(gpu_out_ptr, 1);
+  TensorMap<Tensor<float, 0, DataLayout> > out_gpu(gpu_out_ptr);
 
   out_gpu.device(gpu_device) = in_gpu.sum();
 
-  Tensor<float, 1, DataLayout> full_redux_gpu(1);
+  Tensor<float, 0, DataLayout> full_redux_gpu;
   gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
   gpu_device.synchronize();
 
   // Check that the CPU and GPU reductions return the same result.
-  VERIFY_IS_APPROX(full_redux(0), full_redux_gpu(0));
+  VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
 }
 
 void test_cxx11_tensor_reduction_cuda() {
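The same shape change carries over to device code: a rank-0 TensorMap binds a device pointer with no dimension arguments, and the host-side copy is likewise rank 0. A hedged sketch of the pattern, with gpu_out_ptr, in_gpu, and gpu_device assumed to be set up as in the test above:

    // gpu_out_ptr points at sizeof(float) bytes of device memory.
    Eigen::TensorMap<Eigen::Tensor<float, 0> > out_gpu(gpu_out_ptr);
    out_gpu.device(gpu_device) = in_gpu.sum();  // full reduction to a scalar

    Eigen::Tensor<float, 0> full_redux_gpu;
    gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, sizeof(float));
    gpu_device.synchronize();
    float value = full_redux_gpu();  // rank-0 access on the host copy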
@@ -14,7 +14,7 @@ static void test_comparison_sugar() {
   // make sure we have at least one value == 0
   t(0,0,0) = 0;
 
-  Tensor<bool,1> b;
+  Tensor<bool,0> b;
 
 #define TEST_TENSOR_EQUAL(e1, e2) \
   b = ((e1) == (e2)).all(); \