Properly fix merge issues.
commit 5e7de771e3
@@ -42,6 +42,8 @@ template<> struct packet_traits<float> : default_packet_traits
     HasDiGamma = 1,
     HasErf = 1,
     HasErfc = 1,
+    HasIgamma = 1,
+    HasIGammac = 1,
 
     HasBlend = 0,
   };
@@ -66,6 +68,8 @@ template<> struct packet_traits<double> : default_packet_traits
     HasDiGamma = 1,
     HasErf = 1,
     HasErfc = 1,
+    HasIGamma = 1,
+    HasIGammac = 1,
 
     HasBlend = 0,
   };
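
Note: the two hunks above only flip feature flags, so that packet_traits<float> and packet_traits<double> advertise packet (vectorized) implementations of the incomplete gamma functions. Below is a minimal, self-contained sketch of how such Has* flags are typically consumed; the struct and function names are illustrative stand-ins, not Eigen's own code.

    #include <cstdio>

    // Illustrative stand-in for default_packet_traits: no packet kernels declared.
    struct default_packet_traits_sketch {
      enum { HasDiGamma = 0, HasErf = 0, HasErfc = 0,
             HasIGamma = 0, HasIGammac = 0, HasBlend = 0 };
    };

    // Specialization analogous to packet_traits<float> after this commit:
    // the new flags announce that igamma/igammac packet kernels exist.
    struct float_packet_traits_sketch : default_packet_traits_sketch {
      enum { HasDiGamma = 1, HasErf = 1, HasErfc = 1,
             HasIGamma = 1, HasIGammac = 1, HasBlend = 0 };
    };

    // Callers branch on the advertised flag, falling back to a scalar path
    // whenever no packet kernel is declared.
    template <typename Traits>
    void report() {
      std::printf("igamma packet kernel available: %s\n",
                  Traits::HasIGamma ? "yes" : "no");
    }

    int main() {
      report<default_packet_traits_sketch>();  // prints: no
      report<float_packet_traits_sketch>();    // prints: yes
      return 0;
    }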
@@ -308,7 +312,6 @@ template<> EIGEN_DEVICE_FUNC inline double2 pabs<double2>(const double2& a) {
   return make_double2(fabs(a.x), fabs(a.y));
 }
 
-
 EIGEN_DEVICE_FUNC inline void
 ptranspose(PacketBlock<float4,4>& kernel) {
   double tmp = kernel.packet[0].y;
@@ -333,12 +333,27 @@ class TensorBase<Derived, ReadOnlyAccessors>
     operator==(const OtherDerived& other) const {
      return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, internal::cmp_EQ>());
    }
 
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_NEQ>, const Derived, const OtherDerived>
    operator!=(const OtherDerived& other) const {
      return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, internal::cmp_NEQ>());
    }
+
+    // igamma(a = this, x = other)
+    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    const TensorCwiseBinaryOp<internal::scalar_igamma_op<Scalar>, const Derived, const OtherDerived>
+    igamma(const OtherDerived& other) const {
+      return binaryExpr(other.derived(), internal::scalar_igamma_op<Scalar>());
+    }
+
+    // igammac(a = this, x = other)
+    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    const TensorCwiseBinaryOp<internal::scalar_igammac_op<Scalar>, const Derived, const OtherDerived>
+    igammac(const OtherDerived& other) const {
+      return binaryExpr(other.derived(), internal::scalar_igammac_op<Scalar>());
+    }
+
    // comparisons and tests for Scalars
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_LT>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
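
Note: the new TensorBase methods are ordinary coefficient-wise binary expressions. A hedged usage sketch follows, assuming the unsupported CXX11 Tensor module and a build where Eigen's special functions are available (Eigen's igamma/igammac are the regularized lower/upper incomplete gamma functions).

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::Tensor<float, 1> a(3), x(3);
      a.setValues({0.5f, 1.0f, 2.0f});  // shape parameters
      x.setValues({0.5f, 1.0f, 2.0f});  // evaluation points

      // Element-wise P(a, x) and Q(a, x) = 1 - P(a, x), using the operators
      // added in the hunk above.
      Eigen::Tensor<float, 1> p = a.igamma(x);
      Eigen::Tensor<float, 1> q = a.igammac(x);

      std::cout << p << "\n" << q << "\n";
      return 0;
    }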
@@ -1,3 +1,17 @@
+# generate split test header file only if it does not yet exist
+# in order to prevent a rebuild everytime cmake is configured
+if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
+  file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
+  foreach(i RANGE 1 999)
+    file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
+      "#ifdef EIGEN_TEST_PART_${i}\n"
+      "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n"
+      "#else\n"
+      "#define CALL_SUBTEST_${i}(FUNC)\n"
+      "#endif\n\n"
+    )
+  endforeach()
+endif()
 
 set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Unsupported")
 add_custom_target(BuildUnsupported)
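
Note: the generated split_test_helper.h is plain preprocessor glue. Sketched below, assuming the file contains exactly the strings written by the foreach loop above, is the block emitted for i = 1 and how a test translation unit built with -DEIGEN_TEST_PART_1 would use it; the sub-test function names are hypothetical.

    // Emitted for i = 1 (and likewise for 2..999):
    #ifdef EIGEN_TEST_PART_1
    #define CALL_SUBTEST_1(FUNC) CALL_SUBTEST(FUNC)
    #else
    #define CALL_SUBTEST_1(FUNC)
    #endif

    // In a test compiled with -DEIGEN_TEST_PART_1, only the matching macro
    // expands to a real CALL_SUBTEST(...); the others compile to nothing:
    //   CALL_SUBTEST_1(test_cuda_igamma());   // runs
    //   CALL_SUBTEST_2(test_cuda_igammac());  // compiled away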
@@ -161,7 +175,7 @@ endif()
 # These tests needs nvcc
 find_package(CUDA 7.0)
 if(CUDA_FOUND)
-  set(CUDA_PROPAGATE_HOST_FLAGS OFF)
+  # set(CUDA_PROPAGATE_HOST_FLAGS OFF)
   if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
     set(CUDA_NVCC_FLAGS "-ccbin /usr/bin/clang" CACHE STRING "nvcc flags" FORCE)
   endif()