From d7a84148483b1a11b993c037a2cea5b43f2c052f Mon Sep 17 00:00:00 2001
From: Mehdi Goli
Date: Wed, 1 Aug 2018 11:56:30 +0100
Subject: [PATCH 1/2] Distinguishing between internal memory
 allocation/deallocation from explicit user memory allocation/deallocation.

---
 .../Eigen/CXX11/src/Tensor/TensorCustomOp.h        |  8 ++++----
 .../Eigen/CXX11/src/Tensor/TensorDeviceDefault.h   |  6 ++++++
 .../Eigen/CXX11/src/Tensor/TensorDeviceGpu.h       |  7 +++++++
 .../CXX11/src/Tensor/TensorDeviceThreadPool.h      |  8 ++++++++
 .../Eigen/CXX11/src/Tensor/TensorForcedEval.h      |  4 ++--
 .../Eigen/CXX11/src/Tensor/TensorReduction.h       | 14 +++++++-------
 6 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h b/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h
index 0e4db46de..7b4d56e4b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h
@@ -112,7 +112,7 @@ struct TensorEvaluator<const TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Devi
       return false;
     } else {
       m_result = static_cast<CoeffReturnType*>(
-          m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
+          m_device.allocate_temp(dimensions().TotalSize() * sizeof(Scalar)));
       evalTo(m_result);
       return true;
     }
@@ -120,7 +120,7 @@ struct TensorEvaluator<const TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Devi
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
     if (m_result != NULL) {
-      m_device.deallocate(m_result);
+      m_device.deallocate_temp(m_result);
       m_result = NULL;
     }
   }
@@ -273,7 +273,7 @@ struct TensorEvaluator<const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType,
       return false;
     } else {
-      m_result = static_cast<CoeffReturnType*>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
+      m_result = static_cast<CoeffReturnType*>(m_device.allocate_temp(dimensions().TotalSize() * sizeof(Scalar)));
       evalTo(m_result);
       return true;
     }
@@ -281,7 +281,7 @@ struct TensorEvaluator<const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType,
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
     if (m_result != NULL) {
-      m_device.deallocate(m_result);
+      m_device.deallocate_temp(m_result);
       m_result = NULL;
     }
   }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
@@ -21,6 +21,12 @@ struct DefaultDevice {
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
     internal::aligned_free(buffer);
   }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
+    return allocate(num_bytes);
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
+    deallocate(buffer);
+  }
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
     ::memcpy(dst, src, n);
   }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
@@ -159,6 +159,13 @@ class GpuStreamDevice : public StreamInterface {
     err = gpuFree(buffer);
     gpu_assert(err == gpuSuccess);
   }
+  EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
+    return stream_->allocate(num_bytes);
+  }
+
+  EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
+    stream_->deallocate(buffer);
+  }
 
   virtual void* scratchpad() const {
     if (scratch_ == NULL) {
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
index 90fd99027..5a16ebe50 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
@@ -105,6 +105,14 @@ struct ThreadPoolDevice {
     internal::aligned_free(buffer);
   }
 
+  EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
+    return allocate(num_bytes);
+  }
+
+  EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
+    deallocate(buffer);
+  }
+
   EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
     ::memcpy(dst, src, n);
   }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
index fdb31928f..09f6f2067 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
@@ -115,7 +115,7 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
 #endif
   EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
     const Index numValues = internal::array_prod(m_impl.dimensions());
-    m_buffer = (CoeffReturnType*)m_device.allocate(numValues * sizeof(CoeffReturnType));
+    m_buffer = (CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType));
     // Should initialize the memory in case we're dealing with non POD types.
     if (NumTraits<CoeffReturnType>::RequireInitialization) {
       for (Index i = 0; i < numValues; ++i) {
@@ -129,7 +129,7 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
     return true;
   }
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
-    m_device.deallocate(m_buffer);
+    m_device.deallocate_temp(m_buffer);
     m_buffer = NULL;
   }
 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index 375fc0802..c1cbdebc6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -513,7 +513,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
          !RunningOnGPU))) {
       bool need_assign = false;
       if (!data) {
-        m_result = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType)));
+        m_result = static_cast<CoeffReturnType*>(m_device.allocate_temp(sizeof(CoeffReturnType)));
         data = m_result;
         need_assign = true;
       }
@@ -525,7 +525,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
       const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
       const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
       if (!data) {
-        data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+        data = static_cast<CoeffReturnType*>(m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
         m_result = data;
       }
       Op reducer(m_reducer);
@@ -549,7 +549,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
       const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
       if (!data) {
         if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 128) {
-          data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+          data = static_cast<CoeffReturnType*>(m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
           m_result = data;
         }
         else {
@@ -559,7 +559,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
       Op reducer(m_reducer);
       if (internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
         if (m_result) {
-          m_device.deallocate(m_result);
+          m_device.deallocate_temp(m_result);
           m_result = NULL;
         }
         return true;
@@ -582,7 +582,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
       const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
       if (!data) {
         if (num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 32) {
-          data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+          data = static_cast<CoeffReturnType*>(m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
           m_result = data;
         }
         else {
@@ -592,7 +592,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
       Op reducer(m_reducer);
       if (internal::OuterReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
         if (m_result) {
-          m_device.deallocate(m_result);
+          m_device.deallocate_temp(m_result);
           m_result = NULL;
         }
         return true;
@@ -607,7 +607,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
     m_impl.cleanup();
     if (m_result) {
-      m_device.deallocate(m_result);
+      m_device.deallocate_temp(m_result);
       m_result = NULL;
     }
   }

From c6a5c70712851cd696d7410579506fc299c04a05 Mon Sep 17 00:00:00 2001
From: Mehdi Goli
Date: Wed, 1 Aug 2018 16:56:26 +0100
Subject: [PATCH 2/2] Correcting the position of allocate_temp/deallocate_temp
 in TensorDeviceGpu.h

---
 .../Eigen/CXX11/src/Tensor/TensorDeviceGpu.h | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
index 48bbd5e45..b490433db 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
@@ -159,13 +159,6 @@ class GpuStreamDevice : public StreamInterface {
     err = gpuFree(buffer);
     gpu_assert(err == gpuSuccess);
   }
-  EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
-    return stream_->allocate(num_bytes);
-  }
-
-  EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
-    stream_->deallocate(buffer);
-  }
 
   virtual void* scratchpad() const {
     if (scratch_ == NULL) {
@@ -214,6 +207,15 @@ struct GpuDevice {
     stream_->deallocate(buffer);
   }
 
+  EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
+    return stream_->allocate(num_bytes);
+  }
+
+  EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
+    stream_->deallocate(buffer);
+  }
+
+
   EIGEN_STRONG_INLINE void* scratchpad() const {
     return stream_->scratchpad();
   }
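
Net effect of the series: the Tensor device interface now distinguishes buffers whose
lifetime the user controls (allocate/deallocate) from scratch buffers that evaluators
such as TensorCustomOp, TensorForcedEval, and TensorReduction create and free
internally (allocate_temp/deallocate_temp). On DefaultDevice, ThreadPoolDevice, and
GpuDevice the temp variants simply forward to the regular ones, so behaviour is
unchanged; the split exists so that specialised backends can treat internal
temporaries differently. The stand-alone sketch below is illustrative only, not part
of the patch or of Eigen's API: a hypothetical TrackingDevice (the class name and the
counter are assumptions) shows how a backend could use that hook to count live
evaluator temporaries.

// Illustrative sketch only -- not part of the patch or of Eigen's API.
// A minimal device-like class that exploits the allocate_temp/deallocate_temp
// split to count evaluator scratch buffers separately from user buffers.
#include <atomic>
#include <cassert>
#include <cstdlib>
#include <iostream>

struct TrackingDevice {
  // User-controlled buffers: lifetime owned by the caller, not counted.
  void* allocate(std::size_t num_bytes) const { return std::malloc(num_bytes); }
  void deallocate(void* buffer) const { std::free(buffer); }

  // Internal scratch buffers: after these patches, evaluators route their
  // temporaries here, so a backend can pool, log, or count them independently.
  void* allocate_temp(std::size_t num_bytes) const {
    ++live_temps;
    return allocate(num_bytes);
  }
  void deallocate_temp(void* buffer) const {
    --live_temps;
    deallocate(buffer);
  }

  static std::atomic<int> live_temps;  // number of scratch buffers currently alive
};

std::atomic<int> TrackingDevice::live_temps{0};

int main() {
  TrackingDevice device;
  void* user = device.allocate(256);       // user buffer: not counted
  void* tmp  = device.allocate_temp(512);  // evaluator scratch: counted
  std::cout << "live temporaries: " << TrackingDevice::live_temps << '\n';  // prints 1
  device.deallocate_temp(tmp);
  device.deallocate(user);
  assert(TrackingDevice::live_temps == 0);  // all scratch memory returned
  return 0;
}

Forwarding by default keeps every existing device source-compatible: only backends
that care about the distinction need to override the _temp pair.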