Mirror of https://gitlab.com/libeigen/eigen.git
make Aron's idea work using Qt's atomic implementation for the synchronisation
parent 6924bf2e99
commit aeff3ff391
@@ -107,12 +107,14 @@ static void run(int rows, int cols, int depth,
     // (==GEMM_VAR1)
     for(int k=0; k<depth; k+=kc)
     {
-      // pack B_k to B' in parallel fashion,
-      // each thread packs the B_k,j sub block to B'_j where j is the thread id
+      // Pack B_k to B' in parallel fashion,
+      // each thread packs the sub block B_k,j to B'_j where j is the thread id.

-      // TODO before copying to B'_j, makes sure that no other threads are using it!
-      // currently done using a barrier
-      #pragma omp barrier
+      // Before copying to B'_j, we have to make sure that no other thread is still using it,
+      // i.e., we test that info[tid].users equals 0.
+      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
+      while(!info[tid].users.testAndSetOrdered(0,threads)) {}

       const int actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A'

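Note on the hunk above: the commit swaps a blanket OpenMP barrier for a per-slice handshake, so a thread only waits until its own slice B'_tid is free before repacking it. A minimal sketch of that acquire step, assuming Qt's QAtomicInt API (the helper name acquire_slice is ours, not the commit's):

#include <QAtomicInt>

// Spin until the slice's user count drops to 0, then claim it for `threads`
// consumers in a single compare-and-swap. testAndSetOrdered(e, d) atomically
// does: if (value == e) { value = d; return true; } return false; with full
// (acquire + release) memory ordering.
static void acquire_slice(QAtomicInt& users, int threads)
{
  while (!users.testAndSetOrdered(0, threads)) {}
}

Because the CAS is fully ordered, everything the previous users wrote before releasing the slice is visible to the packing thread once the loop exits.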
@@ -122,7 +124,9 @@ static void run(int rows, int cols, int depth,
       sgemm_oncopy(actual_kc, info[tid].rhs_length, &rhs(k,info[tid].rhs_start), rhsStride, blockB+info[tid].rhs_start*kc);
       #endif

       #if 0
+      // mark that the parts B'_j is uptodate and can be used.
+      info[tid].sync.fetchAndStoreOrdered(k);
+
       // this is an attempt to implement a smarter strategy as suggested by Aron
-      // the layout is good, but there is no synchronization yet
       {
@@ -138,17 +142,16 @@ static void run(int rows, int cols, int depth,
       {
         int j = (tid+shift)%threads;

-        // TODO here we have to makes sure that thread j is done with packing B'_j
+        // At this point we have to make sure that B'_j has been updated by the thread j,
+        // we use testAndSetOrdered to mimic a volatile integer
+        while(!info[j].sync.testAndSetOrdered(k,k)) {}

         sgemm_kernel(actual_mc, info[j].rhs_length, actual_kc, alpha, blockA, blockB+info[j].rhs_start*kc, res+info[j].rhs_start*resStride, resStride);
       }
     }

     // then keep going as usual with the remaining A'
     for(int i=mc; i<rows; i+=mc)
     #else
     #pragma omp barrier
     for(int i=0; i<rows; i+=mc)
     #endif
     {
       const int actual_mc = std::min(i+mc,rows)-i;

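The modulo indexing j = (tid+shift)%threads staggers which slice each thread consumes first: every thread starts on its own freshly packed slice and then walks the others in a rotated order, so the threads spread out instead of all hammering slice 0. A small standalone illustration, assuming the elided surrounding loop runs shift from 0 to threads-1:

#include <cstdio>

int main()
{
  const int threads = 4;
  for (int tid = 0; tid < threads; ++tid)
  {
    std::printf("thread %d visits B' slices in order:", tid);
    for (int shift = 0; shift < threads; ++shift)
      std::printf(" %d", (tid + shift) % threads);  // rotated traversal
    std::printf("\n");
  }
  return 0;
}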
@@ -166,6 +169,11 @@ static void run(int rows, int cols, int depth,
       sgemm_kernel(actual_mc, cols, actual_kc, alpha, blockA, blockB, res+i, resStride);
       #endif
     }
+
+    // Release all the sub blocks B'_j of B' for the current thread,
+    // i.e., we simply decrement the number of users by 1
+    for(int j=0; j<threads; ++j)
+      info[j].users.fetchAndAddOrdered(-1);
   }

   ei_aligned_stack_delete(Scalar, blockA, kc*mc);
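Taken together, the atomics in this file form a publish/wait/release protocol over the packed slices of B'. A condensed sketch with hypothetical helper names (the commit inlines all of this in run()):

#include <QAtomicInt>

struct Slot { QAtomicInt sync; QAtomicInt users; };

// Producer (thread tid): B'_tid is packed for iteration k; publish it.
void publish(Slot& s, int k)  { s.sync.fetchAndStoreOrdered(k); }

// Consumer: block until thread j has published iteration k. The CAS with
// identical expected/new values (k, k) acts as an ordered read of sync.
void wait_for(Slot& s, int k) { while (!s.sync.testAndSetOrdered(k, k)) {} }

// Consumer: done reading B'_j for this iteration; drop one user. When the
// count returns to 0, the producer's acquire loop can overwrite the slice.
void release(Slot& s)         { s.users.fetchAndAddOrdered(-1); }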
@@ -25,13 +25,6 @@
 #ifndef EIGEN_PARALLELIZER_H
 #define EIGEN_PARALLELIZER_H

-struct GemmParallelInfo
-{
-  int rhs_start;
-  int rhs_length;
-  float* blockB;
-};
-
 template<bool Parallelize,typename Functor>
 void ei_run_parallel_1d(const Functor& func, int size)
 {
@@ -97,6 +90,15 @@ void ei_run_parallel_2d(const Functor& func, int size1, int size2)
 #endif
 }

+struct GemmParallelInfo
+{
+  QAtomicInt sync;
+  QAtomicInt users;
+  int rhs_start;
+  int rhs_length;
+  float* blockB;
+};
+
 template<bool Parallelize,typename Functor>
 void ei_run_parallel_gemm(const Functor& func, int rows, int cols)
 {
@@ -128,6 +130,8 @@ void ei_run_parallel_gemm(const Functor& func, int rows, int cols)
     info[i].rhs_start = c0;
     info[i].rhs_length = actualBlockCols;
     info[i].blockB = sharedBlockB;
+    info[i].sync.fetchAndStoreOrdered(-1);
+    info[i].users.fetchAndStoreOrdered(0);

     func(r0, actualBlockRows, 0,cols, info);
   }
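Initializing sync to -1 rather than 0 matters: the packing loop starts at k = 0, so a sync that started at 0 would let a consumer pass the testAndSetOrdered(k,k) check before anything had been packed. A tiny self-contained illustration, again with QAtomicInt:

#include <QAtomicInt>
#include <cassert>

int main()
{
  QAtomicInt sync;
  sync.fetchAndStoreOrdered(-1);           // nothing published yet
  assert(!sync.testAndSetOrdered(0, 0));   // a consumer of k==0 would keep spinning
  sync.fetchAndStoreOrdered(0);            // producer publishes iteration 0
  assert(sync.testAndSetOrdered(0, 0));    // now the consumer proceeds
  return 0;
}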
@@ -2,6 +2,8 @@
 // g++-4.4 bench_gemm.cpp -I .. -O2 -DNDEBUG -lrt -fopenmp && OMP_NUM_THREADS=2 ./a.out
 // icpc bench_gemm.cpp -I .. -O3 -DNDEBUG -lrt -openmp && OMP_NUM_THREADS=2 ./a.out

+#include <QAtomicInt>
+
 #include <Eigen/Core>

 #include <bench/BenchTimer.h>
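Since the benchmark now pulls in <QAtomicInt>, the build lines at the top of the file need Qt's include paths (and, depending on the platform, a link against QtCore) as well; something along these lines, where the Qt paths and the -lQtCore flag are illustrative assumptions rather than part of the commit:

// g++-4.4 bench_gemm.cpp -I .. -I /usr/include/qt4 -I /usr/include/qt4/QtCore -O2 -DNDEBUG -lrt -fopenmp -lQtCore && OMP_NUM_THREADS=2 ./a.out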
@@ -69,10 +71,10 @@ void gemm(const M& a, const M& b, M& c)

 int main(int argc, char ** argv)
 {
-  int rep = 2048; // number of repetitions per try
+  int rep = 1; // number of repetitions per try
   int tries = 5; // number of tries, we keep the best

-  int s = 512;
+  int s = 2048;
   int m = s;
   int n = s;
   int p = s;
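The two new defaults move together: the flop count of one product grows as s^3, so going from s = 512 to s = 2048 multiplies the work per call by 4^3 = 64, and a single repetition is already long enough to time. In numbers:

// flop count of one s x s x s product c += a*b is 2*s^3 (a multiply and an add per term)
double flops_old = 2.0 *  512.0 *  512.0 *  512.0;   // ≈ 2.7e8 flops per product
double flops_new = 2.0 * 2048.0 * 2048.0 * 2048.0;   // ≈ 1.7e10 flops, 64x more work per call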
@@ -80,31 +82,48 @@ int main(int argc, char ** argv)
   M b(n,p); b.setRandom();
   M c(m,p); c.setOnes();

-  BenchTimer t;
-
   M r = c;

   // check the parallel product is correct
-  #ifdef HAVE_BLAS
-  blas_gemm(a,b,r);
-  #else
+  #ifdef EIGEN_HAS_OPENMP
+  int procs = omp_get_max_threads();
-  omp_set_num_threads(1);
-  r.noalias() += a * b;
-  omp_set_num_threads(procs);
+  if(procs>1)
+  {
+    #ifdef HAVE_BLAS
+    blas_gemm(a,b,r);
+    #else
+    omp_set_num_threads(1);
+    r.noalias() += a * b;
+    omp_set_num_threads(procs);
+    #endif
+    c.noalias() += a * b;
+    if(!r.isApprox(c)) std::cerr << "Warning, your parallel product is crap!\n\n";
+  }
   #endif
-  c.noalias() += a * b;
-  if(!r.isApprox(c)) std::cerr << "Warning, your parallel product is crap!\n\n";

   #ifdef HAVE_BLAS
-  BENCH(t, tries, rep, blas_gemm(a,b,c));
-  std::cerr << "blas cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
-  std::cerr << "blas real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
+  BenchTimer tblas;
+  BENCH(tblas, tries, rep, blas_gemm(a,b,c));
+  std::cout << "blas cpu " << tblas.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(CPU_TIMER) << "s)\n";
+  std::cout << "blas real " << tblas.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n";
   #endif

-  BENCH(t, tries, rep, gemm(a,b,c));
-  std::cerr << "eigen cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n";
-  std::cerr << "eigen real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n";
+  BenchTimer tmt;
+  BENCH(tmt, tries, rep, gemm(a,b,c));
+  std::cout << "eigen cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n";
+  std::cout << "eigen real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n";

+  #ifdef EIGEN_HAS_OPENMP
+  if(procs>1)
+  {
+    BenchTimer tmono;
+    omp_set_num_threads(1);
+    BENCH(tmono, tries, rep, gemm(a,b,c));
+    std::cout << "eigen mono cpu " << tmono.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(CPU_TIMER) << "s)\n";
+    std::cout << "eigen mono real " << tmono.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n";
+    std::cout << "mt speed up x" << tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER) << " => " << (100.0*tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER))/procs << "%\n";
+  }
+  #endif

   return 0;
 }
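For reference, the rate printed by each of these lines is flops over time: one product of an m x n by an n x p matrix costs 2*m*n*p flops (a multiply and an add per term), accumulated over rep repetitions and scaled by 1e-9 into GFLOPS. A worked example at the new defaults, with an assumed timing:

// m = n = p = 2048, rep = 1, and a hypothetical best wall-clock time:
double flops  = 2.0 * 2048.0 * 2048.0 * 2048.0;  // ≈ 1.72e10 flops per product
double best   = 5.0;                             // assumed best time, in seconds
double gflops = flops / best * 1e-9;             // ≈ 3.4 GFLOPS printed

The final "mt speed up" line divides the single-threaded CPU time by the multi-threaded wall-clock time, and the trailing percentage divides that speed-up by procs to express parallel efficiency.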