#include <mpi.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/blocked_range.h>
#include <tbb/parallel_reduce.h>
#include <tbb/cache_aligned_allocator.h>
using namespace tbb;
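// TBB reduction body: operator() updates x over a sub-range of indices and
// accumulates the partial sum of squares in ee; the splitting constructor
// and join() let parallel_reduce combine the partial results of the worker
// threads.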
struct thread1 {
  float ee;
  float *x, *y;
  thread1(float *xx, float *yy) : ee(0), x(xx), y(yy) {}
  thread1(thread1& s, split) : ee(0), x(s.x), y(s.y) {}
  void operator()(const blocked_range<int>& r) {
    // do computation
    float e = ee;
    for (int i = r.begin(); i != r.end(); ++i) {
      x[i] += (y[i+1] + y[i-1]) * 0.5;
      e += y[i] * y[i];
    }
    ee = e;
  }
  void join(thread1& s) { ee += s.ee; }
};
int main(int argc, char *argv[]) {
  int n = ...;
  MPI_Init(&argc, &argv);
  int numproc, me;
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
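  // nearest neighbors in the 1-D process chain; -1 marks a missing neighbor
  // at either end of the chain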
  int p_left = -1, p_right = -1;
  if (me > 0)
    p_left = me-1;
  if (me < numproc-1)
    p_right = me+1;
  int n_local0 = 1 + (me * (n-1)) / numproc;
  int n_local1 = 1 + ((me+1) * (n-1)) / numproc;
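  // block decomposition of the global index range 1..n-1: rank me owns
  // indices n_local0 .. n_local1-1, and n_local1 of rank me equals
  // n_local0 of rank me+1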
  // allocate only local part + ghost zone of the arrays x,y
  float *x, *y;
  MPI_Alloc_mem(sizeof(float) * (n_local1 - n_local0 + 2),
                MPI_INFO_NULL, &x);
  MPI_Alloc_mem(sizeof(float) * (n_local1 - n_local0 + 2),
                MPI_INFO_NULL, &y);
  x -= (n_local0 - 1);
  y -= (n_local0 - 1);
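  // after shifting the base pointers, x[i] and y[i] are addressed with the
  // global index i; the locally valid range is n_local0-1 .. n_local1
  // (one ghost cell on each side of the owned block)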
  MPI_Win win;
  // the window starts at the left ghost cell y[n_local0-1] and covers the
  // owned block plus both ghost cells (n_local1-n_local0+2 floats)
  MPI_Win_create(&y[n_local0-1], sizeof(float) * (n_local1-n_local0+2),
                 sizeof(float), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
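  // the displacement unit is sizeof(float), so target displacements in the
  // MPI_Put calls below count array elements, not bytes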
  ... // fill x, y
  // fill ghost zone
  MPI_Win_fence(0, win);
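  // each rank writes its boundary values of y into the neighbors' ghost
  // cells; the puts are only guaranteed to be complete after the closing
  // fence below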
  if (p_left != -1) {
    // my first owned value goes into the left neighbor's right ghost cell;
    // its displacement is that rank's block size + 1, derived from its n_local0
    int n_left0 = 1 + (p_left * (n-1)) / numproc;
    MPI_Put(&y[n_local0], 1, MPI_FLOAT, p_left,
            n_local0 - n_left0 + 1, 1, MPI_FLOAT, win);
  }
  if (p_right != -1) {
    // my last owned value goes into the right neighbor's left ghost cell (disp 0)
    MPI_Put(&y[n_local1-1], 1, MPI_FLOAT, p_right,
            0, 1, MPI_FLOAT, win);
  }
  MPI_Win_fence(0, win);
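  // ghost cells are now up to date on every rank; the TBB worker threads of
  // this process update the owned part of x in parallel and reduce the local
  // sum of squares into t.ee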
  thread1 t(x, y);
  parallel_reduce(blocked_range<int>(n_local0, n_local1, 1024), t);
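  // combine the per-process partial sums into the global value e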
  float e = t.ee;
  float e_local = e;
  MPI_Allreduce(&e_local, &e, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
  ... // output x, e
  MPI_Win_free(&win);
  // undo the pointer shift before handing the memory back to MPI
  x += (n_local0 - 1);
  y += (n_local0 - 1);
  MPI_Free_mem(y);
  MPI_Free_mem(x);
  MPI_Finalize();
  return 0;
}