#include <mpi.h>
#include <boost/thread/thread.hpp>
using namespace boost;
#define PROC (8)   // number of worker threads per MPI process
// functor run by each worker thread: updates x on the index range
// [i0,i1) and stores the thread's partial sum of squares of y in *ep
struct thread1 {
    float *x, *y, *ep;
    int i0, i1, p;
    thread1(float *xx, float *yy, float *ee, int pp, int ii0, int ii1) :
        x(xx), y(yy), ep(ee), i0(ii0), i1(ii1), p(pp) {}
    void operator()() {
        // do computation
        float e = 0;
        for (int i = i0; i < i1; ++i) {
            x[i] += (y[i+1] + y[i-1]) * .5f;
            e += y[i] * y[i];
        }
        *ep = e;
    }
};
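// Note: boost::thread_group::create_thread copies the functor, so the
// thread1 object itself need not outlive the call; the arrays x, y and
// the slot *ep, however, must stay valid until join_all() returns.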
int main(int argc, char *argv[]) {
    int n = ...;   // global problem size (elided)
    MPI_Init(&argc, &argv);
    int numproc, me;
    MPI_Comm_size(MPI_COMM_WORLD, &numproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    // ranks of the neighboring processes; -1 marks a domain boundary
    int p_left = -1, p_right = -1;
    if (me > 0)
        p_left = me - 1;
    if (me < numproc - 1)
        p_right = me + 1;
    // block decomposition of the interior points 1 .. n-1:
    // this rank owns the half-open index range [n_local0, n_local1)
    int n_local0 = 1 + (me * (n-1)) / numproc;
    int n_local1 = 1 + ((me+1) * (n-1)) / numproc;
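    // Worked example (illustration only): for n = 11 and numproc = 2 the
    // interior points 1..10 split into [1,6) on rank 0 and [6,11) on
    // rank 1, so adjacent ranks meet without gap or overlap.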
    // allocate only local part + ghost zone of the arrays x,y
    float *x, *y;
    x = new float[n_local1 - n_local0 + 2];
    y = new float[n_local1 - n_local0 + 2];
    // shift the pointers so both arrays are addressed by *global* indices
    // n_local0-1 .. n_local1 (a common trick, though pointer arithmetic
    // outside the allocation is formally undefined behaviour)
    x -= (n_local0 - 1);
    y -= (n_local0 - 1);
    ... // fill x, y
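    // Illustration: with n_local0 = 6 and n_local1 = 11, seven floats are
    // allocated and after the shift the valid indices are exactly 5..11;
    // the entries at the two ends form the ghost zone filled below.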
    // fill ghost zone: pass boundary values of y leftward (tag 1) and
    // rightward (tag 2); the calls are ordered so that every blocking
    // MPI_Send is eventually matched by a posted receive, hence the
    // exchange cannot deadlock even if MPI buffers nothing
    MPI_Status s;
    if (p_left != -1)
        MPI_Send(&y[n_local0], 1, MPI_FLOAT, p_left,
                 1, MPI_COMM_WORLD);
    if (p_right != -1) {
        MPI_Recv(&y[n_local1], 1, MPI_FLOAT, p_right,
                 1, MPI_COMM_WORLD, &s);
        MPI_Send(&y[n_local1-1], 1, MPI_FLOAT, p_right,
                 2, MPI_COMM_WORLD);
    }
    if (p_left != -1)
        MPI_Recv(&y[n_local0-1], 1, MPI_FLOAT, p_left,
                 2, MPI_COMM_WORLD, &s);
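    // Alternative (a sketch): each pass above can be collapsed into one
    // MPI_Sendrecv, with MPI_PROC_NULL turning the calls at the domain
    // boundaries into no-ops so no if-branches are needed:
    //   int left  = (p_left  != -1) ? p_left  : MPI_PROC_NULL;
    //   int right = (p_right != -1) ? p_right : MPI_PROC_NULL;
    //   MPI_Sendrecv(&y[n_local0],   1, MPI_FLOAT, left,  1,
    //                &y[n_local1],   1, MPI_FLOAT, right, 1,
    //                MPI_COMM_WORLD, &s);
    //   MPI_Sendrecv(&y[n_local1-1], 1, MPI_FLOAT, right, 2,
    //                &y[n_local0-1], 1, MPI_FLOAT, left,  2,
    //                MPI_COMM_WORLD, &s);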
    float e = 0;
    float e_vec[PROC];   // one result slot per thread: no locking needed
    thread_group grp;
    // start threads and wait for termination;
    // each thread gets an equal share of [n_local0, n_local1)
    for (int i = 0; i < PROC; ++i) {
        thread1 t(x, y, &e_vec[i], i,
                  n_local0 + ((n_local1-n_local0)*i) / PROC,
                  n_local0 + ((n_local1-n_local0)*(i+1)) / PROC);
        grp.create_thread(t);
    }
    grp.join_all();
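    // Sketch of the same fork/join with C++11 std::thread instead of
    // Boost (assumes <thread> and <vector> are included):
    //   std::vector<std::thread> threads;
    //   for (int i = 0; i < PROC; ++i)
    //       threads.push_back(std::thread(thread1(x, y, &e_vec[i], i,
    //           n_local0 + ((n_local1-n_local0)*i) / PROC,
    //           n_local0 + ((n_local1-n_local0)*(i+1)) / PROC)));
    //   for (std::size_t i = 0; i < threads.size(); ++i)
    //       threads[i].join();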
    // sum the per-thread partial results, then reduce across all ranks;
    // MPI_Allreduce leaves the global sum in e on every process
    for (int i = 0; i < PROC; ++i)
        e += e_vec[i];
    float e_local = e;
    MPI_Allreduce(&e_local, &e, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
    ... // output x, e
    // undo the index shift so delete[] gets the pointers new[] returned
    x += (n_local0 - 1);
    y += (n_local0 - 1);
    delete[] x;
    delete[] y;   // note: "delete[] x, y;" would free only x (comma operator)
    MPI_Finalize();
    return 0;
}