#include <mpi.h>
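/* Codelet sub1: adds the average of y[i] and y[i+2] to x[i] and
   accumulates the squares of y[i+1] into e[0]. */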
#pragma hmpp code1 codelet, &
#pragma hmpp code1 target=CUDA:BROOK:PTHREAD:SSE, &
#pragma hmpp code1 args[x,e].io=inout
static void sub1(int n, float x[n], float y[n+2], float e[1]) {
  int i;
  float e_local = e[0];
  // do computation
  for (i=0; i<n; ++i) {
    x[i] += ( y[i+2] + y[i] ) * .5;
    e_local += y[i+1] * y[i+1];
  }
  e[0] = e_local;
}
int main(int argc, char *argv[]) {
  int n = ...;
  MPI_Init(&argc, &argv);
  int numproc, me;
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
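  // determine the ranks of the left and right neighbours (-1 means none)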
  int p_left = -1, p_right = -1;
  if (me > 0)
    p_left = me-1;
  if (me < numproc-1)
    p_right = me+1;
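  // each rank owns the global index range [n_local0, n_local1)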
  int n_local0 = 1 + (me * (n-1)) / numproc;
  int n_local1 = 1 + ((me+1) * (n-1)) / numproc;
  // allocate only local part + ghost zone of the arrays x,y
  float *x, *y;
  x = new float[n_local1 - n_local0 + 2];
  y = new float[n_local1 - n_local0 + 2];
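  // shift the base pointers so x and y can be indexed with the
  // global indices n_local0-1 .. n_local1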
  x -= (n_local0 - 1);
  y -= (n_local0 - 1);
  ... // fill x, y
  // fill ghost zone
  MPI_Status s;
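  // tag 1: y[n_local0] goes to the left neighbour (stored there as y[n_local1]);
  // tag 2: y[n_local1-1] goes to the right neighbour (stored there as y[n_local0-1])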
  if (p_left != -1)
    MPI_Send(&y[n_local0], 1, MPI_FLOAT, p_left, 1, MPI_COMM_WORLD);
  if (p_right != -1) {
    MPI_Recv(&y[n_local1], 1, MPI_FLOAT, p_right, 1, MPI_COMM_WORLD, &s);
    MPI_Send(&y[n_local1-1], 1, MPI_FLOAT, p_right, 2, MPI_COMM_WORLD);
  }
  if (p_left != -1)
    MPI_Recv(&y[n_local0-1], 1, MPI_FLOAT, p_left, 2, MPI_COMM_WORLD, &s);
  float e = 0;
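  // execute the codelet on the local slice; HMPP offloads it at this callsite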
  #pragma hmpp code1 callsite
  sub1(n_local1-n_local0, &x[n_local0], &y[n_local0-1], &e);
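  // sum the partial values of e from all ranks into the global result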
  float e_local = e;
  MPI_Allreduce(&e_local, &e, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
  ... // output x, e
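  // undo the pointer shift before freeing the arrays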
  x += (n_local0 - 1);
  y += (n_local0 - 1);
  delete[] x;
  delete[] y;
  MPI_Finalize();
  return 0;
}