code example: MPI-2 + Boost threads + Brook+

MPI-2: message passing interface, version 2;
  one-sided communication defined in MPI-2
Boost threads: part of the C++ Boost library
Brook+: ATI GPU stream computing
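
The computation that the listing below distributes over MPI processes, Boost threads, and the GPU can be read off the Brook+ kernels: every interior point of x is updated from its two y neighbours, and e accumulates the sum of squares of y. A minimal serial sketch of that loop (not part of the original listing; the helper name serial_step is made up, and x and y are assumed to hold at least n+1 elements):

// serial reference version (sketch only)
void serial_step(int n, float *x, float *y, float *e_out) {
  float e = 0;
  for (int i = 1; i < n; ++i) {
    x[i] = x[i] + (y[i+1] + y[i-1]) * .5f;   // update from the y neighbours
    e += y[i] * y[i];                        // sum of squares of y
  }
  *e_out = e;
}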

#include <mpi.h>
#include <boost/thread/thread.hpp>
using namespace boost;

#define PROC (8)   // number of CPU threads per MPI process

// Brook+ kernel: update one block of x from the neighbouring y values and
// compute y[i]*y[i] for the reduction; the y stream starts one element to
// the left of the x stream, hence the index offset between x and y
kernel void sub1(float x[], float y[],
  out float xout<>, out float eout<>)
{
  float i = 1 + indexof(xout).x;
  xout = x[i-1] + (y[i+1] + y[i-1])*.5f;
  eout = y[i] * y[i];
}

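// Brook+ reduction kernel: sums all elements of the y stream into e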
reduce void sub2(float y<>, reduce float e<>)
{
  e += y;
}

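// thread functor for one CPU thread: processes the index range [i0, i1)
// in blocks on the GPU and returns its partial sum of y[i]*y[i] via *ep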
struct thread1 {
 float *x, *y, *ep;
 int i0, i1, p;
 thread1(float *xx, float *yy, float *ee, int pp, int ii0, int ii1) :
  x(xx), y(yy), ep(ee), p(pp), i0(ii0), i1(ii1) {}
 void operator()() {
  #define BLOCK (8190)   // number of elements processed per GPU pass
  // Brook+ streams: one block of x, the matching y block plus its two
  // neighbouring elements, the per-element values y[i]*y[i], and the
  // reduced partial sum
  float xStream<BLOCK>;
  float xoutStream<BLOCK>;
  float yStream<BLOCK+2>;
  float einStream<BLOCK>;
  float eStream<1>;
  float e = 0;
  float ee = 0;
  // process the range [i0, i1) in blocks of BLOCK elements
  // (assumes i1-i0 is a multiple of BLOCK)
  for (int i=i0; i<i1; i+=BLOCK) {
   // copy one block of x and y (with ghost elements) to the GPU,
   // run the update kernel and copy the new x values back
   streamRead(xStream, x+i);
   streamRead(yStream, y+i-1);
   sub1(xStream, yStream, xoutStream, einStream);
   streamWrite(xoutStream, x+i);
   // reduce the squared y values of this block on the GPU
   sub2(einStream, eStream);
   float e_local;
   streamWrite(eStream, &e_local);
   ee += e_local;
  }
  e += ee;
  *ep = e;
 }
};

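// MPI level: the interior points 1 .. n-1 are distributed blockwise across
// the processes; each process splits its local range over PROC Boost
// threads, and each thread streams its blocks through the GPU via Brook+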
int main(int argc, char *argv[]) {
  int n = ...;   // global problem size
  MPI_Init(&argc, &argv);
  int numproc, me;
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  // ranks of the left and right neighbour processes (-1: no neighbour)
  int p_left = -1, p_right = -1;
  if (me > 0)
   p_left = me-1;
  if (me < numproc-1)
   p_right = me+1;
  // block distribution of the interior points 1 .. n-1:
  // this process owns the index range [n_local0, n_local1)
  int n_local0 = 1 + (me * (n-1)) / numproc;
  int n_local1 = 1 + ((me+1) * (n-1)) / numproc;
  // allocate only local part + ghost zone of the arrays x,y
  float *x, *y;
  MPI_Alloc_mem(sizeof(float) * (n_local1 - n_local0 + 2),
   MPI_INFO_NULL, &x);
  MPI_Alloc_mem(sizeof(float) * (n_local1 - n_local0 + 2),
   MPI_INFO_NULL, &y);
  // shift the pointers so that x and y can be addressed with the
  // global indices n_local0-1 .. n_local1
  x -= (n_local0 - 1);
  y -= (n_local0 - 1);
  // expose the local part of y, including both ghost cells, as an RMA
  // window for the one-sided ghost-zone exchange below
  MPI_Win win;
  MPI_Win_create(&y[n_local0-1], sizeof(float) * (n_local1-n_local0+2),
   sizeof(float), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

  ... // fill x, y

  // fill ghost zones with one-sided communication: each process puts
  // its boundary values of y into the neighbours' windows
  MPI_Win_fence(0, win);
  if (p_left != -1) {
   // displacement of the left neighbour's right ghost cell in its window
   int disp_left = n_local0 - (1 + (p_left * (n-1)) / numproc) + 1;
   MPI_Put(&y[n_local0], 1, MPI_FLOAT, p_left,
    disp_left, 1, MPI_FLOAT, win);
  }
  if (p_right != -1)
   MPI_Put(&y[n_local1-1], 1, MPI_FLOAT, p_right,
    0, 1, MPI_FLOAT, win);
  MPI_Win_fence(0, win);

  float e = 0;
  float e_vec[PROC];
  thread_group grp;
  // split the local range evenly among PROC Boost threads,
  // start the threads and wait for their termination
  for (int i=0; i<PROC; ++i) {
   thread1 t(x, y, &e_vec[i], i,
     n_local0+((n_local1-n_local0)*i)/PROC, n_local0+((n_local1-n_local0)*(i+1))/PROC);
   grp.create_thread(t);
  }
  grp.join_all();
  for (int i=0; i<PROC; ++i)
   e += e_vec[i];

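  // combine the partial sums of all MPI processes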
  float e_local = e;
  MPI_Allreduce(&e_local, &e, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);

  ... // output x, e

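  // clean up: free the RMA window, undo the pointer shift, free the arrays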
  MPI_Win_free(&win);
  x += (n_local0 - 1);
  y += (n_local0 - 1);
  MPI_Free_mem(y);
  MPI_Free_mem(x);
  MPI_Finalize();
  return 0;
}
