Code example: hybrid parallel programming combining three technologies
 - PVM (Parallel Virtual Machine) — message passing between processes
 - Boost.Threads (part of the C++ Boost library; TBB is a comparable alternative) — shared-memory threading
 - Brook+ (ATI GPU stream computing) — the inner compute kernels

#include "pvm3.h"
#include <boost/thread/thread.hpp>
using namespace boost;

#define PROC (8)

// kernel
// Brook+ map kernel: invoked once per element of the output streams.
// x[] and y[] are gather streams (randomly indexable); xout/eout are the
// per-invocation output elements.
//   xout = smoothed value: x plus the average of y's two neighbours
//   eout = y squared (later summed by the sub2 reduction)
kernel void sub1(float x[], float y[],
  out float xout<>, out float eout<>)
{
  // do computation
  // indexof(xout).x is this element's position in the output stream; the
  // +1 skips the left ghost cell of the y stream (read from y+i-1 by the
  // caller, so y[i-1], y[i], y[i+1] are all in range).
  float i = 1 + indexof(xout).x;
  // NOTE(review): x is indexed with the same +1 ghost offset as y even
  // though the x stream was read WITHOUT a ghost cell (streamRead from
  // x+i) — looks like an off-by-one; confirm against the intended stencil.
  xout = x[i] + (y[i+1] + y[i-1])*.5f;
  eout = y[i] * y[i];
}

// Brook+ reduction kernel: folds every element of stream y into the
// single-element reduction output e by summation (e = sum of all y).
reduce void sub2(float y<>, reduce float e<>)
{
  e += y;
}

// Boost.Threads functor: one instance per CPU thread. Processes the
// half-open global index range [i0, i1) of arrays x and y in chunks of
// BLOCK elements, offloading each chunk to the GPU via Brook+ streams.
// Writes the thread's partial sum of squares to *ep.
struct thread1 {
 float *x, *y, *ep;   // shared arrays (ghost-shifted base pointers) and result slot
 int i0, i1, p;       // [i0,i1) = this thread's range; p = thread index (unused in body)
 thread1(float *xx, float *yy, float *ee, int pp, int ii0, int ii1) :
  x(xx), y(yy), ep(ee), p(pp), i0(ii0), i1(ii1) {}
 void operator()() {
  #define BLOCK (8190)
  // Brook+ stream declarations: xStream/xoutStream hold one BLOCK of x;
  // yStream/einStream carry BLOCK elements plus one ghost cell per side;
  // eStream receives the single reduced sum for the chunk.
  float xStream<BLOCK>;
  float xoutStream<BLOCK>;
  float yStream<BLOCK+2>;
  float einStream<BLOCK+2>;
  float eStream<1>;
  // NOTE(review): e and ee are a redundant double accumulator (e starts
  // at 0 and only receives ee at the end) — presumably a leftover.
  float e = 0;
  float ee = 0;
  // NOTE(review): assumes (i1-i0) is a multiple of BLOCK; a partial final
  // chunk would read/write past i1 since the stream sizes are fixed —
  // TODO confirm the caller guarantees this.
  for (int i=i0; i<i1; i+=BLOCK) {
   streamRead(xStream, x+i);
   streamRead(yStream, y+i-1);   // y+i-1: include the left ghost cell
   sub1(xStream, yStream, xoutStream, einStream);
   streamWrite(xoutStream, x+i); // write smoothed values back in place
   sub2(einStream, eStream);     // GPU-side reduction of this chunk
   float e_local;
   streamWrite(eStream, &e_local);
   ee += e_local;                // accumulate chunk sums on the CPU
  }
  e += ee;
  *ep = e;                       // publish partial sum; joined/reduced by main
 }
};

int main(int argc, char *argv[]) {
  int n = ...;
  if (pvm_parent() == PvmNoParent) {
  #define N (4)
  int tid[N];
  pvm_spawn("program", argv, PvmTaskDefault, (char*)0, N, &tid[0]);
  } else {
  int mytid = pvm_mytid();
  int *tids, me = -1;
  int ntids = pvm_siblings(&tids);
  for (int i=0; i<ntids; ++i)
   if ( tids[i] == mytid) {
    me = i;
    break;
   }
  int p_left = -1, p_right = -1;
  if (me > 0)
    p_left = tids[me-1];
  if (me < ntids-1)
    p_right = tids[me+1];
  int n_local0 = 1 + (me * (n-1)) / ntids;
  int n_local1 = 1 + ((me+1) * (n-1)) / ntids;
  pvm_joingroup("worker");   // allocate only local part + ghost zone of the arrays x,y
  float *x, *y;
  x = new float[n_local1 - n_local0 + 2];
  y = new float[n_local1 - n_local0 + 2];
  x -= (n_local0 - 1);
  y -= (n_local0 - 1);

  ... // fill x, y

  // fill ghost zone
  if (p_left != -1) {
   pvm_initsend(PvmDataDefault);
   pvm_pkfloat(&y[n_local0], 1, 1);
   int msgtag = 1;
   pvm_send(p_left, msgtag);
  }
  if (p_right != -1) {
   int msgtag = 1;
   pvm_recv(p_right, msgtag);
   pvm_upkfloat(&y[n_local1], 1, 1);
   pvm_initsend(PvmDataDefault);
   pvm_pkfloat(&y[n_local1-1], 1, 1);
   msgtag = 2;
   pvm_send(p_right, msgtag);
  }
  if (p_left != -1) {
   int msgtag = 2;
   pvm_recv(p_left, msgtag);
   pvm_upkfloat(&y[n_local0-1], 1, 1);
  }

  float e = 0;
  float e_vec[PROC];
  thread_group grp;
  // start threads and wait for termination
  for (int i=0; i<PROC; ++i) {
   thread1 t(x, y, &e_vec[i], i,
     n_local0+((n_local1-n_local0)*i)/PROC, n_local0+((n_local1-n_local0)*(i+1))/PROC);
   grp.create_thread(t);
  }
  grp.join_all();
  for (int i=0; i<PROC; ++i)
   e += e_vec[i];

  int msgtag = 3;
  pvm_reduce(PvmSum, &e, 1, PVM_FLOAT, msgtag, "worker", tids[0]);
  msgtag = 4;
  if (me==0) {
   pvm_initsend(PvmDataDefault);
   pvm_pkfloat(&e, 1, 1);
   pvm_bcast("worker", msgtag);
  } else {
   pvm_recv(tids[0], msgtag);
   pvm_upkfloat(&e, 1, 1);
  }

  ... // output x, e

  x += (n_local0 - 1);
  y += (n_local0 - 1);
  delete[] x, y;
  }
  pvm_exit();
  return 0;
}

[start] [references] [download] [install]