Code example: hybrid parallelization with
PVM (Parallel Virtual Machine) for message passing between tasks,
boost threads (part of the C++ Boost library) for multithreading within a task,
and OpenCL (Open Computing Language) for GPU computing.
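The listing below combines the three levels: PVM spawns and coordinates the tasks, each task splits its local index range over PROC boost threads, and every thread drives one GPU through an OpenCL kernel. Neighbouring PVM tasks exchange ghost-zone values of y, and the sum of squares is reduced first inside each GPU work-group, then over the threads of a task, and finally across the tasks with pvm_reduce and a broadcast.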

#include "pvm3.h"
#include <boost/thread/thread.hpp>
using namespace boost;
#include <CL/cl.h>
#include <cstdlib>

#define PROC (8)   // boost threads (and GPU devices used) per PVM task

// OpenCL kernel: each work-group of BLOCK work-items updates its block of fx
// and accumulates a partial sum of squares of fy via a local-memory reduction
#define BLOCK (512)
const char *source =
"__kernel void sub1(__global float* fx,\
    __global const float* fy,\
    __local float* se, __global float* fe) {\
  const unsigned int t = get_global_id(0);\
  const unsigned int b = get_group_id(0);\
  const unsigned block = 512;\
  const unsigned int i = block*b+t;\
  float e;\
  /* do computation */\
  fx[t] += ( fy[t+2] + fy[t] )*.5;\
  e = fy[t+1] * fy[t+1];\
  /* reduction */\
  se[t] = e;\
  barrier(CLK_LOCAL_MEM_FENCE);\
  if (t<256) {\
   se[t] += se[t+256];\
   barrier(CLK_LOCAL_MEM_FENCE);\
  }\
  if (t<128) {\
   se[t] += se[t+128];\
   barrier(CLK_LOCAL_MEM_FENCE);\
  }\
  if (t<64) {\
   se[t] += se[t+64];\
   barrier(CLK_LOCAL_MEM_FENCE);\
  }\
  if (t<32) {\
   se[t] += se[t+32];\
   se[t] += se[t+16];\
   se[t] += se[t+8];\
   se[t] += se[t+4];\
   se[t] += se[t+2];\
   se[t] += se[t+1];\
  }\
  if (t==0)\
   fe[b] = se[0];\
}";

struct thread1 {
 float *x, *y, *ep;
 int i0, i1, p;
 thread1(float *xx, float *yy, float *ee, int pp, int ii0, int ii1) :
  x(xx), y(yy), ep(ee), i0(ii0), i1(ii1), p(pp) {}
 void operator()() {
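  // thread body: process the index range [i0,i1) of x,y on GPU number p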
  // create an OpenCL context for all GPU devices and query the device list
  cl_context ct = clCreateContextFromType(0, CL_DEVICE_TYPE_GPU, 0, 0, 0);
  size_t ctsize;
  clGetContextInfo(ct, CL_CONTEXT_DEVICES, 0, 0, &ctsize);
  cl_device_id *aDevices = (cl_device_id*)malloc(ctsize);
  clGetContextInfo(ct, CL_CONTEXT_DEVICES, ctsize, aDevices, 0);
  // compile kernel
  cl_program prog = clCreateProgramWithSource(ct, 1, &source, 0, 0);
  clBuildProgram(prog, 0, 0, 0, 0, 0);
  cl_kernel kern = clCreateKernel(prog, "sub1", 0);
  float e = 0;
  // pick GPU (assumes the context provides at least PROC devices)
  cl_command_queue queue = clCreateCommandQueue(ct, aDevices[p], 0, 0);
  // allocate GPU memory
  cl_mem fx = clCreateBuffer(ct, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
  (i1-i0)*sizeof(cl_float), &x[i0], 0);
  cl_mem fy = clCreateBuffer(ct, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
  (i1-i0+2)*sizeof(cl_float), &y[i0-1], 0);
  // fe receives one partial sum per work-group
  cl_mem fe = clCreateBuffer(ct, CL_MEM_WRITE_ONLY,
  (i1-i0)/BLOCK*sizeof(cl_float), 0, 0);
  clSetKernelArg(kern, 0, sizeof(cl_mem), (void *)&fx);
  clSetKernelArg(kern, 1, sizeof(cl_mem), (void *)&fy);
  // the __local argument se is specified by size only, no buffer object
  clSetKernelArg(kern, 2, BLOCK*sizeof(cl_float), 0);
  clSetKernelArg(kern, 3, sizeof(cl_mem), (void *)&fe);
  float *d = new float[(i1-i0)/BLOCK];
  // call GPU: one work-item per element of fx
  // (i1-i0 is assumed to be a multiple of BLOCK)
  const size_t size = BLOCK;
  const size_t dim = i1-i0;
  clEnqueueNDRangeKernel(queue, kern, 1, 0, &dim, &size, 0, 0, 0);
  // copy to host memory
  clEnqueueReadBuffer(queue, fx, CL_TRUE, 0,
  (i1-i0) * sizeof(cl_float), &x[i0], 0, 0, 0);
  clEnqueueReadBuffer(queue, fe, CL_TRUE, 0,
  (i1-i0)/BLOCK * sizeof(cl_float), d, 0, 0, 0);
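  // accumulate the per-work-group partial sums on the host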
  float ee = 0;
  for (int i=0; i<(i1-i0)/BLOCK; ++i)
   ee += d[i];
  e += ee;
  delete[] d;
  // release GPU resources
  clReleaseMemObject(fx);
  clReleaseMemObject(fy);
  clReleaseMemObject(fe);
  clReleaseKernel(kern);
  clReleaseProgram(prog);
  clReleaseCommandQueue(queue);
  clReleaseContext(ct);
  free(aDevices);
  *ep = e;
 }
};

int main(int argc, char *argv[]) {
  int n = ...;   // global problem size
  if (pvm_parent() == PvmNoParent) {
  // parent process: spawn N worker copies of this program
  #define N (4)
  int tid[N];
  pvm_spawn("program", argv, PvmTaskDefault, (char*)0, N, &tid[0]);
  } else {
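  // worker: determine this task's position among the spawned siblings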
  int mytid = pvm_mytid();
  int *tids, me = -1;
  int ntids = pvm_siblings(&tids);
  for (int i=0; i<ntids; ++i)
   if ( tids[i] == mytid) {
    me = i;
    break;
   }
  int p_left = -1, p_right = -1;
  if (me > 0)
    p_left = tids[me-1];
  if (me < ntids-1)
    p_right = tids[me+1];
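  // block partition of the global index range [1,n) among the ntids tasks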
  int n_local0 = 1 + (me * (n-1)) / ntids;
  int n_local1 = 1 + ((me+1) * (n-1)) / ntids;
  pvm_joingroup("worker");
  // allocate only the local part + ghost zone of the arrays x,y;
  // shifting the pointers makes the global indices n_local0-1 .. n_local1 valid
  float *x, *y;
  x = new float[n_local1 - n_local0 + 2];
  y = new float[n_local1 - n_local0 + 2];
  x -= (n_local0 - 1);
  y -= (n_local0 - 1);

  ... // fill x, y

  // fill ghost zone: send the first owned y value to the left neighbour and
  // the last owned one to the right, receive into y[n_local1] and y[n_local0-1]
  if (p_left != -1) {
   pvm_initsend(PvmDataDefault);
   pvm_pkfloat(&y[n_local0], 1, 1);
   int msgtag = 1;
   pvm_send(p_left, msgtag);
  }
  if (p_right != -1) {
   int msgtag = 1;
   pvm_recv(p_right, msgtag);
   pvm_upkfloat(&y[n_local1], 1, 1);
   pvm_initsend(PvmDataDefault);
   pvm_pkfloat(&y[n_local1-1], 1, 1);
   msgtag = 2;
   pvm_send(p_right, msgtag);
  }
  if (p_left != -1) {
   int msgtag = 2;
   pvm_recv(p_left, msgtag);
   pvm_upkfloat(&y[n_local0-1], 1, 1);
  }

  float e = 0;
  float e_vec[PROC];
  thread_group grp;
  // start PROC threads, one per GPU, each covering a slice of the local range,
  // and wait for their termination
  for (int i=0; i<PROC; ++i) {
   thread1 t(x, y, &e_vec[i], i,
     n_local0+((n_local1-n_local0)*i)/PROC, n_local0+((n_local1-n_local0)*(i+1))/PROC);
   grp.create_thread(t);
  }
  grp.join_all();
  for (int i=0; i<PROC; ++i)
   e += e_vec[i];
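  // combine the per-task partial sums into a global sum with PVM and
  // distribute the result to every worker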

  int msgtag = 3;
  // rootginst 0 (assumed here to be the me==0 task) receives the reduced sum
  pvm_reduce(PvmSum, &e, 1, PVM_FLOAT, msgtag, "worker", 0);
  msgtag = 4;
  if (me==0) {
   pvm_initsend(PvmDataDefault);
   pvm_pkfloat(&e, 1, 1);
   pvm_bcast("worker", msgtag);
  } else {
   pvm_recv(tids[0], msgtag);
   pvm_upkfloat(&e, 1, 1);
  }

  ... // output x, e

  // undo the pointer shift before freeing the arrays
  x += (n_local0 - 1);
  y += (n_local0 - 1);
  delete[] x;
  delete[] y;
  }
  pvm_exit();
  return 0;
}
