code example

MPI-2: Message Passing Interface, version 2; one-sided communication is defined in MPI-2
TBB: Threading Building Blocks
boost threads: part of the C++ Boost library
Cell: Cell Broadband Engine
AltiVec: PowerPC SIMD instructions
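
The example distributes a stencil update and a global reduction over MPI processes, boost threads on the PPU, and the SPUs of each Cell processor. In serial form the computation is (a minimal sketch reconstructed from the kernels below; y holds n+1 values):

for (int i = 1; i < n; ++i) {
  x[i] = 0.5f * (y[i-1] + y[i+1]);
  e += y[i] * y[i];
}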
file sub1.h: common interface

class data1 {
public:
   data1() {}
   int n0, n1;   // index range to process (relative to x, y below)
   float *x, *y; // effective addresses of the input/output arrays
   float e;      // result of the local reduction
   int pad[27];  // pad to 128-byte length (assumes 4-byte pointers)
};
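
The parameter block is padded to a full 128 bytes, the cache-line size and preferred DMA alignment of the Cell, so each thread's block can be moved between main memory and an SPU in a single aligned transfer (the pad assumes 4-byte pointers, i.e. the 32-bit PPU ABI).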

file spe1.cc: SPU code

#include <stdlib.h>
#include <malloc.h>          // memalign
#include <spu_intrinsics.h>
#include <vmx2spu.h>         // AltiVec-style vec_* intrinsics on the SPU (g++)
#include <spu_mfcio.h>
#include "sub1.h"
#define BLOCK (2048)         // floats per DMA block (8 KB)
int main(unsigned long long id,
     unsigned long long argp, unsigned long long envp) {
  data1 vec __attribute__((aligned(128)));
  // read parameter block
  mfc_get(&vec, (unsigned int)argp, sizeof(data1), 1, 0, 0);
  mfc_write_tag_mask(0xffffffff);
  mfc_read_tag_status_all(); // wait for data transfer
  float e = 0;
  // allocate double buffers in local SPU memory; vec_ld/vec_st and the
  // MFC need 16-byte aligned local addresses, hence memalign
  float *x = (float*)memalign(16, BLOCK*sizeof(float));
  float *x0 = (float*)memalign(16, BLOCK*sizeof(float));
  // y is shifted by one float so the left ghost value is y[-1]; the
  // extra elements cover the ghosts and the unaligned vector loads
  float *y = (float*)memalign(16, (BLOCK+32)*sizeof(float)) + 1;
  float *y0 = (float*)memalign(16, (BLOCK+32)*sizeof(float)) + 1;
  // get first buffer
  mfc_get(x, &vec.x[vec.n0], BLOCK*sizeof(float), 2, 0, 0);
  mfc_get(y-1, &vec.y[vec.n0], (BLOCK+32)*sizeof(float), 3, 0, 0);
  mfc_read_tag_status_all();
  for (int ib=vec.n0; ib<vec.n1; ib += BLOCK) { // assumes n1-n0 is a multiple of BLOCK
   if (ib+BLOCK<vec.n1) {
    // get next buffer; the fence on tag 2 keeps this get
    // behind the previous put of the same buffer
    mfc_getf(x0, &vec.x[ib+BLOCK], BLOCK*sizeof(float), 2, 0, 0);
    mfc_get(y0-1, &vec.y[ib+BLOCK], (BLOCK+32)*sizeof(float), 3, 0, 0);
   }
   // do computation on the current buffer
   float ve[4] __attribute__((aligned(16))) = {0, 0, 0, 0};
   for (int i=0; i<BLOCK; i+=4) {
    float *yp = &y[i+1], *yc = &y[i], *ym = &y[i-1];
    // x[i..i+3] = .5*(y[i-1..i+2] + y[i+1..i+4]); the vec_lvsl/vec_perm
    // pairs assemble the unaligned operands from two aligned loads
    vec_st(vec_madd(
     spu_splats(.5f), // spu_splats: the SPU equivalent of vec_splats
     vec_add(
      vec_perm(vec_ld(0,ym), vec_ld(16,ym), vec_lvsl(0,ym)),
      vec_perm(vec_ld(0,yp), vec_ld(16,yp), vec_lvsl(0,yp))),
     spu_splats(0.f)),
     0, &x[i]);
    // accumulate y[i]^2 into four partial sums
    vec_float4 vy = vec_perm(vec_ld(0,yc), vec_ld(16,yc), vec_lvsl(0,yc));
    vec_st(vec_madd(vy, vy, vec_ld(0,&ve[0])), 0, &ve[0]);
   }
   e += ve[0] + ve[1] + ve[2] + ve[3];
   mfc_read_tag_status_all(); // wait for all outstanding transfers
   // put current buffer back to main memory
   mfc_put(x, &vec.x[ib], BLOCK*sizeof(float), 2, 0, 0);
   float *t = x; x = x0; x0 = t; // swap double buffers
   t = y; y = y0; y0 = t;
  }
  // put reduction value
  vec.e = e;
  mfc_put(&vec, argp, sizeof(data1), 1, 0, 0);
  mfc_read_tag_status_all(); // wait for data transfer
  return 0;
}
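
The SPU kernel overlaps communication with computation by double buffering: while the current block in x and y is processed, the next block is fetched into x0 and y0 (tag group 2 for x, 3 for y), and the pointers are swapped at the end of each iteration. The fenced mfc_getf keeps the incoming transfer behind the still-pending put of the same buffer on tag 2. A production version would also have to guarantee 16-byte alignment of the effective addresses passed to the MFC, which this example glosses over.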

main file: PPU code
#include <mpi.h>
#include <boost/thread/thread.hpp>
using namespace boost;
#include <libspe2.h>
#include "sub1.h"
#include <altivec.h>

#define PROC (8) // number of SPE threads per MPI process

extern spe_program_handle_t spe1; // defined in SPU code
struct thread1 {
 float *x, *y, *ep;
 int i0, i1, p;
 thread1(float *xx, float *yy, float *ee, int pp, int ii0, int ii1) :
  x(xx), y(yy), ep(ee), p(pp), i0(ii0), i1(ii1) {}
 void operator()() {
  data1 block __attribute__((aligned(128)));
  spe_context_ptr_t ctxs;
  spe_stop_info_t st;
  ctxs = spe_context_create(0, NULL);
  spe_program_load (ctxs, &spe1);
  block.n0 = 0;       // the SPU indexes relative to block.x and block.y
  block.n1 = i1 - i0;
  block.x = &x[i0];
  block.y = &y[i0-1]; // shifted by one for the ghost value y[i0-1]
  unsigned int entry = SPE_DEFAULT_ENTRY;
  float e = 0;
  // execute code on a single SPU and wait for termination
  spe_context_run(ctxs, &entry, 0, &block, NULL, &st);
  spe_context_destroy(ctxs);
  e += block.e;
  *ep = e;
 }
};
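// (one PPU thread per SPE: each thread1 instance creates a context, loads
// the embedded SPU program, runs it to completion, and returns its partial
// sum through ep)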

int main(int argc, char *argv[]) {
  int n = ...;
  MPI_Init(&argc, &argv);
  int numproc, me;
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  int p_left = -1, p_right = -1;
  if (me > 0)
   p_left = me-1;
  if (me < numproc-1)
   p_right = me+1;
  int n_local0 = 1 + (me * (n-1)) / numproc;
  int n_local1 = 1 + ((me+1) * (n-1)) / numproc;
  // allocate only local part + ghost zone of the arrays x,y
  float *x, *y;
  MPI_Alloc_mem(sizeof(float) * (n_local1 - n_local0 + 2),
   MPI_INFO_NULL, &x);
  MPI_Alloc_mem(sizeof(float) * (n_local1 - n_local0 + 2),
   MPI_INFO_NULL, &y);
  x -= (n_local0 - 1);
  y -= (n_local0 - 1);
  MPI_Win win;
  // expose the whole local part, including both ghost cells, for one-sided access
  MPI_Win_create(&y[n_local0-1], sizeof(float) * (n_local1-n_local0+2),
   sizeof(float), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

  ... // fill x, y

  // fill ghost zone
  MPI_Win_fence(0, win);
  // send first owned value to the left neighbor's right ghost cell
  // (the displacement assumes equal local block sizes on all ranks)
  if (p_left != -1)
   MPI_Put(&y[n_local0], 1, MPI_FLOAT, p_left,
    n_local1-n_local0+1, 1, MPI_FLOAT, win);
  // send last owned value to the right neighbor's left ghost cell
  if (p_right != -1)
   MPI_Put(&y[n_local1-1], 1, MPI_FLOAT, p_right,
    0, 1, MPI_FLOAT, win);
  MPI_Win_fence(0, win);
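  // after the closing fence the ghost values y[n_local0-1] and y[n_local1]
  // have arrived from the neighboring ranks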

  float e = 0;
  float e_vec[PROC];
  thread_group grp;
  // start threads and wait for termination
  for (int i=0; i<PROC; ++i) {
   thread1 t(x, y, &e_vec[i], i,
    n_local0 + ((n_local1-n_local0)*i)/PROC,
    n_local0 + ((n_local1-n_local0)*(i+1))/PROC);
   grp.create_thread(t);
  }
  grp.join_all();
  for (int i=0; i<PROC; ++i)
   e += e_vec[i];

  float e_local = e;
  MPI_Allreduce(&e_local, &e, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);

  ... // output x, e

  MPI_Win_free(&win);
  x += (n_local0 - 1);
  y += (n_local0 - 1);
  MPI_Free_mem(y);
  MPI_Free_mem(x);
  MPI_Finalize();
  return 0;
}
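
A possible build sequence, assuming the GNU Cell SDK tool chain and an MPI compiler wrapper (tool and library names vary between installations):

spu-g++ -O2 -o spe1 spe1.cc
ppu-embedspu spe1 spe1 spe1_embed.o
mpic++ -O2 main.cc spe1_embed.o -lspe2 -lboost_thread -o example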
