code example
SHMEM (SGI shmem library)
one-sided communication
POSIX threads
CUDA: NVIDIA Compute Unified Device Architecture
GPU computing

#include <stdint.h>

#include <mpp/shmem.h>
#include <pthread.h>
#include <cuda.h>

// Node-local parts of the distributed arrays (allocated from the symmetric
// heap in main; read/written by the worker threads).
float *x, *y;
// number of worker threads (= GPUs driven) per SHMEM PE
#define PROC (8)
// one partial-sum slot per worker thread, combined in main after join
float e_vec[PROC];
// node-local index range [n_thread0, n_thread1) handed to the workers
int n_thread0, n_thread1;

// kernel
// kernel: 3-point stencil update of fx from fy, plus a block-wide sum
// reduction of fy[i+1]^2 into fe[b] (one partial sum per block, to be
// accumulated on the host).
// Launch requirements: blockDim.x == BLOCK (512), 1-D grid; fy must hold
// BLOCK*gridDim.x + 2 elements (two ghost cells), fe gridDim.x elements.
__global__ void sub1(float* fx, float* fy, float* fe) {
#define BLOCK (512)
  int t = threadIdx.x; // builtin
  int b = blockIdx.x; // builtin
  float e;
  __shared__ float se[BLOCK];
  __shared__ float sx[BLOCK];
  __shared__ float sy[BLOCK+2]; // +2: ghost cells for the stencil
  // stage this block's tile from global memory into shared memory
  sx[t] = fx[BLOCK*b+t];
  sy[t] = fy[BLOCK*b+t];
  if (t<2)
     sy[t+BLOCK] = fy[BLOCK*b+t+BLOCK];
  __syncthreads();

  // do computation (.5f keeps the arithmetic in single precision;
  // the bare .5 forced a double-precision round trip)
  sx[t] += ( sy[t+2] + sy[t] )*.5f;
  e = sy[t+1] * sy[t+1];
  // copy result back to global memory
  fx[BLOCK*b+t] = sx[t];
  // block-wide tree reduction of e into se[0].
  // __syncthreads() must be reached by every thread of the block, so the
  // barriers sit OUTSIDE the divergent if-branches (placing them inside,
  // as the original did, is undefined behavior).
  se[t] = e;
  __syncthreads();
  if (t<256)
     se[t] += se[t+256];
  __syncthreads();
  if (t<128)
     se[t] += se[t+128];
  __syncthreads();
  if (t<64)
     se[t] += se[t+64];
  __syncthreads();
  if (t<32) { // last steps run inside a single warp (warp 0, all 32 lanes)
     // Volta+ independent thread scheduling voids implicit warp synchrony:
     // synchronize the warp between dependent steps.
     se[t] += se[t+32]; __syncwarp();
     se[t] += se[t+16]; __syncwarp();
     se[t] += se[t+8];  __syncwarp();
     se[t] += se[t+4];  __syncwarp();
     se[t] += se[t+2];  __syncwarp();
     se[t] += se[t+1];
  }
  if (t==0)
     fe[b] = se[0];
}

// Worker for one pthread: drives GPU p over its slice [n0, n1) of the
// node-local range [n_thread0, n_thread1), updates x in place, and leaves
// its partial reduction sum in e_vec[p]. Returns NULL.
void *thread1(void *arg) {
  // arg carries a small integer smuggled through void*; round-trip the
  // cast via intptr_t instead of truncating a pointer to int.
  int p = (int)(intptr_t)arg;
  int n0 = n_thread0 + (p * (n_thread1-n_thread0)) / PROC;
  int n1 = n_thread0 + ((p+1) * (n_thread1-n_thread0)) / PROC;
  // pick GPU (one device per thread)
  cudaSetDevice(p);
  // allocate GPU memory (+2: ghost cells for the stencil)
  float *fx, *fy, *fe;
  cudaMalloc((void**)&fx, (n1-n0+2) * sizeof(float));
  cudaMalloc((void**)&fy, (n1-n0+2) * sizeof(float));
  cudaMalloc((void**)&fe, (n1-n0+2)/BLOCK * sizeof(float));
  float *de = new float[(n1-n0+2)/BLOCK];
  // copy to GPU memory
  // NOTE(review): fx[0] and fx[n1-n0+1] are left uninitialized while the
  // kernel reads fx starting at index 0 — confirm the intended offset.
  cudaMemcpy(fx+1, &x[n0],
   (n1-n0) * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(fy, &y[n0-1],
   (n1-n0+2) * sizeof(float), cudaMemcpyHostToDevice);
  dim3 dimBlock(BLOCK, 1, 1);
  dim3 dimGrid((n1-n0+2)/BLOCK, 1, 1);

  // call GPU
  sub1<<<dimGrid, dimBlock>>>(fx, fy, fe);
  // copy results back to host memory. cudaMemcpy takes (dst, src, ...):
  // for DeviceToHost the device pointer is the SOURCE — the original had
  // the arguments swapped, and indexed the host buffer de with a global
  // index (&de[n0-1]) instead of its start.
  cudaMemcpy(&x[n0], fx+1, (n1-n0) * sizeof(float),
   cudaMemcpyDeviceToHost);
  cudaMemcpy(de, fe, (n1-n0+2)/BLOCK * sizeof(float),
   cudaMemcpyDeviceToHost);
  // release GPU memory
  cudaFree(fe);
  cudaFree(fy);
  cudaFree(fx);
  // finish the reduction of this slice's per-block partials on the host
  float e_local = 0;
  for (int i=0; i<(n1-n0+2)/BLOCK; ++i)
   e_local += de[i];
  delete[] de;
  e_vec[p] = e_local;
  return (void*) 0;
}

// One SHMEM PE per node: partition the global index range across PEs,
// exchange ghost cells with one-sided puts, fan the local slice out to
// PROC GPU worker threads, then sum-reduce the scalar result over all PEs.
int main(int argc, char *argv[]) {
  int n = ...;
  start_pes(0);
  // NOTE(review): integer division drops the remainder of (n-1)/_num_pes(),
  // so indices past _num_pes()*nn are never covered — confirm intended.
  int nn = (n-1) / _num_pes();
  int n_local0 = 1 + _my_pe() * nn;
  int n_local1 = 1 + (_my_pe()+1) * nn;
  // allocate only local part + ghost zone of the arrays x,y.
  // Assign the FILE-SCOPE x,y (the original re-declared local pointers
  // here, shadowing the globals that thread1 reads — the workers would
  // have seen uninitialized pointers).
  x = (float*)shmalloc((n_local1 - n_local0 + 2)*sizeof(float));
  y = (float*)shmalloc((n_local1 - n_local0 + 2)*sizeof(float));
  // shift the bases so the arrays are indexed with global indices,
  // starting at n_local0-1 (lower ghost cell)
  x -= (n_local0 - 1);
  y -= (n_local0 - 1);
  shmem_barrier_all();

  ... // fill x, y

  // fill ghost zone: push our boundary values into the neighbors' ghosts
  if (_my_pe() > 0)
   shmem_float_put(&y[n_local1], &y[n_local0], 1, _my_pe()-1);
  if (_my_pe() < _num_pes()-1)
   shmem_float_put(&y[n_local0-1], &y[n_local1-1], 1, _my_pe()+1);
  shmem_barrier_all();

  pthread_t threads[PROC];
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  n_thread0 = n_local0;
  n_thread1 = n_local1;
  float e = 0;
  // start threads and wait for termination
  for (int p=0; p<PROC; ++p)
   pthread_create(&threads[p], &attr, thread1, (void *)(intptr_t)p);
  for (int p=0; p<PROC; ++p) {
   pthread_join(threads[p], NULL);
   e += e_vec[p];
  }

  // Sum-reduce e across all PEs. work and sync must be symmetric (static).
  // The sync array is sized with _SHMEM_REDUCE_SYNC_SIZE and initialized to
  // _SHMEM_SYNC_VALUE before first use (the original sized it with the
  // unrelated _SHMEM_REDUCE_MIN_WRKDATA_SIZE and never initialized it).
  static float work[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
  static long sync[_SHMEM_REDUCE_SYNC_SIZE];
  for (int i=0; i<_SHMEM_REDUCE_SYNC_SIZE; ++i)
   sync[i] = _SHMEM_SYNC_VALUE;
  static float el, es;
  el = e;
  shmem_barrier_all(); // every PE must have initialized sync before the reduce
  shmem_float_sum_to_all(&es, &el, 1,
   0, 0, _num_pes(), work, sync);
  e = es;

  ... // output x, e

  // undo the index shift before freeing the symmetric allocations
  x += (n_local0 - 1);
  y += (n_local0 - 1);
  shfree(x);
  shfree(y);
  return 0;
}

[start] [references] [download] [install]