#include "pvm3.h"
#include <boost/thread/thread.hpp>
using namespace boost;
#include <cuda_runtime.h> // CUDA runtime API (cudaSetDevice, cudaMalloc, cudaMemcpy, ...)
#define PROC (8) // boost threads (one GPU each) per PVM task
#define BLOCK (512) // CUDA threads per block (also used by the host code)
// kernel: x[i] += ( y[i+1] + y[i-1] ) / 2, plus a per-block
// partial sum of y[i]*y[i] written to fe[blockIdx.x]
__global__ void sub1(float* fx, float* fy, float* fe) {
int t = threadIdx.x; // builtin
int b = blockIdx.x; // builtin
float e;
__shared__ float se[BLOCK];
__shared__ float sx[BLOCK];
__shared__ float sy[BLOCK+2];
// copy from global memory into shared memory
sx[t] = fx[BLOCK*b+t];
sy[t] = fy[BLOCK*b+t];
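// the first two threads also load the two ghost cells on the right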
if (t<2)
sy[t+BLOCK] = fy[BLOCK*b+t+BLOCK];
__syncthreads();
// do the computation: centered average around sy[t+1]
sx[t] += ( sy[t+2] + sy[t] )*.5f;
e = sy[t+1] * sy[t+1];
// write the result back to global memory
fx[BLOCK*b+t] = sx[t];
// tree reduction of e over the block in shared memory
se[t] = e;
__syncthreads();
if (t<256)
se[t] += se[t+256];
__syncthreads(); // the barrier must be reached by all threads, so it stays outside the if
if (t<128)
se[t] += se[t+128];
__syncthreads();
if (t<64)
se[t] += se[t+64];
__syncthreads();
if (t<32) { // last warp: warp-synchronous steps, no barrier needed
volatile float *v = se; // volatile so each partial sum is re-read from shared memory
v[t] += v[t+32];
v[t] += v[t+16];
v[t] += v[t+8];
v[t] += v[t+4];
v[t] += v[t+2];
v[t] += v[t+1];
}
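// thread 0 writes this block's partial sum to global memory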
if (t==0)
fe[b] = se[0];
}
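// functor run by each boost thread: offloads the slice x[i0..i1-1]
// (with ghost values y[i0-1] and y[i1]) to GPU number p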
struct thread1 {
float *x, *y, *ep;
int i0, i1, p;
thread1(float *xx, float *yy, float *ee, int pp, int ii0, int ii1) :
x(xx), y(yy), ep(ee), i0(ii0), i1(ii1), p(pp) {}
void operator()() {
// pick GPU
cudaSetDevice(p);
// one thread block per BLOCK elements; i1-i0 is assumed to be a multiple of BLOCK
int nblk = (i1-i0)/BLOCK;
// allocate GPU memory
float *fx, *fy, *fe;
cudaMalloc((void**)&fx, (i1-i0) * sizeof(float));
cudaMalloc((void**)&fy, (i1-i0+2) * sizeof(float));
cudaMalloc((void**)&fe, nblk * sizeof(float));
float *de = new float[nblk];
// copy input to GPU memory (fy carries one ghost value on each side)
cudaMemcpy(fx, &x[i0],
(i1-i0) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(fy, &y[i0-1],
(i1-i0+2) * sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK, 1, 1);
dim3 dimGrid(nblk, 1, 1);
// launch the kernel
sub1<<<dimGrid, dimBlock>>>(fx, fy, fe);
// copy the results back to host memory (destination first, then source)
cudaMemcpy(&x[i0], fx, (i1-i0) * sizeof(float),
cudaMemcpyDeviceToHost);
cudaMemcpy(de, fe, nblk * sizeof(float),
cudaMemcpyDeviceToHost);
// release GPU memory
cudaFree(fe);
cudaFree(fy);
cudaFree(fx);
// finish the reduction: sum this GPU's per-block partial sums on the host
float e = 0;
for (int i=0; i<nblk; ++i)
e += de[i];
delete[] de;
*ep = e;
}
};
int main(int argc, char *argv[]) {
int n = ...;
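// the first task has no PVM parent: it only spawns the workers and exits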
if (pvm_parent() == PvmNoParent) {
#define N (4)
int tid[N];
pvm_spawn("program", argv+1, PvmTaskDefault, (char*)0, N, &tid[0]); // argv+1: child arguments exclude the program name
} else {
int mytid = pvm_mytid();
int *tids, me = -1;
int ntids = pvm_siblings(&tids);
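// determine our own index me among the spawned siblings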
for (int i=0; i<ntids; ++i)
if ( tids[i] == mytid) {
me = i;
break;
}
int p_left = -1, p_right = -1;
if (me > 0)
p_left = tids[me-1];
if (me < ntids-1)
p_right = tids[me+1];
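// block decomposition: this task owns indices n_local0 .. n_local1-1 of 1 .. n-1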
int n_local0 = 1 + (me * (n-1)) / ntids;
int n_local1 = 1 + ((me+1) * (n-1)) / ntids;
pvm_joingroup("worker");
// allocate only local part + ghost zone of the arrays x,y
float *x, *y;
x = new float[n_local1 - n_local0 + 2];
y = new float[n_local1 - n_local0 + 2];
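// shift the base pointers so x,y can be addressed with global indices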
x -= (n_local0 - 1);
y -= (n_local0 - 1);
... // fill x, y
// fill the ghost zones: tag 1 moves y[n_local0] to the left neighbor,
// tag 2 moves y[n_local1-1] to the right neighbor
if (p_left != -1) {
pvm_initsend(PvmDataDefault);
pvm_pkfloat(&y[n_local0], 1, 1);
int msgtag = 1;
pvm_send(p_left, msgtag);
}
if (p_right != -1) {
int msgtag = 1;
pvm_recv(p_right, msgtag);
pvm_upkfloat(&y[n_local1], 1, 1);
pvm_initsend(PvmDataDefault);
pvm_pkfloat(&y[n_local1-1], 1, 1);
msgtag = 2;
pvm_send(p_right, msgtag);
}
if (p_left != -1) {
int msgtag = 2;
pvm_recv(p_left, msgtag);
pvm_upkfloat(&y[n_local0-1], 1, 1);
}
float e = 0;
float e_vec[PROC];
thread_group grp;
// start threads and wait for termination
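// each thread gets an equal slice of [n_local0, n_local1); every slice
// is assumed to be a multiple of BLOCK elements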
for (int i=0; i<PROC; ++i) {
thread1 t(x, y, &e_vec[i], i,
n_local0 + ((n_local1-n_local0)*i)/PROC,
n_local0 + ((n_local1-n_local0)*(i+1))/PROC);
grp.create_thread(t);
}
grp.join_all();
for (int i=0; i<PROC; ++i)
e += e_vec[i];
int msgtag = 3;
// global sum over all tasks; the last argument is the root's *instance*
// number within the group, assumed here to be 0 (i.e. task tids[0])
pvm_reduce(PvmSum, &e, 1, PVM_FLOAT, msgtag, "worker", 0);
msgtag = 4;
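// only the root holds the global sum, so it broadcasts the result
// (pvm_bcast does not deliver the message back to the caller)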
if (me==0) {
pvm_initsend(PvmDataDefault);
pvm_pkfloat(&e, 1, 1);
pvm_bcast("worker", msgtag);
} else {
pvm_recv(tids[0], msgtag);
pvm_upkfloat(&e, 1, 1);
}
... // output x, e
// undo the pointer shift before freeing
x += (n_local0 - 1);
y += (n_local0 - 1);
delete[] x;
delete[] y;
}
pvm_exit();
return 0;
}