#include <string.h>
#include <dcmf.h>
#include <dcmf_globalcollectives.h>
#include <boost/thread/thread.hpp>
using namespace boost;
#include <CL/cl.h>
#include <malloc.h>
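// Hybrid example: DCMF one-sided communication between Blue Gene/P processes,
// Boost threads inside each process, and one OpenCL GPU device per thread.
// Each process updates its slice of x from y (1-D stencil) and accumulates a
// global sum of squares of y into e. PROC is the number of worker threads
// (and OpenCL devices assumed available) per process.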
#define PROC (8)
DCMF_Protocol_t barrier_prot, control0_prot, control1_prot,
put_prot, reduce_prot;
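// Completion callback: decrements the counter that the issuing thread polls
// while calling DCMF_Messager_advance().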
void cb_decr(void *data) {
unsigned *val = (unsigned*)data;
(*val)--;
}
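// Control-message callback: stores the memory region handle sent by a
// neighbouring process into the location passed as clientdata.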
void cb_recv(void *data, const DCMF_Control_t *info, unsigned) {
memcpy((DCMF_Memregion_t*)data, info, sizeof(DCMF_Memregion_t));
}
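// Blocking barrier over all processes, built from the nonblocking
// DCMF_GlobalBarrier plus polling.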
void barrier() {
DCMF_CriticalSection_enter(0);
volatile unsigned active = 1;
DCMF_Callback_t cb = { cb_decr, (void *) &active };
DCMF_Request_t req;
DCMF_GlobalBarrier(&barrier_prot, &req, cb);
while (active)
DCMF_Messager_advance();
DCMF_CriticalSection_exit(0);
}
// kernel
#define BLOCK (512)
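// OpenCL kernel: each work-group of BLOCK work-items updates its part of fx
// from fy and reduces the squares of fy in local memory se; work-item 0 of
// each group writes the partial sum of its group to fe.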
const char *source =
"__kernel void sub1(__global float* fx,\
__global const float* fy,\
__local float* se, __global float* fe) {\
const unsigned int t = get_global_id(0);\
const unsigned int b = get_group_id(0);\
const unsigned block = 512;\
const unsigned int i = block*b+t;\
float e;\
/* do computation */\
fx[t] += ( fy[t+2] + fy[t] )*.5;\
e = fy[t+1] * fy[t+1];\
/* reduction */\
se[t] = e;\
barrier(CLK_LOCAL_MEM_FENCE);\
if (t<256) {\
se[t] += se[t+256];\
barrier(CLK_LOCAL_MEM_FENCE);\
}\
if (t<128) {\
se[t] += se[t+128];\
barrier(CLK_LOCAL_MEM_FENCE);\
}\
if (t<64) {\
se[t] += se[t+64];\
barrier(CLK_LOCAL_MEM_FENCE);\
}\
if (t<32) {\
se[t] += se[t+32];\
se[t] += se[t+16];\
se[t] += se[t+8];\
se[t] += se[t+4];\
se[t] += se[t+2];\
se[t] += se[t+1];\
}\
if (t==0)\
fe[b] = se[0];\
}";
struct thread1 {
float *x, *y, *ep;
int i0, i1, p;
thread1(float *xx, float *yy, float *ee, int pp, int ii0, int ii1) :
x(xx), y(yy), ep(ee), i0(ii0), i1(ii1), p(pp) {}
void operator()() {
// allocate GPU
cl_context ct = clCreateContextFromType(0, CL_DEVICE_TYPE_GPU, 0, 0, 0);
size_t ctsize;
clGetContextInfo(ct, CL_CONTEXT_DEVICES, 0, 0, &ctsize);
cl_device_id *aDevices = (cl_device_id*)malloc(ctsize);
clGetContextInfo(ct, CL_CONTEXT_DEVICES, ctsize, aDevices, 0);
// compile kernel
cl_program prog = clCreateProgramWithSource(ct, 1, &source, 0, 0);
clBuildProgram(prog, 0, 0, 0, 0, 0);
cl_kernel kern = clCreateKernel(prog, "sub1", 0);
float e = 0;
// pick GPU
cl_command_queue queue = clCreateCommandQueue(ct, aDevices[p], 0, 0);
// allocate GPU memory
cl_mem fx = clCreateBuffer(ct, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
(i1-i0)*sizeof(cl_float), &x[i0], 0);
cl_mem fy = clCreateBuffer(ct, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
(i1-i0+2)*sizeof(cl_float), &y[i0-1], 0);
cl_mem fe = clCreateBuffer(ct, CL_MEM_WRITE_ONLY,
(i1-i0)/BLOCK*sizeof(cl_float), 0, 0);
clSetKernelArg(kern, 0, sizeof(cl_mem), (void *)&fx);
clSetKernelArg(kern, 1, sizeof(cl_mem), (void *)&fy);
// local reduction buffer: a __local argument is set by size with a NULL pointer
clSetKernelArg(kern, 2, BLOCK*sizeof(cl_float), 0);
clSetKernelArg(kern, 3, sizeof(cl_mem), (void *)&fe);
float *d = new float[(i1-i0)/BLOCK];
// call GPU
const size_t size = BLOCK;
// global work size: one work-item per updated element of fx; the slice
// length i1-i0 is assumed to be a multiple of BLOCK
const size_t dim = i1-i0;
clEnqueueNDRangeKernel(queue, kern, 1, 0, &dim, &size, 0, 0, 0);
// copy to host memory
clEnqueueReadBuffer(queue, fx, CL_TRUE, 0,
(i1-i0) * sizeof(cl_float), &x[i0], 0, 0, 0);
clEnqueueReadBuffer(queue, fe, CL_TRUE, 0,
(i1-i0)/BLOCK * sizeof(cl_float), d, 0, 0, 0);
float ee = 0;
for (int i=0; i<(i1-i0)/BLOCK; ++i)
ee += d[i];
e += ee;
delete[] d;
// release GPU resources
clReleaseMemObject(fx);
clReleaseMemObject(fy);
clReleaseMemObject(fe);
clReleaseKernel(kern);
clReleaseProgram(prog);
clReleaseCommandQueue(queue);
clReleaseContext(ct);
free(aDevices);
*ep = e;
}
};
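// main: register DCMF protocols, distribute the grid, exchange ghost-zone
// memory regions, fill the ghost cells with one-sided puts, run the GPU
// threads, and combine the per-process errors with a global allreduce.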
int main(int argc, char *argv[]) {
int n = ...;
DCMF_Messager_initialize();
{ // init barrier, put, reduce
DCMF_GlobalBarrier_Configuration_t barrier_conf =
{DCMF_DEFAULT_GLOBALBARRIER_PROTOCOL};
DCMF_Put_Configuration_t put_conf =
{DCMF_DEFAULT_PUT_PROTOCOL};
DCMF_GlobalAllreduce_Configuration_t reduce_conf =
{DCMF_TREE_GLOBALALLREDUCE_PROTOCOL};
DCMF_CriticalSection_enter(0);
DCMF_GlobalBarrier_register(&barrier_prot, &barrier_conf);
DCMF_Put_register(&put_prot, &put_conf);
DCMF_GlobalAllreduce_register(&reduce_prot, &reduce_conf);
DCMF_CriticalSection_exit(0);
}
unsigned me = DCMF_Messager_rank();
unsigned numproc = DCMF_Messager_size();
int p_left = -1, p_right = -1;
if (me > 0)
p_left = me-1;
if (me < numproc-1)
p_right = me+1;
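// 1-D block distribution of the interior points 1..n-1: this process owns
// indices [n_local0, n_local1).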
int n_local0 = 1 + (me * (n-1)) / numproc;
int n_local1 = 1 + ((me+1) * (n-1)) / numproc;
// allocate only local part + ghost zone of the arrays x,y
float *x, *y;
x = new float[n_local1 - n_local0 + 2];
y = new float[n_local1 - n_local0 + 2];
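// shift the base pointers so x,y can be addressed with the global indices
// n_local0-1 .. n_local1 (one ghost cell on each side)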
x -= (n_local0 - 1);
y -= (n_local0 - 1);
// ghost zones
DCMF_Memregion_t memregion0, memregion1,
memregion_left, memregion_right;
size_t bytes;
DCMF_CriticalSection_enter(0);
DCMF_Memregion_create(&memregion0, &bytes,
2 * sizeof(float), &y[n_local0-1], 0);
DCMF_Memregion_create(&memregion1, &bytes,
2 * sizeof(float), &y[n_local1-1], 0);
// set memregion_left, memregion_right
DCMF_Control_Configuration_t c0_conf =
{ DCMF_DEFAULT_CONTROL_PROTOCOL, cb_recv, &memregion_right};
DCMF_Control_Configuration_t c1_conf =
{ DCMF_DEFAULT_CONTROL_PROTOCOL, cb_recv, &memregion_left};
DCMF_Control_register(&control0_prot, &c0_conf);
DCMF_Control_register(&control1_prot, &c1_conf);
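// Exchange memregion handles: each process sends memregion0 to its left and
// memregion1 to its right neighbour; on the receiving side the callbacks
// store them in memregion_right and memregion_left respectively.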
barrier();
if (p_left != -1)
DCMF_Control(&control0_prot, DCMF_MATCH_CONSISTENCY,
p_left, (DCMF_Control_t*) &memregion0);
if (p_right != -1)
DCMF_Control(&control1_prot, DCMF_MATCH_CONSISTENCY,
p_right, (DCMF_Control_t*) &memregion1);
barrier();
DCMF_CriticalSection_exit(0);
... // fill x, y
{ // fill ghost zone
volatile unsigned active0 = 1, active1 = 1;
DCMF_Callback_t cb0 = { cb_decr, (void*)&active0 },
cb1 = { cb_decr, (void*)&active1 };
DCMF_Request_t req0, req1;
DCMF_CriticalSection_enter(0);
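// One-sided ghost-cell exchange: write this process's first interior value
// of y into the left neighbour's ghost cell and its last interior value into
// the right neighbour's ghost cell, then poll until both puts complete.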
if (p_left != -1)
DCMF_Put(&put_prot, &req0, cb0, DCMF_SEQUENTIAL_CONSISTENCY,
p_left, sizeof(float), &memregion0, &memregion_left,
sizeof(float), sizeof(float));
if (p_right != -1)
DCMF_Put(&put_prot, &req1, cb1, DCMF_SEQUENTIAL_CONSISTENCY,
p_right, sizeof(float), &memregion1, &memregion_right,
0, 0);
if (p_left != -1)
while (active0)
DCMF_Messager_advance();
if (p_right != -1)
while (active1)
DCMF_Messager_advance();
DCMF_CriticalSection_exit(0);
barrier();
}
float e = 0;
float e_vec[PROC];
thread_group grp;
// start threads and wait for termination
for (int i=0; i<PROC; ++i) {
thread1 t(x, y, &e_vec[i], i,
n_local0+((n_local1-n_local0)*i)/PROC, n_local0+((n_local1-n_local0)*(i+1))/PROC);
grp.create_thread(t);
}
grp.join_all();
for (int i=0; i<PROC; ++i)
e += e_vec[i];
{ // reduction
DCMF_CriticalSection_enter(0);
float e_local = e;
volatile unsigned active = 1;
DCMF_Callback_t cb = { cb_decr, (void*)&active };
DCMF_Request_t req;
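// Global reduction: combine the per-process partial errors into e; since this
// is an allreduce, every process ends up with the global sum.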
DCMF_GlobalAllreduce(&reduce_prot, &req, cb,
DCMF_MATCH_CONSISTENCY, -1,
(char*)&e_local, (char*)&e, 1, DCMF_FLOAT, DCMF_SUM);
while (active)
DCMF_Messager_advance();
DCMF_CriticalSection_exit(0);
}
... // output x, e
barrier();
DCMF_Memregion_destroy(&memregion0);
DCMF_Memregion_destroy(&memregion1);
x += (n_local0 - 1);
y += (n_local0 - 1);
delete[] x;
delete[] y;
DCMF_Messager_finalize();
return 0;
}