Timothy 0.9
Tissue Modelling Framework
 All Data Structures Files Functions Variables Typedefs Macros
comm.c File Reference

contains communication functions More...

#include <stdlib.h>
#include "global.h"
Include dependency graph for comm.c:

Go to the source code of this file.

Data Structures

struct  expData
 

Macros

#define MAX_EXPORTED_PER_PROC   2*maxCellsPerProc
 

Functions

int comm_compare_exp_list (const void *a, const void *b)
 
void createExportList ()
 
void commCleanup ()
 
void cellsExchangeInit ()
 
void cellsExchangeWait ()
 
void densPotExchangeInit ()
 
void densPotExchangeWait ()
 

Variables

MPI_Request * reqSend
 
MPI_Request * reqRecv
 
int64_t * sendOffset
 
int64_t * recvOffset
 
struct expData * expList
 
int * recvCount
 
int * sendCount
 

Detailed Description

contains communication functions

Definition in file comm.c.

Macro Definition Documentation

#define MAX_EXPORTED_PER_PROC   2*maxCellsPerProc

Definition at line 48 of file comm.c.

Function Documentation

void cellsExchangeInit ( )

This function initiates sending and receiving cells' data between processes.

Definition at line 177 of file comm.c.

References expData::cell, cells, partData::h, h, lnc, MPIrank, MPIsize, nc, numExp, numImp, recvCount, recvData, recvOffset, reqRecv, reqSend, sendCount, sendData, sendOffset, cellData::size, partData::size, tlnc, cellData::x, partData::x, cellData::y, partData::y, partData::young, cellData::z, and partData::z.

178 {
179  int i;
180 
181  if (nc < MPIsize || MPIsize == 1)
182  return;
183 
184  /* allocate communication buffers */
185  sendData = (struct partData *) malloc(numExp * sizeof(struct partData));
186  recvData = (struct partData *) malloc(numImp * sizeof(struct partData));
187  reqSend = (MPI_Request *) malloc(sizeof(MPI_Request) * MPIsize);
188  reqRecv = (MPI_Request *) malloc(sizeof(MPI_Request) * MPIsize);
189 
190  /* create reduced particle data buffer for exporting */
191  for (i = 0; i < numExp; i++) {
192  sendData[i].x = cells[expList[i].cell].x;
193  sendData[i].y = cells[expList[i].cell].y;
194  sendData[i].z = cells[expList[i].cell].z;
195  sendData[i].size = cells[expList[i].cell].size;
196  sendData[i].h = h;
197  sendData[i].young = (double) cells[expList[i].cell].young;
198  }
199 
200  /* send cells - asynchronous MPI call */
201  for (i = 0; i < MPIsize; i++) {
202  if (sendCount[i] == 0 || tlnc[i] == 0 || lnc == 0)
203  continue;
204  MPI_Isend(&sendData[sendOffset[i]],
205  sendCount[i] * sizeof(struct partData), MPI_BYTE, i, MPIrank,
206  MPI_COMM_WORLD, &reqSend[i]);
207  }
208 
209  /* receive cells - asynchronous MPI call */
210  for (i = 0; i < MPIsize; i++) {
211  if (recvCount[i] == 0 || tlnc[i] == 0 || lnc == 0)
212  continue;
213  MPI_Irecv(&recvData[recvOffset[i]],
214  recvCount[i] * sizeof(struct partData), MPI_BYTE, i, i,
215  MPI_COMM_WORLD, &reqRecv[i]);
216  }
217 
218 }
double h
Definition: global.h:120
int64_t * tlnc
Definition: global.h:111
int MPIrank
Definition: global.h:134
#define lnc
Definition: global.h:102
int64_t * sendOffset
Definition: comm.c:40
int cell
Definition: comm.c:33
struct partData * sendData
Definition: global.h:148
double z
Definition: global.h:119
int * recvCount
Definition: comm.c:45
int MPIsize
Definition: global.h:135
double size
Definition: global.h:72
struct partData * recvData
Definition: global.h:149
int numExp
Definition: global.h:153
int * sendCount
Definition: comm.c:46
double x
Definition: global.h:69
int64_t * recvOffset
Definition: comm.c:41
int numImp
Definition: global.h:154
struct cellData * cells
Definition: global.h:82
MPI_Request * reqSend
Definition: comm.c:37
double size
Definition: global.h:121
double z
Definition: global.h:71
double y
Definition: global.h:70
#define nc
Definition: global.h:93
MPI_Request * reqRecv
Definition: comm.c:38
double young
Definition: global.h:122
struct expData * expList
Definition: comm.c:43
double x
Definition: global.h:117
double y
Definition: global.h:118
double h
Definition: global.h:200

Here is the caller graph for this function:

void cellsExchangeWait ( )

This function waits for cells' data exchange completion.

Definition at line 223 of file comm.c.

References lnc, MPIsize, nc, recvCount, reqRecv, reqSend, sendCount, sendData, stopRun(), and tlnc.

224 {
225  int i;
226  MPI_Status status;
227 
228  if (nc < MPIsize || MPIsize == 1)
229  return;
230 
231  /* wait for send completion */
232  for (i = 0; i < MPIsize; i++) {
233  if (sendCount[i] == 0 || tlnc[i] == 0 || lnc == 0)
234  continue;
235  if (MPI_Wait(&reqSend[i], &status) != MPI_SUCCESS)
236  stopRun(103, "reqSend", __FILE__, __LINE__);
237  }
238 
239  /* wait for receive completion */
240  for (i = 0; i < MPIsize; i++) {
241  if (recvCount[i] == 0 || tlnc[i] == 0 || lnc == 0)
242  continue;
243  if (MPI_Wait(&reqRecv[i], &status) != MPI_SUCCESS)
244  stopRun(103, "reqRecv", __FILE__, __LINE__);
245  }
246 
247  /* some of the buffers can be deallocated here */
248  free(sendData);
249  free(reqSend);
250  free(reqRecv);
251 }
int64_t * tlnc
Definition: global.h:111
#define lnc
Definition: global.h:102
struct partData * sendData
Definition: global.h:148
int * recvCount
Definition: comm.c:45
int MPIsize
Definition: global.h:135
int * sendCount
Definition: comm.c:46
MPI_Request * reqSend
Definition: comm.c:37
#define nc
Definition: global.h:93
MPI_Request * reqRecv
Definition: comm.c:38
void stopRun(int ierr, char *name, char *file, int line)
Definition: utils.c:72

Here is the call graph for this function:

Here is the caller graph for this function:

int comm_compare_exp_list ( const void *  a,
const void *  b 
)

This function is a comparison function used for sorting export list table.

Definition at line 54 of file comm.c.

55 {
56  return ((struct expData *) a)->proc - ((struct expData *) b)->proc;
57 }
HYPRE_SStructVector b
Definition: tempf.c:40
Definition: comm.c:32

Here is the caller graph for this function:

void commCleanup ( )

This function deallocates all communication buffers and auxiliary tables.

Definition at line 156 of file comm.c.

References MPIsize, nc, recvCount, recvData, recvDensPotData, recvOffset, sendCount, and sendOffset.

157 {
158  if (nc < MPIsize || MPIsize == 1)
159  return;
160 
161  free(recvData);
162  free(recvDensPotData);
163 
164  free(expList);
165 
166  free(recvCount);
167  free(sendCount);
168 
169  free(sendOffset);
170  free(recvOffset);
171 
172 }
int64_t * sendOffset
Definition: comm.c:40
int * recvCount
Definition: comm.c:45
int MPIsize
Definition: global.h:135
struct partData * recvData
Definition: global.h:149
int * sendCount
Definition: comm.c:46
int64_t * recvOffset
Definition: comm.c:41
struct densPotData * recvDensPotData
Definition: global.h:151
#define nc
Definition: global.h:93
struct expData * expList
Definition: comm.c:43

Here is the caller graph for this function:

void createExportList ( )

This function uses Zoltan's library function Zoltan_LB_Box_Assign to find possible intersections of cells' neighbourhoods and other processes' geometries.

Definition at line 64 of file comm.c.

References expData::cell, cells, comm_compare_exp_list(), h, cellData::halo, lnc, MAX_EXPORTED_PER_PROC, MPIrank, MPIsize, nc, numExp, numImp, expData::proc, recvCount, recvOffset, sdim, sendCount, sendOffset, stopRun(), tlnc, cellData::x, cellData::y, cellData::z, and ztn.

65 {
66 
67  int i, p;
68  int procs[MPIsize];
69  int numprocs;
70 
71  numExp = 0;
72  numImp = 0;
73  if (nc < MPIsize || MPIsize == 1)
74  return;
75 
76  expList =
77  (struct expData *) malloc(sizeof(struct expData) *
78  MAX_EXPORTED_PER_PROC);
79  recvCount = (int *) calloc(MPIsize, sizeof(int));
80  sendCount = (int *) calloc(MPIsize, sizeof(int));
81  sendOffset = (int64_t *) calloc(MPIsize, sizeof(int64_t));
82  recvOffset = (int64_t *) calloc(MPIsize, sizeof(int64_t));
83 
84  /* loop over local cells */
85  /*#pragma omp parallel for private(procs) */
86  for (p = 0; p < lnc; p++) {
87  double xmin, xmax, ymin, ymax, zmin, zmax;
88  double r;
89 
90  cells[p].halo = 0;
91 
92  if (nc < MPIsize)
93  continue;
94 
95  r = h * 1.5;
96 
97  /* compute neighbourhood box */
98  xmin = cells[p].x - r;
99  xmax = cells[p].x + r;
100  ymin = cells[p].y - r;
101  ymax = cells[p].y + r;
102  if (sdim == 3) {
103  zmin = cells[p].z - r;
104  zmax = cells[p].z + r;
105  } else {
106  zmin = 0.0;
107  zmax = 0.0;
108  }
109 
110  /* look for possible neighbours */
111  Zoltan_LB_Box_Assign(ztn, xmin, ymin, zmin, xmax, ymax, zmax, procs,
112  &numprocs);
113 
114  /* loop over receivers */
115  for (i = 0; i < numprocs; i++) {
116  if (procs[i] == MPIrank || tlnc[procs[i]] == 0)
117  continue;
118  expList[numExp].cell = p;
119  expList[numExp].proc = procs[i];
120  cells[p].halo = MPIrank + 1;
121  sendCount[procs[i]]++;
122  numExp++;
123  /* upps! too many refugees */
124  if (numExp >= MAX_EXPORTED_PER_PROC)
125  stopRun(110, NULL, __FILE__, __LINE__);
126  }
127  }
128 
129  /* sort export list with respect to process number */
130  qsort(expList, numExp, sizeof(struct expData), comm_compare_exp_list);
131 
132  /* distribute the information on transfer sizes between each process */
133  MPI_Alltoall(sendCount, 1, MPI_INT, recvCount, 1, MPI_INT,
134  MPI_COMM_WORLD);
135 
136  /* compute send offsets */
137  sendOffset[0] = 0;
138  for (i = 1; i < MPIsize; i++)
139  sendOffset[i] = sendOffset[i - 1] + sendCount[i - 1];
140 
141  /* compute receive offsets */
142  recvOffset[0] = 0;
143  for (i = 1; i < MPIsize; i++)
144  recvOffset[i] = recvOffset[i - 1] + recvCount[i - 1];
145 
146  /* count cells to be imported */
147  for (i = 0; i < MPIsize; i++)
148  numImp += recvCount[i];
149 
150 }
int64_t * tlnc
Definition: global.h:111
int MPIrank
Definition: global.h:134
#define lnc
Definition: global.h:102
int64_t * sendOffset
Definition: comm.c:40
int cell
Definition: comm.c:33
int * recvCount
Definition: comm.c:45
int MPIsize
Definition: global.h:135
struct Zoltan_Struct * ztn
Definition: global.h:146
Definition: comm.c:32
int numExp
Definition: global.h:153
int * sendCount
Definition: comm.c:46
int sdim
Definition: global.h:160
double x
Definition: global.h:69
int64_t * recvOffset
Definition: comm.c:41
int numImp
Definition: global.h:154
struct cellData * cells
Definition: global.h:82
double z
Definition: global.h:71
#define MAX_EXPORTED_PER_PROC
Definition: comm.c:48
double y
Definition: global.h:70
#define nc
Definition: global.h:93
int proc
Definition: comm.c:34
struct expData * expList
Definition: comm.c:43
int halo
Definition: global.h:61
void stopRun(int ierr, char *name, char *file, int line)
Definition: utils.c:72
int comm_compare_exp_list(const void *a, const void *b)
Definition: comm.c:54
double h
Definition: global.h:200

Here is the call graph for this function:

Here is the caller graph for this function:

void densPotExchangeInit ( )

This function initiates sending and receiving density and potential values between processes.

Definition at line 257 of file comm.c.

References expData::cell, cells, cellData::density, densPotData::density, lnc, MPIrank, MPIsize, nc, numExp, numImp, recvCount, recvDensPotData, recvOffset, reqRecv, reqSend, sendCount, sendDensPotData, sendOffset, tlnc, cellData::v, and densPotData::v.

258 {
259  int i;
260 
261  if (nc < MPIsize || MPIsize == 1)
262  return;
263 
264  /* allocate communication buffers */
265  sendDensPotData =
266  (struct densPotData *) malloc(numExp * sizeof(struct densPotData));
267  recvDensPotData =
268  (struct densPotData *) malloc(numImp * sizeof(struct densPotData));
269  reqSend = (MPI_Request *) malloc(sizeof(MPI_Request) * MPIsize);
270  reqRecv = (MPI_Request *) malloc(sizeof(MPI_Request) * MPIsize);
271 
272  /* create density and potential buffer for exporting */
273  for (i = 0; i < numExp; i++) {
274  sendDensPotData[i].v = cells[expList[i].cell].v;
275  sendDensPotData[i].density = cells[expList[i].cell].density;
276  }
277 
278  /* send data - asynchronous MPI call */
279  for (i = 0; i < MPIsize; i++) {
280  if (sendCount[i] == 0 || tlnc[i] == 0 || lnc == 0)
281  continue;
282  MPI_Isend(&sendDensPotData[sendOffset[i]],
283  sendCount[i] * sizeof(struct densPotData), MPI_BYTE, i,
284  MPIrank, MPI_COMM_WORLD, &reqSend[i]);
285  }
286 
287  /* receive data - asynchronous MPI call */
288  for (i = 0; i < MPIsize; i++) {
289  if (recvCount[i] == 0 || tlnc[i] == 0 || lnc == 0)
290  continue;
291  MPI_Irecv(&recvDensPotData[recvOffset[i]],
292  recvCount[i] * sizeof(struct densPotData), MPI_BYTE, i, i,
293  MPI_COMM_WORLD, &reqRecv[i]);
294  }
295 
296 }
double density
Definition: global.h:75
int64_t * tlnc
Definition: global.h:111
int MPIrank
Definition: global.h:134
#define lnc
Definition: global.h:102
int64_t * sendOffset
Definition: comm.c:40
int cell
Definition: comm.c:33
int * recvCount
Definition: comm.c:45
int MPIsize
Definition: global.h:135
struct densPotData * sendDensPotData
Definition: global.h:150
int numExp
Definition: global.h:153
int * sendCount
Definition: comm.c:46
int64_t * recvOffset
Definition: comm.c:41
int numImp
Definition: global.h:154
struct cellData * cells
Definition: global.h:82
MPI_Request * reqSend
Definition: comm.c:37
double v
Definition: global.h:127
struct densPotData * recvDensPotData
Definition: global.h:151
#define nc
Definition: global.h:93
MPI_Request * reqRecv
Definition: comm.c:38
struct expData * expList
Definition: comm.c:43
double v
Definition: global.h:74
double density
Definition: global.h:128

Here is the caller graph for this function:

void densPotExchangeWait ( )

This function waits for density and potential data exchange completion.

Definition at line 301 of file comm.c.

References lnc, MPIsize, nc, recvCount, reqRecv, reqSend, sendCount, sendDensPotData, stopRun(), and tlnc.

302 {
303  int i;
304  MPI_Status status;
305 
306  if (nc < MPIsize || MPIsize == 1)
307  return;
308 
309  // Wait for send completion
310  for (i = 0; i < MPIsize; i++) {
311  if (sendCount[i] == 0 || tlnc[i] == 0 || lnc == 0)
312  continue;
313  if (MPI_Wait(&reqSend[i], &status) != MPI_SUCCESS)
314  stopRun(103, "sending", __FILE__, __LINE__);
315  }
316 
317  // Wait for receive completion
318  for (i = 0; i < MPIsize; i++) {
319  if (recvCount[i] == 0 || tlnc[i] == 0 || lnc == 0)
320  continue;
321  if (MPI_Wait(&reqRecv[i], &status) != MPI_SUCCESS)
322  stopRun(103, "receiving", __FILE__, __LINE__);
323  }
324 
325  /* some of the buffers can be deallocated */
326  free(sendDensPotData);
327  free(reqSend);
328  free(reqRecv);
329 }
int64_t * tlnc
Definition: global.h:111
#define lnc
Definition: global.h:102
int * recvCount
Definition: comm.c:45
int MPIsize
Definition: global.h:135
struct densPotData * sendDensPotData
Definition: global.h:150
int * sendCount
Definition: comm.c:46
MPI_Request * reqSend
Definition: comm.c:37
#define nc
Definition: global.h:93
MPI_Request * reqRecv
Definition: comm.c:38
void stopRun(int ierr, char *name, char *file, int line)
Definition: utils.c:72

Here is the call graph for this function:

Here is the caller graph for this function:

Variable Documentation

struct expData* expList

Definition at line 43 of file comm.c.

int* recvCount

Definition at line 45 of file comm.c.

int64_t* recvOffset

Definition at line 41 of file comm.c.

MPI_Request* reqRecv

Definition at line 38 of file comm.c.

MPI_Request* reqSend

Definition at line 37 of file comm.c.

int* sendCount

Definition at line 46 of file comm.c.

int64_t* sendOffset

Definition at line 40 of file comm.c.