Timothy  0.9
Tissue Modelling Framework
interp.c File Reference

contains grid to cellular data interpolation functions More...

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <limits.h>
#include "global.h"
#include "fields.h"
Include dependency graph for interp.c:


Macros

#define patch(p, i, j, k)   (cicPatch[p][patchSize[p].y*patchSize[p].z*i+patchSize[p].z*j+k])
 

Functions

void findPatches ()
 
void doInterpolation ()
 
void initPatchExchange ()
 
int waitPatchExchange ()
 
int applyPatches ()
 
void initFieldsPatchesExchange ()
 
void waitFieldsPatchesExchange ()
 
void applyFieldsPatches ()
 
void interpolateCellsToGrid ()
 
void initCellsToGridExchange ()
 
void waitCellsToGridExchange ()
 
void interpolateFieldsToCells ()
 

Variables

double ** cicPatch
 
int * cicIntersect
 
int * cicReceiver
 
int * cicSender
 
double ** cicRecvPatch
 
MPI_Request * cicReqSend
 
MPI_Request * cicReqRecv
 
int * recvP
 
struct int64Vector3d * lowerPatchCorner
 
struct int64Vector3d * upperPatchCorner
 
struct int64Vector3d * lowerPatchCornerR
 
struct int64Vector3d * upperPatchCornerR
 
struct int64Vector3d * patchSize
 
struct int64Vector3d * patchSizeR
 

Detailed Description

contains grid to cellular data interpolation functions

Definition in file interp.c.

Macro Definition Documentation

#define patch(p, i, j, k)   (cicPatch[p][patchSize[p].y*patchSize[p].z*i+patchSize[p].z*j+k])

Definition at line 49 of file interp.c.
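The macro linearizes a three-dimensional patch index (i, j, k) into the one-dimensional cicPatch[p] buffer in row-major order, with k varying fastest. A minimal standalone sketch of the same addressing, with made-up patch extents (not part of interp.c):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  /* hypothetical patch extents nx, ny, nz */
  int nx = 3, ny = 4, nz = 5;
  double *buf = (double *) calloc(nx * ny * nz, sizeof(double));
  int i = 2, j = 1, k = 3;

  /* same layout as patch(p, i, j, k): y*z*i + z*j + k */
  buf[ny * nz * i + nz * j + k] = 1.0;
  printf("flat index = %d\n", ny * nz * i + nz * j + k);

  free(buf);
  return 0;
}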

Function Documentation

void applyFieldsPatches ( )

Update local cell fields with information from remote processes received in patches. Receiving field patches are deallocated here.

Definition at line 509 of file interp.c.

References cellFields, cells, cicReceiver, fieldsPatches, gridEndIdx, gridResolution, gridStartIdx, lnc, MPIsize, NFIELDS, x, cellData::x, doubleVector3d::x, int64Vector3d::x, cellData::y, doubleVector3d::y, int64Vector3d::y, cellData::z, doubleVector3d::z, and int64Vector3d::z.

510 {
511 
512  int p, c, f, i, j, k;
513  struct int64Vector3d idx;
514  struct doubleVector3d d, t;
515  struct int64Vector3d cellIdx;
516  struct doubleVector3d cicCoord;
517 
518  int max = 0;
519  int iddd;
520 
521  /* reset fields */
522  for (f = 0; f < NFIELDS; f++)
523  for (c = 0; c < lnc; c++)
524  cellFields[f][c] = 0.0;
525 
526  for (c = 0; c < lnc; c++) { /* for every cell */
527  cellIdx.x = ((cells[c].x - lowerGridCorner.x) / gridResolution);
528  cellIdx.y = ((cells[c].y - lowerGridCorner.y) / gridResolution);
529  cellIdx.z = ((cells[c].z - lowerGridCorner.z) / gridResolution);
530  for (p = 0; p < MPIsize; p++) { /* for each process */
531  int ax, ay, az;
532  if (!cicReceiver[p])
533  continue; /* there is no patch from this process */
534  for (ax = 0; ax < 2; ax++)
535  for (ay = 0; ay < 2; ay++)
536  for (az = 0; az < 2; az++) {
537  if (cellIdx.x + ax >= gridStartIdx[p].x
538  && cellIdx.y + ay >= gridStartIdx[p].y
539  && cellIdx.z + az >= gridStartIdx[p].z
540  && cellIdx.x + ax <= gridEndIdx[p].x
541  && cellIdx.y + ay <= gridEndIdx[p].y
542  && cellIdx.z + az <= gridEndIdx[p].z) {
543 
544  idx.x = (cellIdx.x + ax) - lowerPatchCorner[p].x;
545  idx.y = (cellIdx.y + ay) - lowerPatchCorner[p].y;
546  idx.z = (cellIdx.z + az) - lowerPatchCorner[p].z;
547 
548  cicCoord.x = lowerGridCorner.x + cellIdx.x * gridResolution;
549  cicCoord.y = lowerGridCorner.y + cellIdx.y * gridResolution;
550  cicCoord.z = lowerGridCorner.z + cellIdx.z * gridResolution;
551 
552  d.x = (cells[c].x - cicCoord.x) / gridResolution;
553  d.y = (cells[c].y - cicCoord.y) / gridResolution;
554  d.z = (cells[c].z - cicCoord.z) / gridResolution;
555 
556  t.x = 1.0 - d.x;
557  t.y = 1.0 - d.y;
558  t.z = 1.0 - d.z;
559 
560  /* interpolating back to cells */
561  /* scaling from mol/cm^3 to mol/cell */
562  for (f = 0; f < NFIELDS; f++) {
563  cellFields[f][c] += fieldsPatches[p][f * patchSize[p].x * patchSize[p].y * patchSize[p].z + patchSize[p].y * patchSize[p].z * idx.x + patchSize[p].z * idx.y + idx.z] * (ax * d.x + (1 - ax) * t.x) * (ay * d.y + (1 - ay) * t.y) * (az * d.z + (1 - az) * t.z); //*cellVolume;
564  }
565  } // if
566  } // az
567  } // p
568  } // c
569 
570  for (p = 0; p < MPIsize; p++)
571  free(fieldsPatches[p]);
572  free(fieldsPatches);
573 
574  return;
575 }
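The factor (ax*d.x + (1-ax)*t.x) * (ay*d.y + (1-ay)*t.y) * (az*d.z + (1-az)*t.z) used above is the standard trilinear Cloud-In-Cell weight of a cell with respect to the grid vertex at offset (ax, ay, az). A standalone sketch (not code from interp.c) that computes these weights and verifies that the eight corner weights sum to one:

#include <stdio.h>

/* cicWeight() is a hypothetical helper mirroring the weight expression above */
static double cicWeight(int ax, int ay, int az, double dx, double dy, double dz)
{
  double tx = 1.0 - dx, ty = 1.0 - dy, tz = 1.0 - dz;
  return (ax * dx + (1 - ax) * tx) *
         (ay * dy + (1 - ay) * ty) *
         (az * dz + (1 - az) * tz);
}

int main(void)
{
  double sum = 0.0;
  int ax, ay, az;
  for (ax = 0; ax < 2; ax++)
    for (ay = 0; ay < 2; ay++)
      for (az = 0; az < 2; az++)
        sum += cicWeight(ax, ay, az, 0.3, 0.6, 0.1);
  printf("sum of the eight corner weights = %f\n", sum); /* prints 1.000000 */
  return 0;
}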

Here is the caller graph for this function:

int applyPatches ( )

Update the local densityField part with information from remote processes received in patches. Receiving patches are deallocated here.

Definition at line 338 of file interp.c.

References cicIntersect, cicPatch, cicRecvPatch, cicSender, densityField, gridEndIdx, gridSize, gridStartIdx, MPIrank, MPIsize, x, int64Vector3d::x, doubleVector3d::y, int64Vector3d::y, doubleVector3d::z, and int64Vector3d::z.

339 {
340 
341  int p;
342  int i, j, k;
343 
344  for (i = 0; i < gridSize.x * gridSize.y * gridSize.z; i++)
345  densityField[i] = 0.0;
346 
347  for (p = 0; p < MPIsize; p++) {
348  int i, j, k;
349  if (!cicSender[p])
350  continue;
351  for (i = lowerPatchCornerR[p].x; i <= upperPatchCornerR[p].x; i++)
352  for (j = lowerPatchCornerR[p].y; j <= upperPatchCornerR[p].y; j++)
353  for (k = lowerPatchCornerR[p].z; k <= upperPatchCornerR[p].z; k++) {
354  struct int64Vector3d c, g, size;
355  size.x = upperPatchCornerR[p].x - lowerPatchCornerR[p].x + 1;
356  size.y = upperPatchCornerR[p].y - lowerPatchCornerR[p].y + 1;
357  size.z = upperPatchCornerR[p].z - lowerPatchCornerR[p].z + 1;
358  c.x = i - lowerPatchCornerR[p].x;
359  c.y = j - lowerPatchCornerR[p].y;
360  c.z = k - lowerPatchCornerR[p].z;
361  if (i >= gridStartIdx[MPIrank].x && i <= gridEndIdx[MPIrank].x
362  && j >= gridStartIdx[MPIrank].y && j <= gridEndIdx[MPIrank].y
363  && k >= gridStartIdx[MPIrank].z
364  && k <= gridEndIdx[MPIrank].z) {
365  g.x = i - gridStartIdx[MPIrank].x;
366  g.y = j - gridStartIdx[MPIrank].y;
367  g.z = k - gridStartIdx[MPIrank].z;
368  densityField[gridSize.z * gridSize.y * g.x + gridSize.z * g.y +
369  g.z] +=
370  cicRecvPatch[p][size.z * size.y * c.x + size.z * c.y +
371  c.z];
372  }
373  }
374  free(cicRecvPatch[p]);
375  }
376 
377  free(cicRecvPatch);
378 
379  for (p = 0; p < MPIsize; p++)
380  if (cicIntersect[p])
381  free(cicPatch[p]);
382 
383  free(cicPatch);
384  return;
385 }

Here is the caller graph for this function:

void doInterpolation ( )

For each local cell, its density value is interpolated across the neighbouring grid vertices using the Cloud-In-Cell (CIC) method. Computed values are stored in patches instead of in field buffers. No additional memory allocations are made here.

Definition at line 181 of file interp.c.

References cells, gridEndIdx, gridResolution, gridStartIdx, lnc, MPIsize, patch, x, cellData::x, doubleVector3d::x, int64Vector3d::x, cellData::y, doubleVector3d::y, int64Vector3d::y, cellData::z, doubleVector3d::z, and int64Vector3d::z.

182 {
183 
184  int c, i, p, j, k;
185  struct int64Vector3d idx;
186  struct doubleVector3d d, t;
187  struct int64Vector3d cellIdx;
188  struct doubleVector3d cicCoord;
189 
190  for (p = 0; p < MPIsize; p++)
191  for (i = 0; i < patchSize[p].x; i++)
192  for (j = 0; j < patchSize[p].y; j++)
193  for (k = 0; k < patchSize[p].z; k++)
194  patch(p, i, j, k) = 0.0;
195 
196  for (c = 0; c < lnc; c++) {
197 
198  cellIdx.x = ((cells[c].x - lowerGridCorner.x) / gridResolution);
199  cellIdx.y = ((cells[c].y - lowerGridCorner.y) / gridResolution);
200  cellIdx.z = ((cells[c].z - lowerGridCorner.z) / gridResolution);
201 
202  for (p = 0; p < MPIsize; p++) {
203  int ax, ay, az;
204  for (ax = 0; ax < 2; ax++)
205  for (ay = 0; ay < 2; ay++)
206  for (az = 0; az < 2; az++) {
207  if (cellIdx.x + ax >= gridStartIdx[p].x
208  && cellIdx.y + ay >= gridStartIdx[p].y
209  && cellIdx.z + az >= gridStartIdx[p].z
210  && cellIdx.x + ax <= gridEndIdx[p].x
211  && cellIdx.y + ay <= gridEndIdx[p].y
212  && cellIdx.z + az <= gridEndIdx[p].z) {
213 
214  idx.x = (cellIdx.x + ax) - lowerPatchCorner[p].x;
215  idx.y = (cellIdx.y + ay) - lowerPatchCorner[p].y;
216  idx.z = (cellIdx.z + az) - lowerPatchCorner[p].z;
217 
218  cicCoord.x = lowerGridCorner.x + cellIdx.x * gridResolution;
219  cicCoord.y = lowerGridCorner.y + cellIdx.y * gridResolution;
220  cicCoord.z = lowerGridCorner.z + cellIdx.z * gridResolution;
221 
222  d.x = (cells[c].x - cicCoord.x) / gridResolution;
223  d.y = (cells[c].y - cicCoord.y) / gridResolution;
224  d.z = (cells[c].z - cicCoord.z) / gridResolution;
225 
226  t.x = 1.0 - d.x;
227  t.y = 1.0 - d.y;
228  t.z = 1.0 - d.z;
229 
230  if (cells[c].phase != 5) { /* if not in necrotic phase */
231  if (cells[c].phase == 0) { /* if in G0 phase - lower consumption */
232  patch(p, idx.x, idx.y, idx.z) +=
233  0.75 * (ax * d.x + (1 - ax) * t.x) * (ay * d.y +
234  (1 -
235  ay) * t.y) *
236  (az * d.z + (1 - az) * t.z);
237  } else { /* if not in G0 phase - normal consumption */
238  patch(p, idx.x, idx.y, idx.z) +=
239  1.0 * (ax * d.x + (1 - ax) * t.x) * (ay * d.y +
240  (1 -
241  ay) * t.y) *
242  (az * d.z + (1 - az) * t.z);
243  }
244  }
245 
246  }
247  }
248  }
249  }
250  return;
251 }
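The deposited CIC weight above is scaled by a phase-dependent consumption factor: nothing is deposited for necrotic cells (phase 5, per the listing's comments), G0 cells (phase 0) deposit 0.75 of the weight, and all other phases deposit the full weight. A small standalone sketch of that selection (the phase numbering follows the comments in the listing; the helper itself is hypothetical, not part of interp.c):

#include <stdio.h>

/* consumptionFactor() is a hypothetical helper, not part of interp.c */
static double consumptionFactor(int phase)
{
  if (phase == 5)   /* necrotic phase - no consumption */
    return 0.0;
  if (phase == 0)   /* G0 phase - lower consumption */
    return 0.75;
  return 1.0;       /* all other phases - normal consumption */
}

int main(void)
{
  int phase;
  for (phase = 0; phase <= 5; phase++)
    printf("phase %d -> factor %.2f\n", phase, consumptionFactor(phase));
  return 0;
}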

Here is the caller graph for this function:

void findPatches ( )

For each local cell we check its position in the grid. If the cell is located in the grid partition of another process, the information about this cell has to be sent to that remote process via MPI communication. This is done by preparing special patches and sending the information for whole batches of cells rather than sending it cell by cell. This function identifies the patches and allocates the memory buffers for them.

Definition at line 60 of file interp.c.

References cells, cicIntersect, cicPatch, gridEndIdx, gridResolution, gridStartIdx, lnc, MPIsize, sdim, x, cellData::x, int64Vector3d::x, cellData::y, int64Vector3d::y, cellData::z, and int64Vector3d::z.

61 {
62 
63  int i, c, p;
64  struct int64Vector3d cellIdx;
65 
66  cicPatch = (double **) calloc(MPIsize, sizeof(double *));
67  cicIntersect = (int *) calloc(MPIsize, sizeof(int));
68  lowerPatchCorner =
69  (struct int64Vector3d *) calloc(MPIsize,
70  sizeof(struct int64Vector3d));
71  upperPatchCorner =
72  (struct int64Vector3d *) calloc(MPIsize,
73  sizeof(struct int64Vector3d));
74  lowerPatchCornerR =
75  (struct int64Vector3d *) calloc(MPIsize,
76  sizeof(struct int64Vector3d));
77  upperPatchCornerR =
78  (struct int64Vector3d *) calloc(MPIsize,
79  sizeof(struct int64Vector3d));
80  patchSize =
81  (struct int64Vector3d *) calloc(MPIsize,
82  sizeof(struct int64Vector3d));
83  patchSizeR =
84  (struct int64Vector3d *) calloc(MPIsize,
85  sizeof(struct int64Vector3d));
86 
87  for (p = 0; p < MPIsize; p++) {
88  cicIntersect[p] = 0;
89  patchSize[p].x = 0;
90  patchSize[p].y = 0;
91  patchSize[p].z = 0;
92  lowerPatchCorner[p].x = INT_MAX;
93  lowerPatchCorner[p].y = INT_MAX;
94  if (sdim == 3)
95  lowerPatchCorner[p].z = INT_MAX;
96  else
97  lowerPatchCorner[p].z = 0;
98  upperPatchCorner[p].x = INT_MIN, upperPatchCorner[p].y = INT_MIN;
99  if (sdim == 3)
100  upperPatchCorner[p].z = INT_MIN;
101  else
102  upperPatchCorner[p].z = 0;
103  }
104 
105  //#pragma omp parallel for default(none) private(p,c,cellIdx) shared(cells,gridResolution,lnc,MPIsize,gridStartIdx,gridEndIdx,lowerPatchCorner,upperPatchCorner,cicIntersect,sdim,lowerGridCorner)
106  for (p = 0; p < MPIsize; p++) {
107 
108  for (c = 0; c < lnc; c++) {
109 
110  int ax, ay, az;
111 
112  cellIdx.x = ((cells[c].x - lowerGridCorner.x) / gridResolution);
113  cellIdx.y = ((cells[c].y - lowerGridCorner.y) / gridResolution);
114  cellIdx.z = ((cells[c].z - lowerGridCorner.z) / gridResolution);
115 
116  for (ax = 0; ax < 2; ax++)
117  for (ay = 0; ay < 2; ay++)
118  for (az = 0; az < 2; az++) {
119 
120  if (cellIdx.x + ax >= gridStartIdx[p].x
121  && cellIdx.y + ay >= gridStartIdx[p].y
122  && cellIdx.z + az >= gridStartIdx[p].z
123  && cellIdx.x + ax <= gridEndIdx[p].x
124  && cellIdx.y + ay <= gridEndIdx[p].y
125  && cellIdx.z + az <= gridEndIdx[p].z) {
126  lowerPatchCorner[p].x =
127  (lowerPatchCorner[p].x >
128  cellIdx.x + ax ? cellIdx.x +
129  ax : lowerPatchCorner[p].x);
130  lowerPatchCorner[p].y =
131  (lowerPatchCorner[p].y >
132  cellIdx.y + ay ? cellIdx.y +
133  ay : lowerPatchCorner[p].y);
134  if (sdim == 3)
135  lowerPatchCorner[p].z =
136  (lowerPatchCorner[p].z >
137  cellIdx.z + az ? cellIdx.z +
138  az : lowerPatchCorner[p].z);
139  upperPatchCorner[p].x =
140  (upperPatchCorner[p].x <
141  cellIdx.x + ax ? cellIdx.x +
142  ax : upperPatchCorner[p].x);
143  upperPatchCorner[p].y =
144  (upperPatchCorner[p].y <
145  cellIdx.y + ay ? cellIdx.y +
146  ay : upperPatchCorner[p].y);
147  if (sdim == 3)
148  upperPatchCorner[p].z =
149  (upperPatchCorner[p].z <
150  cellIdx.z + az ? cellIdx.z +
151  az : upperPatchCorner[p].z);
152 
153  cicIntersect[p] = 1;
154  }
155  }
156  }
157  }
158 
159  for (p = 0; p < MPIsize; p++)
160  if (cicIntersect[p]) {
161  patchSize[p].x = upperPatchCorner[p].x - lowerPatchCorner[p].x + 1;
162  patchSize[p].y = upperPatchCorner[p].y - lowerPatchCorner[p].y + 1;
163  if (sdim == 3)
164  patchSize[p].z = upperPatchCorner[p].z - lowerPatchCorner[p].z + 1;
165  else
166  patchSize[p].z = 1;
167  cicPatch[p] =
168  (double *) calloc(patchSize[p].x * patchSize[p].y *
169  patchSize[p].z, sizeof(double));
170  }
171 
172  return;
173 }
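The patch destined for each remote process is the axis-aligned bounding box of all grid vertices touched by local cells that fall into that process's grid partition; the box is grown by a running min/max and its extent follows as upper - lower + 1. A reduced sketch of that accumulation for a single process and one spatial axis (standalone, with made-up indices, not code from interp.c):

#include <limits.h>
#include <stdio.h>

int main(void)
{
  /* hypothetical x-indices of grid vertices touched by local cells */
  int touched[] = { 12, 9, 17, 11 };
  int n = 4, i;
  int lower = INT_MAX, upper = INT_MIN;

  for (i = 0; i < n; i++) {
    if (touched[i] < lower) lower = touched[i];
    if (touched[i] > upper) upper = touched[i];
  }

  printf("patch extent along x: %d vertices (from %d to %d)\n",
         upper - lower + 1, lower, upper);
  return 0;
}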

Here is the caller graph for this function:

void initCellsToGridExchange ( )

This function initializes the data exchange between processes required in the cells-to-grid interpolation. It enables overlapping of communication and computation.

Definition at line 598 of file interp.c.

References doInterpolation(), findPatches(), gfields, and initPatchExchange().

599 {
600  if (!gfields)
601  return;
602  findPatches();
603  doInterpolation();
604  initPatchExchange();
605 }
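Together with waitCellsToGridExchange(), this split allows a caller to overlap the patch communication with unrelated local work. A hypothetical usage sketch (the wrapper function and the placement of the function declarations are assumptions, and the intermediate computation is a placeholder, not code from Timothy):

#include "global.h"
#include "fields.h"

/* hypothetical caller; assumes the interp.c functions are declared in the
   included headers */
void stepWithOverlap(void)
{
  initCellsToGridExchange();   /* find patches, interpolate, start Isend/Irecv */

  /* ... local work that does not touch densityField ... */

  waitCellsToGridExchange();   /* finish communication and apply patches */
}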

Here is the call graph for this function:

Here is the caller graph for this function:

void initFieldsPatchesExchange ( )

Each local cell should now receive information about the values of the global fields in the grid. Field patches are filled with the appropriate values and non-blocking communication is initiated. This is executed after the global fields are computed. Field patch buffers are allocated here; the sizes of the patches are the same as those from the previous CIC communication. Receiving field patches and MPI_Request tables are also allocated here.

Definition at line 398 of file interp.c.

References cicReceiver, cicReqRecv, cicReqSend, cicSender, fieldAddr, fieldsPatches, fieldsPatchesCommBuff, gridSize, gridStartIdx, MPIrank, MPIsize, NFIELDS, stopRun(), x, int64Vector3d::x, int64Vector3d::y, and int64Vector3d::z.

399 {
400 
401  int f; /* fields index */
402  int p; /* process index */
403  struct int64Vector3d idx, g;
404  struct int64Vector3d size;
405 
406  fieldsPatchesCommBuff = (double **) calloc(MPIsize, sizeof(double *));
407 
408  for (p = 0; p < MPIsize; p++) {
409  int fieldPatchSize;
410  if (!cicSender[p])
411  continue; /* continue to next process if current process do not overlap domain */
412  size.x = upperPatchCornerR[p].x - lowerPatchCornerR[p].x + 1;
413  size.y = upperPatchCornerR[p].y - lowerPatchCornerR[p].y + 1;
414  size.z = upperPatchCornerR[p].z - lowerPatchCornerR[p].z + 1;
415  fieldPatchSize = size.x * size.y * size.z;
416  fieldsPatchesCommBuff[p] =
417  (double *) calloc(NFIELDS * fieldPatchSize, sizeof(double));
418  for (f = 0; f < NFIELDS; f++) {
419  int64_t i, j, k;
420  for (i = lowerPatchCornerR[p].x; i <= upperPatchCornerR[p].x; i++)
421  for (j = lowerPatchCornerR[p].y; j <= upperPatchCornerR[p].y; j++)
422  for (k = lowerPatchCornerR[p].z; k <= upperPatchCornerR[p].z;
423  k++) {
424  idx.x = i - lowerPatchCornerR[p].x;
425  idx.y = j - lowerPatchCornerR[p].y;
426  idx.z = k - lowerPatchCornerR[p].z;
427  g.x = i - gridStartIdx[MPIrank].x;
428  g.y = j - gridStartIdx[MPIrank].y;
429  g.z = k - gridStartIdx[MPIrank].z;
430  fieldsPatchesCommBuff[p][f * fieldPatchSize +
431  size.z * size.y * idx.x +
432  size.z * idx.y + idx.z] =
433  ((double *) fieldAddr[f])[gridSize.z * gridSize.y * g.x +
434  gridSize.z * g.y + g.z];
435  }
436  }
437  }
438 
439  if (!(fieldsPatches = (double **) calloc(MPIsize, sizeof(double *))))
440  stopRun(106, "fieldsPatches", __FILE__, __LINE__);
441 
442  cicReqSend = (MPI_Request *) malloc(sizeof(MPI_Request) * MPIsize);
443  cicReqRecv = (MPI_Request *) malloc(sizeof(MPI_Request) * MPIsize);
444 
445  for (p = 0; p < MPIsize; p++) {
446  if (cicSender[p]) {
447  int sendSize;
448  sendSize =
449  (upperPatchCornerR[p].x - lowerPatchCornerR[p].x +
450  1) * (upperPatchCornerR[p].y - lowerPatchCornerR[p].y +
451  1) * (upperPatchCornerR[p].z - lowerPatchCornerR[p].z +
452  1) * NFIELDS;
453  MPI_Isend(&(fieldsPatchesCommBuff[p][0]), sendSize, MPI_DOUBLE, p,
454  MPIrank, MPI_COMM_WORLD, &cicReqSend[p]);
455  }
456  if (cicReceiver[p]) {
457  int recvSize;
458  recvSize =
459  patchSize[p].x * patchSize[p].y * patchSize[p].z * NFIELDS;
460  if (!
461  (fieldsPatches[p] = (double *) calloc(recvSize, sizeof(double))))
462  stopRun(106, "fieldsPatches", __FILE__, __LINE__);
463  MPI_Irecv(&(fieldsPatches[p][0]), recvSize, MPI_DOUBLE, p, p,
464  MPI_COMM_WORLD, &cicReqRecv[p]);
465  }
466  }
467  return;
468 }
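All NFIELDS fields destined for one process are packed into a single contiguous send buffer, with the field index as the slowest-varying dimension: the entry for field f at patch offset (i, j, k) lives at f*fieldPatchSize + size.z*size.y*i + size.z*j + k. A small standalone sketch of that packing order with made-up sizes (not part of interp.c):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  /* hypothetical patch extents and field count */
  int nx = 2, ny = 3, nz = 4, nfields = 2;
  int fieldPatchSize = nx * ny * nz;
  double *buff = (double *) calloc(nfields * fieldPatchSize, sizeof(double));

  int f = 1, i = 1, j = 2, k = 3;
  /* field-major packing: whole patch of field 0, then field 1, ... */
  buff[f * fieldPatchSize + nz * ny * i + nz * j + k] = 42.0;

  printf("flat index = %d of %d\n",
         f * fieldPatchSize + nz * ny * i + nz * j + k,
         nfields * fieldPatchSize);
  free(buff);
  return 0;
}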

Here is the call graph for this function:

Here is the caller graph for this function:

void initPatchExchange ( )

Patches are sent to the receiving processes with a non-blocking communication scheme (MPI_Isend/MPI_Irecv). Receiving patches and MPI_Request tables are allocated here.

Definition at line 259 of file interp.c.

References cicIntersect, cicPatch, cicReceiver, cicRecvPatch, cicReqRecv, cicReqSend, cicSender, MPIrank, MPIsize, x, int64Vector3d::x, doubleVector3d::y, doubleVector3d::z, and int64Vector3d::z.

260 {
261  int p;
262 
263  cicReceiver = (int *) calloc(MPIsize, sizeof(int));
264  cicSender = (int *) calloc(MPIsize, sizeof(int));
265 
266  for (p = 0; p < MPIsize; p++)
267  cicReceiver[p] = cicIntersect[p];
268 
269  MPI_Alltoall(cicReceiver, 1, MPI_INT, cicSender, 1, MPI_INT,
270  MPI_COMM_WORLD);
271 
272  MPI_Alltoall(lowerPatchCorner, sizeof(struct int64Vector3d), MPI_BYTE,
273  lowerPatchCornerR, sizeof(struct int64Vector3d), MPI_BYTE,
274  MPI_COMM_WORLD);
275  MPI_Alltoall(upperPatchCorner, sizeof(struct int64Vector3d), MPI_BYTE,
276  upperPatchCornerR, sizeof(struct int64Vector3d), MPI_BYTE,
277  MPI_COMM_WORLD);
278 
279  cicRecvPatch = (double **) calloc(MPIsize, sizeof(double *));
280  cicReqSend = (MPI_Request *) malloc(sizeof(MPI_Request) * MPIsize);
281  cicReqRecv = (MPI_Request *) malloc(sizeof(MPI_Request) * MPIsize);
282 
283  for (p = 0; p < MPIsize; p++) {
284  if (cicReceiver[p]) {
285  MPI_Isend(&(cicPatch[p][0]),
286  patchSize[p].x * patchSize[p].y * patchSize[p].z,
287  MPI_DOUBLE, p, MPIrank, MPI_COMM_WORLD, &cicReqSend[p]);
288  }
289  if (cicSender[p]) {
290  int recvSize;
291  recvSize =
292  (upperPatchCornerR[p].x - lowerPatchCornerR[p].x +
293  1) * (upperPatchCornerR[p].y - lowerPatchCornerR[p].y +
294  1) * (upperPatchCornerR[p].z - lowerPatchCornerR[p].z +
295  1);
296  if (!(cicRecvPatch[p] = (double *) calloc(recvSize, sizeof(double))))
297  exit(0);
298  MPI_Irecv(&(cicRecvPatch[p][0]), recvSize, MPI_DOUBLE, p, p,
299  MPI_COMM_WORLD, &cicReqRecv[p]);
300  }
301  }
302  return;
303 }
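The exchange follows the usual non-blocking MPI pattern: every rank posts MPI_Isend for the patches it produced and MPI_Irecv for the patches it expects, and the matching MPI_Wait calls live in waitPatchExchange(). A minimal self-contained sketch of that pattern for a single pair of ranks (hypothetical buffers and tags, not code from interp.c):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  int rank, size, i;
  double sendBuf[8], recvBuf[8];
  MPI_Request reqSend, reqRecv;
  MPI_Status status;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  if (size >= 2 && rank < 2) {       /* only ranks 0 and 1 take part */
    int partner = 1 - rank;
    for (i = 0; i < 8; i++)
      sendBuf[i] = rank + 0.1 * i;   /* hypothetical patch contents */

    /* post both operations first, then overlapping work, then wait;
       the tag is the sender's rank, as in initPatchExchange() */
    MPI_Isend(sendBuf, 8, MPI_DOUBLE, partner, rank,    MPI_COMM_WORLD, &reqSend);
    MPI_Irecv(recvBuf, 8, MPI_DOUBLE, partner, partner, MPI_COMM_WORLD, &reqRecv);

    /* ... overlapping computation could go here ... */

    MPI_Wait(&reqSend, &status);
    MPI_Wait(&reqRecv, &status);
    printf("rank %d received first value %.1f\n", rank, recvBuf[0]);
  }

  MPI_Finalize();
  return 0;
}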

Here is the caller graph for this function:

void interpolateCellsToGrid ( )

This is the driver function for interpolating cellular data to grid data. It does not overlap communication and computation.

Definition at line 583 of file interp.c.

References applyPatches(), doInterpolation(), findPatches(), initPatchExchange(), and waitPatchExchange().

584 {
585  findPatches();
586  doInterpolation();
587  initPatchExchange();
588  waitPatchExchange();
589  applyPatches();
590 }

Here is the call graph for this function:

void interpolateFieldsToCells ( )

This function interpolates field data back to cells. Overlapping of communication and computation is not implemented here. This function deallocates all major arrays used in the interpolation.

Definition at line 627 of file interp.c.

References applyFieldsPatches(), cicIntersect, cicReceiver, cicSender, gfields, initFieldsPatchesExchange(), and waitFieldsPatchesExchange().

628 {
629  if (!gfields)
630  return;
631 
632  initFieldsPatchesExchange();
633  waitFieldsPatchesExchange();
634  applyFieldsPatches();
635 
636  free(cicReceiver);
637  free(cicSender);
638  free(cicIntersect);
639 
640  free(lowerPatchCorner);
641  free(upperPatchCorner);
642  free(lowerPatchCornerR);
643  free(upperPatchCornerR);
644  free(patchSize);
645  free(patchSizeR);
646 }

Here is the call graph for this function:

Here is the caller graph for this function:

void waitCellsToGridExchange ( )

This function waits for the patch communication to finish in the cells-to-grid interpolation. It enables overlapping of communication and computation.

Definition at line 613 of file interp.c.

References applyPatches(), gfields, and waitPatchExchange().

614 {
615  if (!gfields)
616  return;
617  waitPatchExchange();
618  applyPatches();
619 }

Here is the call graph for this function:

Here is the caller graph for this function:

void waitFieldsPatchesExchange ( )

Wait for the communication to finish. MPI_Request tables and field patch buffers are deallocated here.

Definition at line 475 of file interp.c.

References cicReceiver, cicReqRecv, cicReqSend, cicSender, fieldsPatchesCommBuff, MPIsize, and stopRun().

476 {
477  int p, f;
478  MPI_Status status;
479 
480  for (p = 0; p < MPIsize; p++) {
481  if (!cicSender[p])
482  continue;
483  if (MPI_Wait(&cicReqSend[p], &status) != MPI_SUCCESS)
484  stopRun(103, "sending", __FILE__, __LINE__);
485  }
486 
487  for (p = 0; p < MPIsize; p++) {
488  if (!cicReceiver[p])
489  continue;
490  if (MPI_Wait(&cicReqRecv[p], &status) != MPI_SUCCESS)
491  stopRun(103, "receiving", __FILE__, __LINE__);
492  }
493 
494  free(cicReqSend);
495  free(cicReqRecv);
496 
497  for (p = 0; p < MPIsize; p++)
498  free(fieldsPatchesCommBuff[p]);
499  free(fieldsPatchesCommBuff);
500 
501  return;
502 }

Here is the call graph for this function:

Here is the caller graph for this function:

int waitPatchExchange ( )

Wait for communication to finish. MPI_Request tables are deallocated here.

Definition at line 309 of file interp.c.

References cicReceiver, cicReqRecv, cicReqSend, cicSender, MPIsize, and stopRun().

310 {
311  int p;
312  MPI_Status status;
313 
314  for (p = 0; p < MPIsize; p++) {
315  if (!cicReceiver[p])
316  continue;
317  if (MPI_Wait(&cicReqSend[p], &status) != MPI_SUCCESS)
318  stopRun(103, "sending", __FILE__, __LINE__);
319  }
320 
321  for (p = 0; p < MPIsize; p++) {
322  if (!cicSender[p])
323  continue;
324  if (MPI_Wait(&cicReqRecv[p], &status) != MPI_SUCCESS)
325  stopRun(103, "receiving", __FILE__, __LINE__);
326  }
327 
328  free(cicReqSend);
329  free(cicReqRecv);
330 
331  return 0;
332 }

Here is the call graph for this function:

Here is the caller graph for this function:

Variable Documentation

int* cicIntersect

Definition at line 36 of file interp.c.

double** cicPatch

Definition at line 35 of file interp.c.

int* cicReceiver

Definition at line 37 of file interp.c.

double** cicRecvPatch

Definition at line 39 of file interp.c.

MPI_Request * cicReqRecv

Definition at line 41 of file interp.c.

MPI_Request* cicReqSend

Definition at line 41 of file interp.c.

int* cicSender

Definition at line 38 of file interp.c.

struct int64Vector3d* lowerPatchCorner

Definition at line 45 of file interp.c.

struct int64Vector3d* lowerPatchCornerR

Definition at line 46 of file interp.c.

struct int64Vector3d* patchSize

Definition at line 47 of file interp.c.

struct int64Vector3d* patchSizeR

Definition at line 48 of file interp.c.

int* recvP

Definition at line 43 of file interp.c.

struct int64Vector3d * upperPatchCorner

Definition at line 45 of file interp.c.

struct int64Vector3d * upperPatchCornerR

Definition at line 46 of file interp.c.