source: src/comm/mpi/settings.cpp@ f003a9

Last change on this file since f003a9 was f003a9, checked in by Julian Iseringhausen <isering@…>, 14 years ago

Refactored vmg in order to separate the core library and the particle simulation part properly.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1798 5161e1c8-67bf-11de-9fd5-51895aff932f

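/*
 * settings.cpp -- MPI-specific settings for the VMG library.
 *
 * Builds and caches the MPI infrastructure used by the communication layer:
 * temporary grids that bridge differing domain decompositions between
 * multigrid levels, per-level global and local Cartesian communicators, and
 * the derived datatypes used to exchange grid subarrays between processes.
 */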
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif
#else
#error MPI is needed to compile CommMPISettings
#endif

#include <cassert>

#include "comm/comm.hpp"
#include "comm/mpi/settings.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"

using namespace VMG;

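/*
 * Returns the grid that acts as the finer counterpart of the given grid.
 * This is either the next-finer multigrid level itself or a temporary grid
 * set up in ComputeSettings() when the domain decompositions do not match.
 */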
Grid& VMG::MPI::Settings::FinerGrid(const Grid& grid)
{
  assert(finer_grids.find(&grid) != finer_grids.end());
  return *finer_grids.find(&grid)->second;
}

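/*
 * Returns the grid that acts as the coarser counterpart of the given grid,
 * i.e. the next-coarser multigrid level or a temporary grid created in
 * ComputeSettings() when the local domains do not line up.
 */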
Grid& VMG::MPI::Settings::CoarserGrid(const Grid& grid)
{
  assert(coarser_grids.find(&grid) != coarser_grids.end());
  return *coarser_grids.find(&grid)->second;
}

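/*
 * Returns the global communicator associated with the level of the given
 * grid. The communicator may be MPI_COMM_NULL on processes that do not hold
 * any data on this level.
 */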
MPI_Comm VMG::MPI::Settings::CommunicatorGlobal(const Grid& grid) const
{
  assert(communicators_global.find(grid.Level()) != communicators_global.end());
  return communicators_global.find(grid.Level())->second;
}

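/*
 * Returns the local communicator created for the given grid in
 * CreateLocalCommunicator(). It contains exactly the processes that own a
 * non-empty part of this grid and is MPI_COMM_NULL everywhere else.
 */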
MPI_Comm VMG::MPI::Settings::CommunicatorLocal(const Grid& grid) const
{
  KeyUnsorted k(grid, 0);
  assert(communicators_local.find(k) != communicators_local.end());
  return communicators_local.find(k)->second;
}

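/*
 * Returns the set of derived datatypes describing the inter-process transfer
 * from grid_old to grid_new; direction distinguishes the two transfer
 * directions of a level pair (see ComputeSettings()).
 */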
VMG::MPI::DatatypesGlobal& VMG::MPI::Settings::DatatypesGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  KeyUnsorted k(grid_old, grid_new, direction);
  assert(datatypes_global.find(k) != datatypes_global.end());
  return datatypes_global.find(k)->second;
}

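/*
 * Returns the derived datatypes used for communication within a single grid,
 * as built in ComputeSettings().
 */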
VMG::MPI::DatatypesLocal& VMG::MPI::Settings::DatatypesLocal(const Grid& grid)
{
  KeyUnsorted k(grid, 0);
  assert(datatypes_local.find(k) != datatypes_local.end());
  return datatypes_local.find(k)->second;
}

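/*
 * Precomputes all MPI-related settings for the given solution and right-hand
 * side multigrid hierarchies: temporary coarser/finer grids, global and
 * local communicators for every level, and the single-grid and two-grid
 * datatypes needed for the level transfers.
 */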
void VMG::MPI::Settings::ComputeSettings(Multigrid& sol, Multigrid& rhs, MPI_Comm& comm_global)
{
  std::map<const Grid*, Grid*>::const_iterator grid_iter;

  Comm& comm = *MG::GetComm();

  /*
   * Create coarser grids
   */
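  /*
   * If the coarsened local region of level i already lies completely within
   * the local region of level i-1, the existing coarser level is reused and
   * the temporary grid is discarded; otherwise the temporary grid is kept as
   * an intermediate for redistributing data between the two decompositions.
   */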
  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(sol(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalBegin().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&sol(i), &sol(i-1)));
    }else {
      coarser_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  for (int i=rhs.MaxLevel(); i>rhs.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(rhs(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalBegin().IsComponentwiseLessOrEqual(rhs(i-1).Global().LocalEnd()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseGreaterOrEqual(rhs(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(rhs(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&rhs(i), &rhs(i-1)));
    }else {
      coarser_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create finer grids
   */
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(sol(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin() == sol(i+1).Global().LocalBegin() &&
        temp_grid->Global().LocalEnd() == sol(i+1).Global().LocalEnd()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&sol(i), &sol(i+1)));
    }else {
      finer_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  for (int i=rhs.MinLevel(); i<rhs.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin() == rhs(i+1).Global().LocalBegin() &&
        temp_grid->Global().LocalEnd() == rhs(i+1).Global().LocalEnd()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&rhs(i), &rhs(i+1)));
    }else {
      finer_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create global communicators
   */
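  /*
   * Each level obtains a communicator that contains only the processes
   * holding data on that level or on its associated temporary grids; all
   * other processes store MPI_COMM_NULL for this level.
   */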
  for (int i=sol.MinLevel()+1; i<sol.MaxLevel(); ++i)
    CreateGlobalCommunicator(comm_global, &sol(i), &CoarserGrid(sol(i+1)), &FinerGrid(sol(i-1)));

  CreateGlobalCommunicator(comm_global, &sol(sol.MinLevel()), &CoarserGrid(sol(sol.MinLevel()+1)));
  CreateGlobalCommunicator(comm_global, &sol(sol.MaxLevel()), &FinerGrid(sol(sol.MaxLevel()-1)));

  MPI_Comm my_comm_global;
  MPI_Comm_dup(comm_global, &my_comm_global);
  communicators_global.insert(std::make_pair(0, my_comm_global));

  /*
   * Create local communicators
   */
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, sol(i));

  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, FinerGrid(sol(i)));

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i)
    CreateLocalCommunicator(comm_global, CoarserGrid(sol(i)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    CreateLocalCommunicator(comm_global, comm.GetParticleGrid());

  /*
   * Create single grid datatypes
   */
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    datatypes_local.insert(std::make_pair(KeyUnsorted(sol(i), 0), VMG::MPI::DatatypesLocal(sol(i), CommunicatorLocal(sol(i)), true)));

  for (grid_iter=finer_grids.begin(); grid_iter!=finer_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  for (grid_iter=coarser_grids.begin(); grid_iter!=coarser_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    datatypes_local.insert(std::make_pair(KeyUnsorted(comm.GetParticleGrid(), 0), VMG::MPI::DatatypesLocal(comm.GetParticleGrid(), CommunicatorLocal(comm.GetParticleGrid()), true)));

  /*
   * Create two grid datatypes
   */
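  /*
   * For each pair of neighboring levels, datatypes are set up in both
   * directions (direction 0 and 1), presumably so that data can be moved to
   * the temporary grid before a level transfer and back again afterwards.
   */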
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {
    AddDatatypeGlobal(sol(i), CoarserGrid(sol(i+1)), 0);
    AddDatatypeGlobal(CoarserGrid(sol(i+1)), sol(i), 1);
  }

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {
    AddDatatypeGlobal(sol(i), FinerGrid(sol(i-1)), 0);
    AddDatatypeGlobal(FinerGrid(sol(i-1)), sol(i), 1);
  }
}

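/*
 * Splits comm_global into a communicator containing only the processes that
 * own a non-empty part of grid_1 (or of the optional grids grid_2/grid_3)
 * and rebuilds a Cartesian topology on it. The result is stored under the
 * level of grid_1; excluded processes store MPI_COMM_NULL.
 */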
void VMG::MPI::Settings::CreateGlobalCommunicator(MPI_Comm& comm_global, const Grid* grid_1, const Grid* grid_2, const Grid* grid_3)
{
  int rank;
  MPI_Comm comm_new;

  const bool in_communicator = (grid_1->Global().LocalSize().Product() > 0) ||
                               (grid_2 && grid_2->Global().LocalSize().Product() > 0) ||
                               (grid_3 && grid_3->Global().LocalSize().Product() > 0);

  MPI_Comm_rank(comm_global, &rank);

  if (in_communicator) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  }else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  communicators_global.insert(std::make_pair(grid_1->Level(), comm_new));
}

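/*
 * Creates a Cartesian communicator containing the processes that own a
 * non-empty part of the given grid. Communicators that turn out to be
 * congruent to an existing one are freed and the existing communicator is
 * reused, so each distinct communicator exists only once.
 */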
void VMG::MPI::Settings::CreateLocalCommunicator(MPI_Comm& comm_global, const Grid& grid)
{
  int rank, comm_equal;
  MPI_Comm comm_new;
  std::set<MPI_Comm>::iterator iter;

  MPI_Comm_rank(comm_global, &rank);

  if (grid.Global().LocalSize().Product() > 0) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  }else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  if (comm_new != MPI_COMM_NULL) {
    for (iter=communicators_local_unique.begin(); iter!=communicators_local_unique.end(); ++iter) {
      if (*iter != MPI_COMM_NULL) {
        MPI_Comm_compare(comm_new, *iter, &comm_equal);
        assert(comm_equal != MPI_SIMILAR);
        if (comm_equal == MPI_IDENT || comm_equal == MPI_CONGRUENT) {
          MPI_Comm_free(&comm_new);
          comm_new = *iter;
          break;
        }
      }
    }
  }

  std::pair<std::set<MPI_Comm>::iterator, bool> insert_result = communicators_local_unique.insert(comm_new);
  communicators_local.insert(std::make_pair(KeyUnsorted(grid, 0), *insert_result.first));
}

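/*
 * Builds the global send/receive datatype lists for a transfer from grid_old
 * to grid_new. Every process publishes the global extent of its local part
 * via MPI_Allgather; the overlaps with the local part of the other grid
 * determine which subarrays have to be sent to or received from which rank.
 */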
void VMG::MPI::Settings::AddDatatypeGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  MPI_Comm comm = CommunicatorGlobal(grid_old);
  bool dt_is_new = true;

  // Insert into map
  std::pair< std::map<VMG::MPI::KeyUnsorted, VMG::MPI::DatatypesGlobal>::iterator, bool > insert_result =
    datatypes_global.insert(std::make_pair(VMG::MPI::KeyUnsorted(grid_old, grid_new, direction), VMG::MPI::DatatypesGlobal()));
  VMG::MPI::DatatypesGlobal& dt_global = insert_result.first->second;
  dt_is_new = insert_result.second;

  if (comm != MPI_COMM_NULL) {

    Index begin, end, offset_old, offset_new;
    int rank, size;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    std::vector<int> buffer;
    buffer.resize(6*size);

    // Compute local offset for ghost cells
    for (int i=0; i<3; ++i) {
      offset_old[i] = (grid_old.Local().HaloSize1()[i] > 0 ? grid_old.Local().Begin()[i] : 0);
      offset_new[i] = (grid_new.Local().HaloSize1()[i] > 0 ? grid_new.Local().Begin()[i] : 0);
    }

    // Publish which grid part this process can offer
    if (&grid_old == &grid_new) {
      for (int i=0; i<6; ++i)
        buffer[6*rank+i] = 0;
    }else {
      for (int i=0; i<3; ++i) {
        buffer[6*rank+i] = grid_old.Global().LocalBegin()[i];
        buffer[6*rank+i+3] = grid_old.Global().LocalEnd()[i];
      }
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who offers a useful grid part
      for (int i=0; i<size; ++i) {
        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());
        end = end.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process has a useful part
          dt_global.Receive().push_back(VMG::MPI::Datatype(grid_new.Local().SizeTotal(),
                                                           end - begin,
                                                           begin - grid_new.Global().LocalBegin() + offset_new,
                                                           i, 0, 0, true));
        }
      }
    }

    // Publish which grid parts this process needs
    for (int i=0; i<3; ++i) {
      buffer[6*rank+i] = grid_new.Global().LocalBegin()[i];
      buffer[6*rank+i+3] = grid_new.Global().LocalEnd()[i];
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who needs a part of my grid
      for (int i=0; i<size; ++i) {

        if ((i == rank) && (&grid_old == &grid_new))
          continue;

        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());
        end = end.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process needs one of my parts
          dt_global.Send().push_back(VMG::MPI::Datatype(grid_old.Local().SizeTotal(),
                                                        end - begin,
                                                        begin - grid_old.Global().LocalBegin() + offset_old,
                                                        i, 0, 0, true));
        }
      }
    }
  }
}

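/*
 * Returns a committed MPI subarray datatype describing the region
 * [begin, end) of a local array of extent size_local. Datatypes are cached
 * in a map so that each distinct region is created and committed only once;
 * they are freed in the destructor. A caller would typically pass the result
 * directly to a point-to-point call, e.g. (hypothetical usage sketch):
 *
 *   MPI_Send(buf, 1, settings.Datatype(begin, end, size_local, size_global, level),
 *            dest_rank, tag, comm);
 */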
MPI_Datatype& VMG::MPI::Settings::Datatype(const Index& begin, const Index& end,
                                           const Index& size_local, const Index& size_global,
                                           const int& level)
{
  KeyUnsorted k(begin, end, size_local, size_global, level, 0);
  std::map<KeyUnsorted, MPI_Datatype>::iterator iter = datatypes.find(k);

  if (iter != datatypes.end())
    return iter->second;

  MPI_Datatype dt;
  Index sizes = size_local;
  Index subsizes = end - begin;
  Index starts = begin;

  MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt);
  MPI_Type_commit(&dt);

  return datatypes.insert(std::make_pair(k, dt)).first->second;
}

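/*
 * Determines the extent of the process grid spanned by the processes in
 * comm: all Cartesian coordinates are gathered and the number of distinct
 * values per dimension gives the new dimensions.
 */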
Index VMG::MPI::Settings::GlobalDims(MPI_Comm comm, Index pos)
{
  std::set<int> unique_set[3];
  Index dims;

  int size;
  MPI_Comm_size(comm, &size);

  int* coordinates = new int[3*size];

  MPI_Allgather(pos.vec(), 3, MPI_INT, coordinates, 3, MPI_INT, comm);

  for (int i=0; i<size; ++i)
    for (int j=0; j<3; ++j)
      unique_set[j].insert(coordinates[3*i+j]);

  for (int j=0; j<3; ++j)
    dims[j] = static_cast<int>(unique_set[j].size());

  delete [] coordinates;

  return dims;
}

VMG::MPI::Settings::Settings()
{
}

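/*
 * Releases all MPI resources and temporary grids owned by this object.
 */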
VMG::MPI::Settings::~Settings()
{
  std::map<int, MPI_Comm>::iterator iter_comm_global;
  for (iter_comm_global=communicators_global.begin(); iter_comm_global!=communicators_global.end(); ++iter_comm_global)
    if (iter_comm_global->second != MPI_COMM_NULL)
      MPI_Comm_free(&iter_comm_global->second);

  /*
   * Some local communicators were simply copied, so we have to make sure
   * that each communicator is freed exactly once.
   */
  std::set<MPI_Comm>::iterator iter_comm_set;
  for (iter_comm_set=communicators_local_unique.begin(); iter_comm_set!=communicators_local_unique.end(); ++iter_comm_set)
    if (*iter_comm_set != MPI_COMM_NULL) {
      MPI_Comm comm_temp = *iter_comm_set;
      MPI_Comm_free(&comm_temp);
    }

  std::map<KeyUnsorted, MPI_Datatype>::iterator iter_dt;
  for (iter_dt=datatypes.begin(); iter_dt!=datatypes.end(); ++iter_dt)
    if (iter_dt->second != MPI_DATATYPE_NULL)
      MPI_Type_free(&iter_dt->second);

  std::map<const Grid*, Grid*>::iterator iter_grid;
  for (iter_grid=finer_grids.begin(); iter_grid!=finer_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;

  for (iter_grid=coarser_grids.begin(); iter_grid!=coarser_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;
}