source: src/comm/mpi/settings.cpp @ d24c2f

Last change on this file since d24c2f was 9fc7e1, checked in by Julian Iseringhausen <isering@…>, 14 years ago

Bugfix for trac ticket #61.

Removed all variable-length arrays, even those of POD datatypes. Throws no warnings/errors when compiled with GCC 4.6.1 and -pedantic.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1339 5161e1c8-67bf-11de-9fd5-51895aff932f

  • Property mode set to 100644
File size: 14.1 KB
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#else
#error MPI is needed to compile CommMPISettings
#endif

#include <cassert>

#include "comm/comm.hpp"
#include "comm/mpi/settings.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"

using namespace VMG;

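// Lookup helpers: FinerGrid/CoarserGrid return the grid used as the transfer
// partner of the given grid (either a neighboring multigrid level or a temporary
// grid created in ComputeSettings). The assertions require that ComputeSettings
// has been called beforehand.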
Grid& VMG::MPI::Settings::FinerGrid(const Grid& grid)
{
  assert(finer_grids.find(&grid) != finer_grids.end());
  return *finer_grids.find(&grid)->second;
}

Grid& VMG::MPI::Settings::CoarserGrid(const Grid& grid)
{
  assert(coarser_grids.find(&grid) != coarser_grids.end());
  return *coarser_grids.find(&grid)->second;
}

Grid& VMG::MPI::Settings::GlobalCoarseGrid()
{
  assert(global_coarse_grid != NULL);
  return *global_coarse_grid;
}

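// Cached MPI communicators: CommunicatorGlobal is keyed by grid level and is
// used for inter-grid transfers, CommunicatorLocal is keyed by the grid itself
// and is used for single-grid communication.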
MPI_Comm VMG::MPI::Settings::CommunicatorGlobal(const Grid& grid)
{
  assert(communicators_global.find(grid.Level()) != communicators_global.end());
  return communicators_global.find(grid.Level())->second;
}

MPI_Comm VMG::MPI::Settings::CommunicatorLocal(const Grid& grid)
{
  KeyUnsorted k(grid);
  assert(communicators_local.find(k) != communicators_local.end());
  return communicators_local.find(k)->second;
}

VMG::MPI::DatatypesGlobal& VMG::MPI::Settings::DatatypesGlobal(const Grid& grid_old, const Grid& grid_new)
{
  KeyUnsorted k(grid_old, grid_new);
  assert(datatypes_global.find(k) != datatypes_global.end());
  return datatypes_global.find(k)->second;
}

VMG::MPI::DatatypesLocal& VMG::MPI::Settings::DatatypesLocal(const Grid& grid)
{
  KeyUnsorted k(grid);
  assert(datatypes_local.find(k) != datatypes_local.end());
  return datatypes_local.find(k)->second;
}

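// Precomputes everything the MPI communication layer needs for a given pair of
// solution and right-hand-side multigrids: temporary coarser/finer grids for the
// level transfers, the global coarse grid, per-level and per-grid communicators,
// and the corresponding MPI datatypes. Typical use (sketch only; the actual call
// site lives in the surrounding comm module):
//
//   VMG::MPI::Settings settings;
//   settings.ComputeSettings(sol, rhs, comm_global);
//   MPI_Comm level_comm = settings.CommunicatorGlobal(sol(sol.MaxLevel()));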
void VMG::MPI::Settings::ComputeSettings(Multigrid& sol, Multigrid& rhs, MPI_Comm& comm_global)
{
  std::map<const Grid*, Grid*>::const_iterator grid_iter;

  Comm& comm = *MG::GetComm();

  /*
   * Create coarser grids
   */
  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(sol(i), comm.BoundaryConditions());

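    // If the coarsened temporary grid covers exactly the same local region as the
    // next-coarser multigrid level, that level is used directly and the temporary
    // grid is discarded; otherwise the temporary grid is kept as an intermediate
    // for redistributing data. The same pattern is applied to the right-hand side
    // below and, in the opposite direction, to the finer grids.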
    if (temp_grid->Global().BeginLocal() == sol(i-1).Global().BeginLocal() &&
        temp_grid->Global().EndLocal() == sol(i-1).Global().EndLocal()) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&sol(i), &sol(i-1)));
    } else {
      coarser_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  for (int i=rhs.MaxLevel(); i>rhs.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().BeginLocal() == rhs(i-1).Global().BeginLocal() &&
        temp_grid->Global().EndLocal() == rhs(i-1).Global().EndLocal()) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&rhs(i), &rhs(i-1)));
    } else {
      coarser_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create finer grids
   */
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(sol(i), comm.BoundaryConditions());

    if (temp_grid->Global().BeginLocal() == sol(i+1).Global().BeginLocal() &&
        temp_grid->Global().EndLocal() == sol(i+1).Global().EndLocal()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&sol(i), &sol(i+1)));
    } else {
      finer_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  for (int i=rhs.MinLevel(); i<rhs.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().BeginLocal() == rhs(i+1).Global().BeginLocal() &&
        temp_grid->Global().EndLocal() == rhs(i+1).Global().EndLocal()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&rhs(i), &rhs(i+1)));
    } else {
      finer_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create global coarse grid
   */
  global_coarse_grid = new TempGrid();
  global_coarse_grid->SetPropertiesToGlobalCoarseGrid();

  /*
   * Create global communicators
   */
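  // One communicator per level, containing only the processes that own part of
  // that level or of the auxiliary transfer grids attached to it.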
  for (int i=sol.MinLevel()+1; i<sol.MaxLevel(); ++i)
    CreateGlobalCommunicator(comm_global, &sol(i), &CoarserGrid(sol(i+1)), &FinerGrid(sol(i-1)));

  CreateGlobalCommunicator(comm_global, &sol(sol.MinLevel()), &CoarserGrid(sol(sol.MinLevel()+1)));
  CreateGlobalCommunicator(comm_global, &sol(sol.MaxLevel()), &FinerGrid(sol(sol.MaxLevel()-1)));

  MPI_Comm my_comm_global;
  MPI_Comm_dup(comm_global, &my_comm_global);
  communicators_global.insert(std::make_pair(0, my_comm_global));

  /*
   * Create local communicators
   */
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, sol(i));

  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, FinerGrid(sol(i)));

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i)
    CreateLocalCommunicator(comm_global, CoarserGrid(sol(i)));

  CreateLocalCommunicator(comm_global, comm.GetParticleGrid());
  CreateLocalCommunicator(comm_global, *global_coarse_grid);

  /*
   * Create single grid datatypes
   */
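  // Register local (single-grid) datatypes for every grid instance that takes
  // part in communication: all multigrid levels, the temporary finer/coarser
  // grids, the particle grid and the global coarse grid.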
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    datatypes_local.insert(std::make_pair(KeyUnsorted(sol(i)), VMG::MPI::DatatypesLocal(sol(i), CommunicatorLocal(sol(i)))));

  for (grid_iter=finer_grids.begin(); grid_iter!=finer_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second))));

  for (grid_iter=coarser_grids.begin(); grid_iter!=coarser_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second))));

  datatypes_local.insert(std::make_pair(KeyUnsorted(comm.GetParticleGrid()), VMG::MPI::DatatypesLocal(comm.GetParticleGrid(), CommunicatorLocal(comm.GetParticleGrid()))));
  datatypes_local.insert(std::make_pair(KeyUnsorted(*global_coarse_grid), VMG::MPI::DatatypesLocal(*global_coarse_grid, CommunicatorLocal(*global_coarse_grid))));

  /*
   * Create two grid datatypes
   */
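  // For every pair of grids that exchange data during a level transfer, datatypes
  // are registered in both directions, plus the pair coupling the coarsest level
  // to the global coarse grid.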
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {
    AddDatatypeGlobal(sol(i), CoarserGrid(sol(i+1)));
    AddDatatypeGlobal(CoarserGrid(sol(i+1)), sol(i));
  }

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {
    AddDatatypeGlobal(sol(i), FinerGrid(sol(i-1)));
    AddDatatypeGlobal(FinerGrid(sol(i-1)), sol(i));
  }

  AddDatatypeGlobal(sol(sol.MinLevel()), *global_coarse_grid);
  AddDatatypeGlobal(*global_coarse_grid, sol(sol.MinLevel()));

}

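// Creates the communicator used for inter-grid transfers on the level of grid_1.
// Processes join if they own a non-empty local part of any of the (up to three)
// grids involved; all other processes pass MPI_UNDEFINED and end up with
// MPI_COMM_NULL. The result is stored under grid_1's level.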
void VMG::MPI::Settings::CreateGlobalCommunicator(MPI_Comm& comm_global, const Grid* grid_1, const Grid* grid_2, const Grid* grid_3)
{
  int rank;
  MPI_Comm comm_new;

  const bool in_communicator = (grid_1->Global().SizeLocal().Product() > 0) ||
                               (grid_2 && grid_2->Global().SizeLocal().Product() > 0) ||
                               (grid_3 && grid_3->Global().SizeLocal().Product() > 0);

  MPI_Comm_rank(comm_global, &rank);

  if (in_communicator) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  } else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  communicators_global.insert(std::make_pair(grid_1->Level(), comm_new));
}

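// Creates (or reuses) a Cartesian communicator containing the processes that own
// a non-empty local part of the given grid. Newly created communicators are
// compared against the already known ones, so congruent communicators are freed
// and shared instead of being duplicated per grid.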
void VMG::MPI::Settings::CreateLocalCommunicator(MPI_Comm& comm_global, const Grid& grid)
{
  int rank, comm_equal;
  MPI_Comm comm_new;
  std::set<MPI_Comm>::iterator iter;

  MPI_Comm_rank(comm_global, &rank);

  if (grid.Global().SizeLocal().Product() > 0) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  } else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  if (comm_new != MPI_COMM_NULL) {
    for (iter=communicators_local_unique.begin(); iter!=communicators_local_unique.end(); ++iter) {
      if (*iter != MPI_COMM_NULL) {
        MPI_Comm_compare(comm_new, *iter, &comm_equal);
        assert(comm_equal != MPI_SIMILAR);
        if (comm_equal == MPI_IDENT || comm_equal == MPI_CONGRUENT) {
          MPI_Comm_free(&comm_new);
          comm_new = *iter;
          break;
        }
      }
    }
  }

  std::pair<std::set<MPI_Comm>::iterator, bool> insert_result = communicators_local_unique.insert(comm_new);
  communicators_local.insert(std::make_pair(KeyUnsorted(grid), *insert_result.first));
}

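// Builds the send/receive datatype lists used to move data from grid_old to
// grid_new on the global communicator of grid_old. Every process publishes the
// global extent of its local part via MPI_Allgather; clamping those extents
// against the local part of the other grid determines which ranks exchange which
// subarrays. A process whose old and new grid are the same object publishes an
// empty extent, since its data is already in place.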
void VMG::MPI::Settings::AddDatatypeGlobal(const Grid& grid_old, const Grid& grid_new)
{
  MPI_Comm comm = CommunicatorGlobal(grid_old);

  // Insert into map
  std::pair< std::map<VMG::MPI::KeyUnsorted, VMG::MPI::DatatypesGlobal>::iterator, bool > insert_result =
    datatypes_global.insert(std::make_pair(VMG::MPI::KeyUnsorted(grid_old, grid_new), VMG::MPI::DatatypesGlobal()));

  VMG::MPI::DatatypesGlobal& dt_global = insert_result.first->second;

  if (comm != MPI_COMM_NULL) {

    Index begin, end, offset_old, offset_new;
    int rank, size;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    std::vector<int> buffer;
    buffer.resize(6*size);

    // Compute local offset for ghost cells
    for (int i=0; i<3; ++i) {
      offset_old[i] = (grid_old.Local().HasHalo1()[i] ? grid_old.Local().Begin()[i] : 0);
      offset_new[i] = (grid_new.Local().HasHalo1()[i] ? grid_new.Local().Begin()[i] : 0);
    }

    // Publish which grid part this process can offer
    if (&grid_old == &grid_new) {
      for (int i=0; i<6; ++i)
        buffer[6*rank+i] = 0;
    } else {
      for (int i=0; i<3; ++i) {
        buffer[6*rank+i] = grid_old.Global().BeginLocal()[i];
        buffer[6*rank+i+3] = grid_old.Global().EndLocal()[i];
      }
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (insert_result.second) {

      // Decide who offers a useful grid part
      for (int i=0; i<size; ++i) {
        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_new.Global().BeginLocal(), grid_new.Global().EndLocal());
        end = end.Clamp(grid_new.Global().BeginLocal(), grid_new.Global().EndLocal());

        if ((end-begin).Product() > 0) {
          // This process has a useful part
          dt_global.Receive().push_back(VMG::MPI::Datatype(grid_new.Local().SizeTotal(),
                                                           end - begin,
                                                           begin - grid_new.Global().BeginLocal() + offset_new,
                                                           i, 0, 0));
        }
      }
    }

    // Publish which grid parts this process needs
    if (&grid_old == &grid_new) {
      for (int i=0; i<6; ++i)
        buffer[6*rank+i] = 0;
    } else {
      for (int i=0; i<3; ++i) {
        buffer[6*rank+i] = grid_new.Global().BeginLocal()[i];
        buffer[6*rank+i+3] = grid_new.Global().EndLocal()[i];
      }
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (insert_result.second) {

      // Decide who needs a part of my grid
      for (int i=0; i<size; ++i) {

        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_old.Global().BeginLocal(), grid_old.Global().EndLocal());
        end = end.Clamp(grid_old.Global().BeginLocal(), grid_old.Global().EndLocal());

        if ((end-begin).Product() > 0) {
          // This process needs one of my parts
          dt_global.Send().push_back(VMG::MPI::Datatype(grid_old.Local().SizeTotal(),
                                                        end - begin,
                                                        begin - grid_old.Global().BeginLocal() + offset_old,
                                                        i, 0, 0));
        }
      }
    }
  }
}

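// Returns a committed MPI subarray datatype describing the block [begin, end) of
// a local array of extent size_local. Datatypes are cached by key (size_global
// only enters the key), so each distinct geometry is created once and freed in
// the destructor.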
MPI_Datatype& VMG::MPI::Settings::Datatype(const Index& begin, const Index& end, const Index& size_local, const Index& size_global)
{
  KeyUnsorted k(begin, end, size_local, size_global);
  std::map<KeyUnsorted, MPI_Datatype>::iterator iter = datatypes.find(k);

  if (iter != datatypes.end())
    return iter->second;

  MPI_Datatype dt;
  Index sizes = size_local;
  Index subsizes = end - begin;
  Index starts = begin;

  MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt);
  MPI_Type_commit(&dt);

  return datatypes.insert(std::make_pair(k, dt)).first->second;
}

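// Determines the extent of the process grid spanned by the ranks in comm by
// gathering all Cartesian coordinates and counting the distinct values per
// dimension.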
Index VMG::MPI::Settings::GlobalDims(MPI_Comm comm, Index pos)
{
  std::set<int> unique_set[3];
  Index dims;

  int size;
  MPI_Comm_size(comm, &size);

  int* coordinates = new int[3*size];

  MPI_Allgather(pos.vec(), 3, MPI_INT, coordinates, 3, MPI_INT, comm);

  for (int i=0; i<size; ++i)
    for (int j=0; j<3; ++j)
      unique_set[j].insert(coordinates[3*i+j]);

  for (int j=0; j<3; ++j)
    dims[j] = static_cast<int>(unique_set[j].size());

  delete [] coordinates;

  return dims;
}

VMG::MPI::Settings::Settings()
{
  global_coarse_grid = NULL;
}

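// Releases all cached MPI resources (communicators and datatypes) and deletes
// the temporary grids owned by this object.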
VMG::MPI::Settings::~Settings()
{
  std::map<int, MPI_Comm>::iterator iter_comm_global;
  for (iter_comm_global=communicators_global.begin(); iter_comm_global!=communicators_global.end(); ++iter_comm_global)
    if (iter_comm_global->second != MPI_COMM_NULL)
      MPI_Comm_free(&iter_comm_global->second);

  /*
   * We simply copied some communicators so we have to make sure that we free
   * each communicator exactly once
   */
  std::set<MPI_Comm>::iterator iter_comm_set;
  for (iter_comm_set=communicators_local_unique.begin(); iter_comm_set!=communicators_local_unique.end(); ++iter_comm_set)
    if (*iter_comm_set != MPI_COMM_NULL) {
      MPI_Comm comm_temp = *iter_comm_set;
      MPI_Comm_free(&comm_temp);
    }

  std::map<KeyUnsorted, MPI_Datatype>::iterator iter_dt;
  for (iter_dt=datatypes.begin(); iter_dt!=datatypes.end(); ++iter_dt)
    if (iter_dt->second != MPI_DATATYPE_NULL)
      MPI_Type_free(&iter_dt->second);

  std::map<const Grid*, Grid*>::iterator iter_grid;
  for (iter_grid=finer_grids.begin(); iter_grid!=finer_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;

  for (iter_grid=coarser_grids.begin(); iter_grid!=coarser_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;

  if (global_coarse_grid != NULL)
    delete global_coarse_grid;
}