source: src/comm/mpi/settings.cpp @ c3acbb

Last change on this file since c3acbb was fcf7f6, checked in by Julian Iseringhausen <isering@…>, 14 years ago

vmg: Added license files and headers (GPLv3).

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1812 5161e1c8-67bf-11de-9fd5-51895aff932f

  • Property mode set to 100644
File size: 15.0 KB
/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif
#else
#error MPI is needed to compile CommMPISettings
#endif

#include <cassert>

#include "comm/comm.hpp"
#include "comm/mpi/settings.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"

using namespace VMG;

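/*
 * Accessors for the transfer partner of a given grid. The returned grid is
 * either a level of the original multigrid hierarchy or a TempGrid allocated
 * in ComputeSettings(); the asserts require that ComputeSettings() has run.
 */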
Grid& VMG::MPI::Settings::FinerGrid(const Grid& grid)
{
  assert(finer_grids.find(&grid) != finer_grids.end());
  return *finer_grids.find(&grid)->second;
}

Grid& VMG::MPI::Settings::CoarserGrid(const Grid& grid)
{
  assert(coarser_grids.find(&grid) != coarser_grids.end());
  return *coarser_grids.find(&grid)->second;
}

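/*
 * Communicator lookups: global communicators are keyed by grid level,
 * local communicators by a KeyUnsorted built from the grid itself.
 * Both maps are filled in ComputeSettings().
 */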
MPI_Comm VMG::MPI::Settings::CommunicatorGlobal(const Grid& grid) const
{
  assert(communicators_global.find(grid.Level()) != communicators_global.end());
  return communicators_global.find(grid.Level())->second;
}

MPI_Comm VMG::MPI::Settings::CommunicatorLocal(const Grid& grid) const
{
  KeyUnsorted k(grid, 0);
  assert(communicators_local.find(k) != communicators_local.end());
  return communicators_local.find(k)->second;
}

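/*
 * Lookups for the precomputed two-grid (global) and single-grid (local)
 * datatype collections, likewise filled in ComputeSettings().
 */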
VMG::MPI::DatatypesGlobal& VMG::MPI::Settings::DatatypesGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  KeyUnsorted k(grid_old, grid_new, direction);
  assert(datatypes_global.find(k) != datatypes_global.end());
  return datatypes_global.find(k)->second;
}

VMG::MPI::DatatypesLocal& VMG::MPI::Settings::DatatypesLocal(const Grid& grid)
{
  KeyUnsorted k(grid, 0);
  assert(datatypes_local.find(k) != datatypes_local.end());
  return datatypes_local.find(k)->second;
}

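/*
 * Precompute everything the MPI communication layer needs for a given pair of
 * multigrid hierarchies: temporary coarser/finer transfer grids, one global
 * communicator per level plus a duplicate of the full Cartesian communicator,
 * local communicators for every grid that is actually used, and the
 * single-grid and two-grid MPI datatypes.
 */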
void VMG::MPI::Settings::ComputeSettings(Multigrid& sol, Multigrid& rhs, MPI_Comm& comm_global)
{

  std::map<const Grid*, Grid*>::const_iterator grid_iter;

  Comm& comm = *MG::GetComm();

  /*
   * Create coarser grids
   */
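  /*
   * For each level, test whether this process' portion of the coarsened grid
   * lies entirely within its portion of the next coarser multigrid level. If
   * it does, that level is used as the transfer partner directly; otherwise a
   * separately distributed TempGrid is kept.
   */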
  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(sol(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalBegin().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&sol(i), &sol(i-1)));
    } else {
      coarser_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

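  /*
   * Same decision for the right-hand-side hierarchy. Note that the
   * containment test compares against sol(i-1); this assumes that sol and rhs
   * share the same data decomposition on every level.
   */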
  for (int i=rhs.MaxLevel(); i>rhs.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalBegin().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&rhs(i), &rhs(i-1)));
    } else {
      coarser_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create finer grids
   */
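  /*
   * Analogously, a level can serve as the finer transfer partner only if its
   * local extents match the refined grid exactly; otherwise a TempGrid with
   * the required layout is used.
   */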
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(sol(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin() == sol(i+1).Global().LocalBegin() &&
        temp_grid->Global().LocalEnd() == sol(i+1).Global().LocalEnd()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&sol(i), &sol(i+1)));
    } else {
      finer_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  for (int i=rhs.MinLevel(); i<rhs.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin() == rhs(i+1).Global().LocalBegin() &&
        temp_grid->Global().LocalEnd() == rhs(i+1).Global().LocalEnd()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&rhs(i), &rhs(i+1)));
    } else {
      finer_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create global communicators
   */
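  /*
   * One communicator per level, containing only those processes that own data
   * on that level or on one of its transfer partners. In addition, a duplicate
   * of the full Cartesian communicator is stored under key 0.
   */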
  for (int i=sol.MinLevel()+1; i<sol.MaxLevel(); ++i)
    CreateGlobalCommunicator(comm_global, &sol(i), &CoarserGrid(sol(i+1)), &FinerGrid(sol(i-1)));

  CreateGlobalCommunicator(comm_global, &sol(sol.MinLevel()), &CoarserGrid(sol(sol.MinLevel()+1)));
  CreateGlobalCommunicator(comm_global, &sol(sol.MaxLevel()), &FinerGrid(sol(sol.MaxLevel()-1)));

  MPI_Comm my_comm_global;
  MPI_Comm_dup(comm_global, &my_comm_global);
  communicators_global.insert(std::make_pair(0, my_comm_global));

  /*
   * Create local communicators
   */
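  /*
   * Local communicators group the processes that actually hold points of a
   * particular grid; congruent communicators are shared via
   * communicators_local_unique (see CreateLocalCommunicator below).
   */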
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, sol(i));

  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, FinerGrid(sol(i)));

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i)
    CreateLocalCommunicator(comm_global, CoarserGrid(sol(i)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    CreateLocalCommunicator(comm_global, comm.GetParticleGrid());

  /*
   * Create single grid datatypes
   */
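  /*
   * One DatatypesLocal object per grid that takes part in the computation,
   * including the temporary transfer grids and, if present, the particle grid.
   */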
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    datatypes_local.insert(std::make_pair(KeyUnsorted(sol(i), 0), VMG::MPI::DatatypesLocal(sol(i), CommunicatorLocal(sol(i)), true)));

  for (grid_iter=finer_grids.begin(); grid_iter!=finer_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  for (grid_iter=coarser_grids.begin(); grid_iter!=coarser_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    datatypes_local.insert(std::make_pair(KeyUnsorted(comm.GetParticleGrid(), 0), VMG::MPI::DatatypesLocal(comm.GetParticleGrid(), CommunicatorLocal(comm.GetParticleGrid()), true)));

  /*
   * Create two grid datatypes
   */
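  /*
   * Datatypes for the level transfers. Each grid pair is registered in both
   * orientations; the last argument appears to distinguish the two directions
   * of the transfer.
   */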
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {
    AddDatatypeGlobal(sol(i), CoarserGrid(sol(i+1)), 0);
    AddDatatypeGlobal(CoarserGrid(sol(i+1)), sol(i), 1);
  }

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {
    AddDatatypeGlobal(sol(i), FinerGrid(sol(i-1)), 0);
    AddDatatypeGlobal(FinerGrid(sol(i-1)), sol(i), 1);
  }
}

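/*
 * Split comm_global into a new Cartesian communicator that contains only the
 * processes owning data on at least one of the (up to three) given grids.
 * Processes that do not qualify receive MPI_COMM_NULL. The result is stored
 * under the level of grid_1.
 */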
void VMG::MPI::Settings::CreateGlobalCommunicator(MPI_Comm& comm_global, const Grid* grid_1, const Grid* grid_2, const Grid* grid_3)
{
  int rank;
  MPI_Comm comm_new;

  const bool in_communicator = (grid_1->Global().LocalSize().Product() > 0) ||
                               (grid_2 && grid_2->Global().LocalSize().Product() > 0) ||
                               (grid_3 && grid_3->Global().LocalSize().Product() > 0);

  MPI_Comm_rank(comm_global, &rank);

  if (in_communicator) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  } else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  communicators_global.insert(std::make_pair(grid_1->Level(), comm_new));
}

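/*
 * Create a communicator containing the processes that hold a non-empty part
 * of the given grid. Newly created communicators are compared against the
 * ones already in communicators_local_unique and reused if they turn out to
 * be identical or congruent, so each distinct communicator exists only once.
 */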
void VMG::MPI::Settings::CreateLocalCommunicator(MPI_Comm& comm_global, const Grid& grid)
{
  int rank, comm_equal;
  MPI_Comm comm_new;
  std::set<MPI_Comm>::iterator iter;

  MPI_Comm_rank(comm_global, &rank);

  if (grid.Global().LocalSize().Product() > 0) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  } else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  if (comm_new != MPI_COMM_NULL) {
    for (iter=communicators_local_unique.begin(); iter!=communicators_local_unique.end(); ++iter) {
      if (*iter != MPI_COMM_NULL) {
        MPI_Comm_compare(comm_new, *iter, &comm_equal);
        assert(comm_equal != MPI_SIMILAR);
        if (comm_equal == MPI_IDENT || comm_equal == MPI_CONGRUENT) {
          MPI_Comm_free(&comm_new);
          comm_new = *iter;
          break;
        }
      }
    }
  }

  std::pair<std::set<MPI_Comm>::iterator, bool> insert_result = communicators_local_unique.insert(comm_new);
  communicators_local.insert(std::make_pair(KeyUnsorted(grid, 0), *insert_result.first));
}

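/*
 * Negotiate the transfer from grid_old to grid_new. Every process publishes
 * the global extents it can offer and the extents it needs via MPI_Allgather;
 * the overlaps, clamped to the local part, are recorded as send/receive block
 * descriptions in the DatatypesGlobal entry keyed by
 * (grid_old, grid_new, direction).
 */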
void VMG::MPI::Settings::AddDatatypeGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  MPI_Comm comm = CommunicatorGlobal(grid_old);
  bool dt_is_new = true;

  // Insert into map
  std::pair< std::map<VMG::MPI::KeyUnsorted, VMG::MPI::DatatypesGlobal>::iterator, bool > insert_result =
    datatypes_global.insert(std::make_pair(VMG::MPI::KeyUnsorted(grid_old, grid_new, direction), VMG::MPI::DatatypesGlobal()));
  VMG::MPI::DatatypesGlobal& dt_global = insert_result.first->second;
  dt_is_new = insert_result.second;

  if (comm != MPI_COMM_NULL) {

    Index begin, end, offset_old, offset_new;
    int rank, size;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    std::vector<int> buffer;
    buffer.resize(6*size);

    // Compute local offset for ghost cells
    for (int i=0; i<3; ++i) {
      offset_old[i] = (grid_old.Local().HaloSize1()[i] > 0 ? grid_old.Local().Begin()[i] : 0);
      offset_new[i] = (grid_new.Local().HaloSize1()[i] > 0 ? grid_new.Local().Begin()[i] : 0);
    }

    // Publish which grid part this process can offer
    if (&grid_old == &grid_new) {
      for (int i=0; i<6; ++i)
        buffer[6*rank+i] = 0;
    } else {
      for (int i=0; i<3; ++i) {
        buffer[6*rank+i] = grid_old.Global().LocalBegin()[i];
        buffer[6*rank+i+3] = grid_old.Global().LocalEnd()[i];
      }
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who offers a useful grid part
      for (int i=0; i<size; ++i) {
        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());
        end = end.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process has a useful part
          dt_global.Receive().push_back(VMG::MPI::Datatype(grid_new.Local().SizeTotal(),
                                                           end - begin,
                                                           begin - grid_new.Global().LocalBegin() + offset_new,
                                                           i, 0, 0, true));
        }
      }
    }

    // Publish which grid parts this process needs
    for (int i=0; i<3; ++i) {
      buffer[6*rank+i] = grid_new.Global().LocalBegin()[i];
      buffer[6*rank+i+3] = grid_new.Global().LocalEnd()[i];
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who needs a part of my grid
      for (int i=0; i<size; ++i) {

        if ((i == rank) && (&grid_old == &grid_new))
          continue;

        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());
        end = end.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process needs one of my parts
          dt_global.Send().push_back(VMG::MPI::Datatype(grid_old.Local().SizeTotal(),
                                                        end - begin,
                                                        begin - grid_old.Global().LocalBegin() + offset_old,
                                                        i, 0, 0, true));
        }
      }
    }
  }
}

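/*
 * Return a committed MPI subarray datatype for the block [begin, end) of a
 * local array of size size_local. Datatypes are cached in the 'datatypes'
 * map, keyed by the block geometry and level, and freed in the destructor.
 */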
MPI_Datatype& VMG::MPI::Settings::Datatype(const Index& begin, const Index& end,
                                           const Index& size_local, const Index& size_global,
                                           const int& level)
{
  KeyUnsorted k(begin, end, size_local, size_global, level, 0);
  std::map<KeyUnsorted, MPI_Datatype>::iterator iter = datatypes.find(k);

  if (iter != datatypes.end())
    return iter->second;

  MPI_Datatype dt;
  Index sizes = size_local;
  Index subsizes = end - begin;
  Index starts = begin;

  MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt);
  MPI_Type_commit(&dt);

  return datatypes.insert(std::make_pair(k, dt)).first->second;
}

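/*
 * Determine the Cartesian extent of a sub-communicator by gathering the
 * original Cartesian coordinates of all members and counting the distinct
 * values along each axis.
 */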
Index VMG::MPI::Settings::GlobalDims(MPI_Comm comm, Index pos)
{
  std::set<int> unique_set[3];
  Index dims;

  int size;
  MPI_Comm_size(comm, &size);

  int* coordinates = new int[3*size];

  MPI_Allgather(pos.vec(), 3, MPI_INT, coordinates, 3, MPI_INT, comm);

  for (int i=0; i<size; ++i)
    for (int j=0; j<3; ++j)
      unique_set[j].insert(coordinates[3*i+j]);

  for (int j=0; j<3; ++j)
    dims[j] = static_cast<int>(unique_set[j].size());

  delete [] coordinates;

  return dims;
}

VMG::MPI::Settings::Settings()
{
}

VMG::MPI::Settings::~Settings()
{
  std::map<int, MPI_Comm>::iterator iter_comm_global;
  for (iter_comm_global=communicators_global.begin(); iter_comm_global!=communicators_global.end(); ++iter_comm_global)
    if (iter_comm_global->second != MPI_COMM_NULL)
      MPI_Comm_free(&iter_comm_global->second);

  /*
   * We simply copied some communicators so we have to make sure that we free
   * each communicator exactly once
   */
  std::set<MPI_Comm>::iterator iter_comm_set;
  for (iter_comm_set=communicators_local_unique.begin(); iter_comm_set!=communicators_local_unique.end(); ++iter_comm_set)
    if (*iter_comm_set != MPI_COMM_NULL) {
      MPI_Comm comm_temp = *iter_comm_set;
      MPI_Comm_free(&comm_temp);
    }

  std::map<KeyUnsorted, MPI_Datatype>::iterator iter_dt;
  for (iter_dt=datatypes.begin(); iter_dt!=datatypes.end(); ++iter_dt)
    if (iter_dt->second != MPI_DATATYPE_NULL)
      MPI_Type_free(&iter_dt->second);

  std::map<const Grid*, Grid*>::iterator iter_grid;
  for (iter_grid=finer_grids.begin(); iter_grid!=finer_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;

  for (iter_grid=coarser_grids.begin(); iter_grid!=coarser_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;
}