source: src/comm/mpi/settings.cpp@ 14d38c

Last change on this file since 14d38c was 1e45786, checked in by Julian Iseringhausen <isering@…>, 13 years ago

Don't use finer tempgrid only when boundaries are equal to original grid.

/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file   settings.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date   Mon Jan 2 18:45:22 2012
 *
 * @brief  Computes several MPI-related settings.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif
#else
#error MPI is needed to compile CommMPISettings
#endif

#include <cassert>
#include <sstream>
#include <string>

#include "comm/comm.hpp"
#include "comm/mpi/settings.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"

using namespace VMG;

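/**
 * Returns the grid registered as the next finer grid of the given grid,
 * either the multigrid's own finer level or a temporary grid created in
 * ComputeSettings(). Asserts that such an entry exists.
 */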
Grid& VMG::MPI::Settings::FinerGrid(const Grid& grid)
{
  assert(finer_grids.find(&grid) != finer_grids.end());
  return *finer_grids.find(&grid)->second;
}

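/**
 * Returns the grid registered as the next coarser grid of the given grid,
 * either the multigrid's own coarser level or a temporary grid created in
 * ComputeSettings(). Asserts that such an entry exists.
 */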
Grid& VMG::MPI::Settings::CoarserGrid(const Grid& grid)
{
  assert(coarser_grids.find(&grid) != coarser_grids.end());
  return *coarser_grids.find(&grid)->second;
}

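/**
 * Looks up the global communicator associated with the level of the given
 * grid. Asserts that a communicator has been created for this level.
 */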
MPI_Comm VMG::MPI::Settings::CommunicatorGlobal(const Grid& grid) const
{
  assert(communicators_global.find(grid.Level()) != communicators_global.end());
  return communicators_global.find(grid.Level())->second;
}

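/**
 * Looks up the local communicator associated with the given grid via its
 * KeyUnsorted. Asserts that a communicator has been created for this grid.
 */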
MPI_Comm VMG::MPI::Settings::CommunicatorLocal(const Grid& grid) const
{
  KeyUnsorted k(grid, 0);
  assert(communicators_local.find(k) != communicators_local.end());
  return communicators_local.find(k)->second;
}

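/**
 * Returns the two-grid datatypes registered in AddDatatypeGlobal() for the
 * transfer from grid_old to grid_new in the given direction. Asserts that
 * the entry exists.
 */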
VMG::MPI::DatatypesGlobal& VMG::MPI::Settings::DatatypesGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  KeyUnsorted k(grid_old, grid_new, direction);
  assert(datatypes_global.find(k) != datatypes_global.end());
  return datatypes_global.find(k)->second;
}

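/**
 * Returns the single-grid datatypes registered in ComputeSettings() for the
 * given grid. Asserts that the entry exists.
 */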
VMG::MPI::DatatypesLocal& VMG::MPI::Settings::DatatypesLocal(const Grid& grid)
{
  KeyUnsorted k(grid, 0);
  assert(datatypes_local.find(k) != datatypes_local.end());
  return datatypes_local.find(k)->second;
}

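/**
 * Precomputes all MPI-related settings for the given multigrids:
 *  - temporary coarser/finer grids wherever the process-local domain of a
 *    level does not match that of its neighboring level,
 *  - global communicators per level and local communicators per grid,
 *  - single-grid datatypes and two-grid (inter-level) datatypes.
 */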
void VMG::MPI::Settings::ComputeSettings(Multigrid& sol, Multigrid& rhs, MPI_Comm& comm_global)
{

  std::map<const Grid*, Grid*>::const_iterator grid_iter;

  Comm& comm = *MG::GetComm();

  /*
   * Create coarser grids
   */
  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(sol(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&sol(i), &sol(i-1)));
    } else {
      coarser_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  //FIXME
  for (int i=rhs.MaxLevel(); i>rhs.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(rhs(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(rhs(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&rhs(i), &rhs(i-1)));
    } else {
      coarser_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create finer grids
   */
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(sol(i), comm.BoundaryConditions());
    if (temp_grid->Global().LocalBegin() == sol(i+1).Global().LocalBegin() &&
        temp_grid->Global().LocalEnd() == sol(i+1).Global().LocalEnd()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&sol(i), &sol(i+1)));
    } else {
      finer_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  for (int i=rhs.MinLevel(); i<rhs.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(rhs(i), comm.BoundaryConditions());
    if (temp_grid->Global().LocalBegin() == rhs(i+1).Global().LocalBegin() &&
        temp_grid->Global().LocalEnd() == rhs(i+1).Global().LocalEnd()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&rhs(i), &rhs(i+1)));
    } else {
      finer_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create global communicators
   */
  for (int i=sol.MinLevel()+1; i<sol.MaxLevel(); ++i)
    CreateGlobalCommunicator(comm_global, &sol(i), &CoarserGrid(sol(i+1)), &FinerGrid(sol(i-1)));

  CreateGlobalCommunicator(comm_global, &sol(sol.MinLevel()), &CoarserGrid(sol(sol.MinLevel()+1)));
  CreateGlobalCommunicator(comm_global, &sol(sol.MaxLevel()), &FinerGrid(sol(sol.MaxLevel()-1)));

  MPI_Comm my_comm_global;
  MPI_Comm_dup(comm_global, &my_comm_global);
  communicators_global.insert(std::make_pair(0, my_comm_global));

  /*
   * Create local communicators
   */
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, sol(i));

  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, FinerGrid(sol(i)));

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i)
    CreateLocalCommunicator(comm_global, CoarserGrid(sol(i)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    CreateLocalCommunicator(comm_global, comm.GetParticleGrid());

  /*
   * Create single grid datatypes
   */
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    datatypes_local.insert(std::make_pair(KeyUnsorted(sol(i), 0), VMG::MPI::DatatypesLocal(sol(i), CommunicatorLocal(sol(i)), true)));

  for (grid_iter=finer_grids.begin(); grid_iter!=finer_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  for (grid_iter=coarser_grids.begin(); grid_iter!=coarser_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    datatypes_local.insert(std::make_pair(KeyUnsorted(comm.GetParticleGrid(), 0), VMG::MPI::DatatypesLocal(comm.GetParticleGrid(), CommunicatorLocal(comm.GetParticleGrid()), true)));

  /*
   * Create two grid datatypes
   */
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {
    AddDatatypeGlobal(sol(i), CoarserGrid(sol(i+1)), 0);
    AddDatatypeGlobal(CoarserGrid(sol(i+1)), sol(i), 1);
  }

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {
    AddDatatypeGlobal(sol(i), FinerGrid(sol(i-1)), 0);
    AddDatatypeGlobal(FinerGrid(sol(i-1)), sol(i), 1);
  }
}

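/**
 * Splits comm_global into a new Cartesian communicator containing every rank
 * that holds a non-empty local part of at least one of the given grids
 * (grid_2 and grid_3 may be NULL). Ranks without data receive MPI_COMM_NULL.
 * The result is stored under the level of grid_1.
 */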
void VMG::MPI::Settings::CreateGlobalCommunicator(MPI_Comm& comm_global, const Grid* grid_1, const Grid* grid_2, const Grid* grid_3)
{
  int rank;
  MPI_Comm comm_new;

  const bool in_communicator = (grid_1->Global().LocalSize().Product() > 0) ||
                               (grid_2 && grid_2->Global().LocalSize().Product() > 0) ||
                               (grid_3 && grid_3->Global().LocalSize().Product() > 0);

  MPI_Comm_rank(comm_global, &rank);

  if (in_communicator) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  } else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  communicators_global.insert(std::make_pair(grid_1->Level(), comm_new));
}

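/**
 * Creates a Cartesian communicator containing every rank that holds a
 * non-empty local part of the given grid. A newly created communicator that
 * is identical or congruent to an already registered one is freed and the
 * existing communicator is reused, so each distinct communicator is stored
 * only once in communicators_local_unique.
 */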
void VMG::MPI::Settings::CreateLocalCommunicator(MPI_Comm& comm_global, const Grid& grid)
{
  int rank, comm_equal;
  MPI_Comm comm_new;
  std::set<MPI_Comm>::iterator iter;

  MPI_Comm_rank(comm_global, &rank);

  if (grid.Global().LocalSize().Product() > 0) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  } else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  if (comm_new != MPI_COMM_NULL) {
    for (iter=communicators_local_unique.begin(); iter!=communicators_local_unique.end(); ++iter) {
      if (*iter != MPI_COMM_NULL) {
        MPI_Comm_compare(comm_new, *iter, &comm_equal);
        assert(comm_equal != MPI_SIMILAR);
        if (comm_equal == MPI_IDENT || comm_equal == MPI_CONGRUENT) {
          MPI_Comm_free(&comm_new);
          comm_new = *iter;
          break;
        }
      }
    }
  }

  std::pair<std::set<MPI_Comm>::iterator, bool> insert_result = communicators_local_unique.insert(comm_new);
  communicators_local.insert(std::make_pair(KeyUnsorted(grid, 0), *insert_result.first));
}

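/**
 * Builds the send/receive datatypes needed to redistribute data from
 * grid_old to grid_new. Each rank publishes via MPI_Allgather which global
 * region it owns and which region it needs; the overlaps of these regions
 * determine which parts are received from and sent to which rank.
 */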
void VMG::MPI::Settings::AddDatatypeGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  MPI_Comm comm = CommunicatorGlobal(grid_old);

  // Insert into map
  std::pair< std::map<VMG::MPI::KeyUnsorted, VMG::MPI::DatatypesGlobal>::iterator, bool > insert_result =
    datatypes_global.insert(std::make_pair(VMG::MPI::KeyUnsorted(grid_old, grid_new, direction), VMG::MPI::DatatypesGlobal()));
  VMG::MPI::DatatypesGlobal& dt_global = insert_result.first->second;
  bool dt_is_new = insert_result.second;

  if (comm != MPI_COMM_NULL) {

    Index begin, end, offset_old, offset_new;
    int rank, size;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    std::vector<int> buffer;
    buffer.resize(6*size);

    // Compute local offset for ghost cells
    for (int i=0; i<3; ++i) {
      offset_old[i] = (grid_old.Local().HaloSize1()[i] > 0 ? grid_old.Local().Begin()[i] : 0);
      offset_new[i] = (grid_new.Local().HaloSize1()[i] > 0 ? grid_new.Local().Begin()[i] : 0);
    }

    // Publish which grid part this process can offer
    if (&grid_old == &grid_new) {
      for (int i=0; i<6; ++i)
        buffer[6*rank+i] = 0;
    } else {
      for (int i=0; i<3; ++i) {
        buffer[6*rank+i] = grid_old.Global().LocalBegin()[i];
        buffer[6*rank+i+3] = grid_old.Global().LocalEnd()[i];
      }
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who offers a useful grid part
      for (int i=0; i<size; ++i) {
        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());
        end = end.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process has a useful part
          dt_global.Receive().push_back(VMG::MPI::Datatype(grid_new.Local().SizeTotal(),
                                                           end - begin,
                                                           begin - grid_new.Global().LocalBegin() + offset_new,
                                                           i, 0, 0, true));
        }
      }
    }

    // Publish which grid parts this process needs
    for (int i=0; i<3; ++i) {
      buffer[6*rank+i] = grid_new.Global().LocalBegin()[i];
      buffer[6*rank+i+3] = grid_new.Global().LocalEnd()[i];
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who needs a part of my grid
      for (int i=0; i<size; ++i) {

        if ((i == rank) && (&grid_old == &grid_new))
          continue;

        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());
        end = end.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process needs one of my parts
          dt_global.Send().push_back(VMG::MPI::Datatype(grid_old.Local().SizeTotal(),
                                                        end - begin,
                                                        begin - grid_old.Global().LocalBegin() + offset_old,
                                                        i, 0, 0, true));
        }
      }
    }
  }
}

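/**
 * Returns a committed MPI datatype describing the subarray [begin, end) of a
 * local array of extent size_local. Created datatypes are cached in the
 * datatypes map (size_global and level only enter the cache key) and freed
 * in the destructor.
 */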
MPI_Datatype& VMG::MPI::Settings::Datatype(const Index& begin, const Index& end,
                                           const Index& size_local, const Index& size_global,
                                           const int& level)
{
  KeyUnsorted k(begin, end, size_local, size_global, level, 0);
  std::map<KeyUnsorted, MPI_Datatype>::iterator iter = datatypes.find(k);

  if (iter != datatypes.end())
    return iter->second;

  MPI_Datatype dt;
  Index sizes = size_local;
  Index subsizes = end - begin;
  Index starts = begin;

  MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt);
  MPI_Type_commit(&dt);

  return datatypes.insert(std::make_pair(k, dt)).first->second;
}

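/**
 * Determines the extent of the process grid spanned by the ranks of comm by
 * gathering all Cartesian coordinates and counting the distinct values in
 * each of the three dimensions.
 */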
Index VMG::MPI::Settings::GlobalDims(MPI_Comm comm, Index pos)
{
  std::set<int> unique_set[3];
  Index dims;

  int size;
  MPI_Comm_size(comm, &size);

  int* coordinates = new int[3*size];

  MPI_Allgather(pos.vec(), 3, MPI_INT, coordinates, 3, MPI_INT, comm);

  for (int i=0; i<size; ++i)
    for (int j=0; j<3; ++j)
      unique_set[j].insert(coordinates[3*i+j]);

  for (int j=0; j<3; ++j)
    dims[j] = static_cast<int>(unique_set[j].size());

  delete [] coordinates;

  return dims;
}

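/**
 * Builds a human-readable summary of all registered global and local
 * datatypes.
 */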
std::string VMG::MPI::Settings::ToString() const
{
  std::stringstream str;
  std::map<KeyUnsorted, VMG::MPI::DatatypesGlobal>::const_iterator iter_global;
  std::map<KeyUnsorted, VMG::MPI::DatatypesLocal>::const_iterator iter_local;

  str << "#### MPI_SETTINGS ####" << std::endl;

  for (iter_global = datatypes_global.begin(); iter_global != datatypes_global.end(); ++iter_global)
    str << iter_global->second << std::endl;

  for (iter_local = datatypes_local.begin(); iter_local != datatypes_local.end(); ++iter_local)
    str << iter_local->second << std::endl;

  str << "######################";

  return str.str();
}

std::ostream& VMG::MPI::operator<<(std::ostream& out, const VMG::MPI::Settings& settings)
{
  return out << settings.ToString();
}

VMG::MPI::Settings::Settings()
{
}

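/**
 * Frees all communicators and cached MPI datatypes and deletes those grids
 * in finer_grids and coarser_grids that have no father multigrid, i.e. the
 * temporary grids created in ComputeSettings().
 */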
VMG::MPI::Settings::~Settings()
{
  std::map<int, MPI_Comm>::iterator iter_comm_global;
  for (iter_comm_global=communicators_global.begin(); iter_comm_global!=communicators_global.end(); ++iter_comm_global)
    if (iter_comm_global->second != MPI_COMM_NULL)
      MPI_Comm_free(&iter_comm_global->second);

  /*
   * We simply copied some communicators, so we have to make sure that we free
   * each communicator exactly once.
   */
  std::set<MPI_Comm>::iterator iter_comm_set;
  for (iter_comm_set=communicators_local_unique.begin(); iter_comm_set!=communicators_local_unique.end(); ++iter_comm_set)
    if (*iter_comm_set != MPI_COMM_NULL) {
      MPI_Comm comm_temp = *iter_comm_set;
      MPI_Comm_free(&comm_temp);
    }

  std::map<KeyUnsorted, MPI_Datatype>::iterator iter_dt;
  for (iter_dt=datatypes.begin(); iter_dt!=datatypes.end(); ++iter_dt)
    if (iter_dt->second != MPI_DATATYPE_NULL)
      MPI_Type_free(&iter_dt->second);

  std::map<const Grid*, Grid*>::iterator iter_grid;
  for (iter_grid=finer_grids.begin(); iter_grid!=finer_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;

  for (iter_grid=coarser_grids.begin(); iter_grid!=coarser_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;
}