source: src/comm/mpi/settings.cpp@ e85cfd

Last change on this file since e85cfd was e85cfd, checked in by Julian Iseringhausen <isering@…>, 13 years ago

Work.

  • Property mode set to 100644
File size: 15.0 KB
/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif
#else
#error MPI is needed to compile CommMPISettings
#endif

#include <cassert>

#include "comm/comm.hpp"
#include "comm/mpi/settings.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"

using namespace VMG;

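// Returns the grid used as the finer counterpart of 'grid' during level
// transfer: either the next-finer multigrid level itself or a temporary grid.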
Grid& VMG::MPI::Settings::FinerGrid(const Grid& grid)
{
  assert(finer_grids.find(&grid) != finer_grids.end());
  return *finer_grids.find(&grid)->second;
}

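// Returns the grid used as the coarser counterpart of 'grid' during level
// transfer: either the next-coarser multigrid level itself or a temporary grid.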
Grid& VMG::MPI::Settings::CoarserGrid(const Grid& grid)
{
  assert(coarser_grids.find(&grid) != coarser_grids.end());
  return *coarser_grids.find(&grid)->second;
}

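// Returns the global communicator stored for the level of 'grid'.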
MPI_Comm VMG::MPI::Settings::CommunicatorGlobal(const Grid& grid) const
{
  assert(communicators_global.find(grid.Level()) != communicators_global.end());
  return communicators_global.find(grid.Level())->second;
}

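// Returns the local communicator stored for 'grid'.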
MPI_Comm VMG::MPI::Settings::CommunicatorLocal(const Grid& grid) const
{
  KeyUnsorted k(grid, 0);
  assert(communicators_local.find(k) != communicators_local.end());
  return communicators_local.find(k)->second;
}

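// Returns the precomputed datatypes for a two-grid transfer from grid_old to
// grid_new in the given direction.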
VMG::MPI::DatatypesGlobal& VMG::MPI::Settings::DatatypesGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  KeyUnsorted k(grid_old, grid_new, direction);
  assert(datatypes_global.find(k) != datatypes_global.end());
  return datatypes_global.find(k)->second;
}

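// Returns the precomputed datatypes for halo exchange on a single grid.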
VMG::MPI::DatatypesLocal& VMG::MPI::Settings::DatatypesLocal(const Grid& grid)
{
  KeyUnsorted k(grid, 0);
  assert(datatypes_local.find(k) != datatypes_local.end());
  return datatypes_local.find(k)->second;
}

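/*
 * Computes all MPI-related settings for the given multigrid hierarchies:
 * temporary coarser/finer grids wherever the parallel decompositions of
 * neighboring levels do not match, global and local communicators for each
 * level, and the derived datatypes used for single-grid halo exchange and
 * two-grid level transfers.
 *
 * Usage sketch (assuming the surrounding VMG setup has already created the
 * multigrids sol and rhs and a Cartesian comm_global):
 *
 *   settings.ComputeSettings(sol, rhs, comm_global);
 *   MPI_Comm comm_finest = settings.CommunicatorGlobal(sol(sol.MaxLevel()));
 */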
void VMG::MPI::Settings::ComputeSettings(Multigrid& sol, Multigrid& rhs, MPI_Comm& comm_global)
{

  std::map<const Grid*, Grid*>::const_iterator grid_iter;

  Comm& comm = *MG::GetComm();

  /*
   * Create coarser grids
   */
  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(sol(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&sol(i), &sol(i-1)));
    }else {
      coarser_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  //FIXME
  for (int i=rhs.MaxLevel(); i>rhs.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(rhs(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(rhs(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&rhs(i), &rhs(i-1)));
    }else {
      coarser_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create finer grids
   */
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(sol(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(sol(i+1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i+1).Global().LocalEnd())) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&sol(i), &sol(i+1)));
    }else {
      finer_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  for (int i=rhs.MinLevel(); i<rhs.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(rhs(i+1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(rhs(i+1).Global().LocalEnd())) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&rhs(i), &rhs(i+1)));
    }else {
      finer_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create global communicators
   */
  for (int i=sol.MinLevel()+1; i<sol.MaxLevel(); ++i)
    CreateGlobalCommunicator(comm_global, &sol(i), &CoarserGrid(sol(i+1)), &FinerGrid(sol(i-1)));

  CreateGlobalCommunicator(comm_global, &sol(sol.MinLevel()), &CoarserGrid(sol(sol.MinLevel()+1)));
  CreateGlobalCommunicator(comm_global, &sol(sol.MaxLevel()), &FinerGrid(sol(sol.MaxLevel()-1)));

  MPI_Comm my_comm_global;
  MPI_Comm_dup(comm_global, &my_comm_global);
  communicators_global.insert(std::make_pair(0, my_comm_global));

  /*
   * Create local communicators
   */
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, sol(i));

  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, FinerGrid(sol(i)));

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i)
    CreateLocalCommunicator(comm_global, CoarserGrid(sol(i)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    CreateLocalCommunicator(comm_global, comm.GetParticleGrid());

  /*
   * Create single grid datatypes
   */
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    datatypes_local.insert(std::make_pair(KeyUnsorted(sol(i), 0), VMG::MPI::DatatypesLocal(sol(i), CommunicatorLocal(sol(i)), true)));

  for (grid_iter=finer_grids.begin(); grid_iter!=finer_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  for (grid_iter=coarser_grids.begin(); grid_iter!=coarser_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    datatypes_local.insert(std::make_pair(KeyUnsorted(comm.GetParticleGrid(), 0), VMG::MPI::DatatypesLocal(comm.GetParticleGrid(), CommunicatorLocal(comm.GetParticleGrid()), true)));

  /*
   * Create two grid datatypes
   */
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {
    AddDatatypeGlobal(sol(i), CoarserGrid(sol(i+1)), 0);
    AddDatatypeGlobal(CoarserGrid(sol(i+1)), sol(i), 1);
  }

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {
    AddDatatypeGlobal(sol(i), FinerGrid(sol(i-1)), 0);
    AddDatatypeGlobal(FinerGrid(sol(i-1)), sol(i), 1);
  }
}

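/*
 * Creates a Cartesian communicator containing every rank that holds a
 * non-empty part of at least one of the given grids and stores it under the
 * level of grid_1. Ranks that do not participate store MPI_COMM_NULL.
 */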
void VMG::MPI::Settings::CreateGlobalCommunicator(MPI_Comm& comm_global, const Grid* grid_1, const Grid* grid_2, const Grid* grid_3)
{
  int rank;
  MPI_Comm comm_new;

  const bool in_communicator = (grid_1->Global().LocalSize().Product() > 0) ||
                               (grid_2 && grid_2->Global().LocalSize().Product() > 0) ||
                               (grid_3 && grid_3->Global().LocalSize().Product() > 0);

  MPI_Comm_rank(comm_global, &rank);

  if (in_communicator) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  }else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  communicators_global.insert(std::make_pair(grid_1->Level(), comm_new));
}

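/*
 * Creates a Cartesian communicator for all ranks that hold a non-empty part
 * of the given grid. Identical or congruent communicators are reused so that
 * each one is created and freed exactly once.
 */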
void VMG::MPI::Settings::CreateLocalCommunicator(MPI_Comm& comm_global, const Grid& grid)
{
  int rank, comm_equal;
  MPI_Comm comm_new;
  std::set<MPI_Comm>::iterator iter;

  MPI_Comm_rank(comm_global, &rank);

  if (grid.Global().LocalSize().Product() > 0) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  }else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  if (comm_new != MPI_COMM_NULL) {
    for (iter=communicators_local_unique.begin(); iter!=communicators_local_unique.end(); ++iter) {
      if (*iter != MPI_COMM_NULL) {
        MPI_Comm_compare(comm_new, *iter, &comm_equal);
        assert(comm_equal != MPI_SIMILAR);
        if (comm_equal == MPI_IDENT || comm_equal == MPI_CONGRUENT) {
          MPI_Comm_free(&comm_new);
          comm_new = *iter;
          break;
        }
      }
    }
  }

  std::pair<std::set<MPI_Comm>::iterator, bool> insert_result = communicators_local_unique.insert(comm_new);
  communicators_local.insert(std::make_pair(KeyUnsorted(grid, 0), *insert_result.first));
}

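/*
 * Computes the send and receive datatypes for a transfer from grid_old to
 * grid_new. Each rank publishes the part of grid_old it owns and the part of
 * grid_new it needs; the pairwise overlaps determine which subarrays are
 * exchanged with which ranks.
 */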
void VMG::MPI::Settings::AddDatatypeGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  MPI_Comm comm = CommunicatorGlobal(grid_old);

  // Insert into map
  std::pair< std::map<VMG::MPI::KeyUnsorted, VMG::MPI::DatatypesGlobal>::iterator, bool > insert_result =
    datatypes_global.insert(std::make_pair(VMG::MPI::KeyUnsorted(grid_old, grid_new, direction), VMG::MPI::DatatypesGlobal()));
  VMG::MPI::DatatypesGlobal& dt_global = insert_result.first->second;
  bool dt_is_new = insert_result.second;


  if (comm != MPI_COMM_NULL) {

    Index begin, end, offset_old, offset_new;
    int rank, size;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    std::vector<int> buffer;
    buffer.resize(6*size);

    // Compute local offset for ghost cells
    for (int i=0; i<3; ++i) {
      offset_old[i] = (grid_old.Local().HaloSize1()[i] > 0 ? grid_old.Local().Begin()[i] : 0);
      offset_new[i] = (grid_new.Local().HaloSize1()[i] > 0 ? grid_new.Local().Begin()[i] : 0);
    }

    // Publish which grid part this process can offer
    if (&grid_old == &grid_new) {
      for (int i=0; i<6; ++i)
        buffer[6*rank+i] = 0;
    }else {
      for (int i=0; i<3; ++i) {
        buffer[6*rank+i] = grid_old.Global().LocalBegin()[i];
        buffer[6*rank+i+3] = grid_old.Global().LocalEnd()[i];
      }
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who offers a useful grid part
      for (int i=0; i<size; ++i) {
        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());
        end = end.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process has a useful part
          dt_global.Receive().push_back(VMG::MPI::Datatype(grid_new.Local().SizeTotal(),
                                                           end - begin,
                                                           begin - grid_new.Global().LocalBegin() + offset_new,
                                                           i, 0, 0, true));
        }
      }
    }

    // Publish which grid parts this process needs
    for (int i=0; i<3; ++i) {
      buffer[6*rank+i] = grid_new.Global().LocalBegin()[i];
      buffer[6*rank+i+3] = grid_new.Global().LocalEnd()[i];
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who needs a part of my grid
      for (int i=0; i<size; ++i) {

        if ((i == rank) && (&grid_old == &grid_new))
          continue;

        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());
        end = end.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process needs one of my parts
          dt_global.Send().push_back(VMG::MPI::Datatype(grid_old.Local().SizeTotal(),
                                                        end - begin,
                                                        begin - grid_old.Global().LocalBegin() + offset_old,
                                                        i, 0, 0, true));
        }
      }
    }
  }
}

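/*
 * Returns a committed subarray datatype describing the region [begin, end)
 * of a local grid with total size size_local. Datatypes are cached and
 * reused on subsequent calls with the same key.
 */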
MPI_Datatype& VMG::MPI::Settings::Datatype(const Index& begin, const Index& end,
                                           const Index& size_local, const Index& size_global,
                                           const int& level)
{
  KeyUnsorted k(begin, end, size_local, size_global, level, 0);
  std::map<KeyUnsorted, MPI_Datatype>::iterator iter = datatypes.find(k);

  if (iter != datatypes.end())
    return iter->second;

  MPI_Datatype dt;
  Index sizes = size_local;
  Index subsizes = end - begin;
  Index starts = begin;

  MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt);
  MPI_Type_commit(&dt);

  return datatypes.insert(std::make_pair(k, dt)).first->second;
}

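/*
 * Determines the extent of the process grid spanned by the ranks in comm by
 * gathering all Cartesian coordinates and counting the distinct values per
 * dimension.
 */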
Index VMG::MPI::Settings::GlobalDims(MPI_Comm comm, Index pos)
{
  std::set<int> unique_set[3];
  Index dims;

  int size;
  MPI_Comm_size(comm, &size);

  int* coordinates = new int[3*size];

  MPI_Allgather(pos.vec(), 3, MPI_INT, coordinates, 3, MPI_INT, comm);

  for (int i=0; i<size; ++i)
    for (int j=0; j<3; ++j)
      unique_set[j].insert(coordinates[3*i+j]);

  for (int j=0; j<3; ++j)
    dims[j] = static_cast<int>(unique_set[j].size());

  delete [] coordinates;

  return dims;
}

VMG::MPI::Settings::Settings()
{
}

VMG::MPI::Settings::~Settings()
{
  std::map<int, MPI_Comm>::iterator iter_comm_global;
  for (iter_comm_global=communicators_global.begin(); iter_comm_global!=communicators_global.end(); ++iter_comm_global)
    if (iter_comm_global->second != MPI_COMM_NULL)
      MPI_Comm_free(&iter_comm_global->second);

  /*
   * We simply copied some communicators so we have to make sure that we free
   * each communicator exactly once
   */
  std::set<MPI_Comm>::iterator iter_comm_set;
  for (iter_comm_set=communicators_local_unique.begin(); iter_comm_set!=communicators_local_unique.end(); ++iter_comm_set)
    if (*iter_comm_set != MPI_COMM_NULL) {
      MPI_Comm comm_temp = *iter_comm_set;
      MPI_Comm_free(&comm_temp);
    }

  std::map<KeyUnsorted, MPI_Datatype>::iterator iter_dt;
  for (iter_dt=datatypes.begin(); iter_dt!=datatypes.end(); ++iter_dt)
    if (iter_dt->second != MPI_DATATYPE_NULL)
      MPI_Type_free(&iter_dt->second);

  std::map<const Grid*, Grid*>::iterator iter_grid;
  for (iter_grid=finer_grids.begin(); iter_grid!=finer_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;

  for (iter_grid=coarser_grids.begin(); iter_grid!=coarser_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;
}