source: ThirdParty/vmg/src/comm/mpi/settings.cpp@ 79b089

Last change on this file since 79b089 was 7faa5c, checked in by Frederik Heber <heber@…>, 8 years ago

Merge commit 'de061d9d851257a04e924d4472df4523d33bb08b' as 'ThirdParty/vmg'

/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file settings.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Mon Jan 2 18:45:22 2012
 *
 * @brief Computes several MPI-related settings.
 *
 */

#ifdef HAVE_CONFIG_H
#include <libvmg_config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif
#else
#error MPI is needed to compile CommMPISettings
#endif

#include <cassert>
#include <sstream>
#include <string>

#include "base/index.hpp"
#include "comm/comm.hpp"
#include "comm/mpi/settings.hpp"
#include "discretization/boundary_value_setter.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"

using namespace VMG;

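/**
 * Returns the finer grid associated with the given grid. Depending on the
 * decomposition this is either the next finer multigrid level itself or a
 * temporary grid set up in ComputeSettings().
 */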
Grid& VMG::MPI::Settings::FinerGrid(const Grid& grid)
{
  assert(finer_grids.find(&grid) != finer_grids.end());
  return *finer_grids.find(&grid)->second;
}

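/**
 * Returns the coarser grid associated with the given grid, analogous to
 * FinerGrid().
 */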
Grid& VMG::MPI::Settings::CoarserGrid(const Grid& grid)
{
  assert(coarser_grids.find(&grid) != coarser_grids.end());
  return *coarser_grids.find(&grid)->second;
}

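/**
 * Returns the global communicator registered for the level of the given grid.
 */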
MPI_Comm VMG::MPI::Settings::CommunicatorGlobal(const Grid& grid) const
{
  assert(communicators_global.find(grid.Level()) != communicators_global.end());
  return communicators_global.find(grid.Level())->second;
}

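/**
 * Returns the local communicator registered for the given grid.
 */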
MPI_Comm VMG::MPI::Settings::CommunicatorLocal(const Grid& grid) const
{
  KeyUnsorted k(grid, 0);
  assert(communicators_local.find(k) != communicators_local.end());
  return communicators_local.find(k)->second;
}

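/**
 * Returns the two-grid (global) datatypes registered for the transfer from
 * grid_old to grid_new in the given direction.
 */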
VMG::MPI::DatatypesGlobal& VMG::MPI::Settings::DatatypesGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  KeyUnsorted k(grid_old, grid_new, direction);
  assert(datatypes_global.find(k) != datatypes_global.end());
  return datatypes_global.find(k)->second;
}

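/**
 * Returns the single-grid (local) datatypes registered for the given grid.
 */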
VMG::MPI::DatatypesLocal& VMG::MPI::Settings::DatatypesLocal(const Grid& grid)
{
  KeyUnsorted k(grid, 0);
  assert(datatypes_local.find(k) != datatypes_local.end());
  return datatypes_local.find(k)->second;
}

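/**
 * Computes all MPI-related settings for the given multigrid hierarchies:
 * intermediate coarser/finer grids, global and local communicators,
 * single-grid and two-grid datatypes, and the ranks holding boundary values.
 */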
void VMG::MPI::Settings::ComputeSettings(Multigrid& sol, Multigrid& rhs, MPI_Comm& comm_global)
{

  std::map<const Grid*, Grid*>::const_iterator grid_iter;

  Comm& comm = *MG::GetComm();

  /*
   * Create coarser grids
   */
  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(sol(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&sol(i), &sol(i-1)));
    } else {
      coarser_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  //FIXME
  for (int i=rhs.MaxLevel(); i>rhs.MinLevel(); --i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(rhs(i), comm.BoundaryConditions());

    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(rhs(i-1).Global().LocalBegin()) &&
        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(rhs(i-1).Global().LocalEnd())) {
      delete temp_grid;
      coarser_grids.insert(std::make_pair(&rhs(i), &rhs(i-1)));
    } else {
      coarser_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create finer grids
   */
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(sol(i), comm.BoundaryConditions());
    if (temp_grid->Global().LocalBegin() == sol(i+1).Global().LocalBegin() &&
        temp_grid->Global().LocalEnd() == sol(i+1).Global().LocalEnd()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&sol(i), &sol(i+1)));
    } else {
      finer_grids.insert(std::make_pair(&sol(i), temp_grid));
    }

  }

  for (int i=rhs.MinLevel(); i<rhs.MaxLevel(); ++i) {

    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(rhs(i), comm.BoundaryConditions());
    if (temp_grid->Global().LocalBegin() == rhs(i+1).Global().LocalBegin() &&
        temp_grid->Global().LocalEnd() == rhs(i+1).Global().LocalEnd()) {
      delete temp_grid;
      finer_grids.insert(std::make_pair(&rhs(i), &rhs(i+1)));
    } else {
      finer_grids.insert(std::make_pair(&rhs(i), temp_grid));
    }

  }

  /*
   * Create global communicators
   */
  for (int i=sol.MinLevel()+1; i<sol.MaxLevel(); ++i)
    CreateGlobalCommunicator(comm_global, &sol(i), &CoarserGrid(sol(i+1)), &FinerGrid(sol(i-1)));

  CreateGlobalCommunicator(comm_global, &sol(sol.MinLevel()), &CoarserGrid(sol(sol.MinLevel()+1)));
  CreateGlobalCommunicator(comm_global, &sol(sol.MaxLevel()), &FinerGrid(sol(sol.MaxLevel()-1)));

  MPI_Comm my_comm_global;
  MPI_Comm_dup(comm_global, &my_comm_global);
  communicators_global.insert(std::make_pair(0, my_comm_global));

  /*
   * Create local communicators
   */
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, sol(i));

  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i)
    CreateLocalCommunicator(comm_global, FinerGrid(sol(i)));

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i)
    CreateLocalCommunicator(comm_global, CoarserGrid(sol(i)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    CreateLocalCommunicator(comm_global, comm.GetParticleGrid());

  /*
   * Create single grid datatypes
   */
  for (int i=sol.MinLevel(); i<=sol.MaxLevel(); ++i)
    datatypes_local.insert(std::make_pair(KeyUnsorted(sol(i), 0), VMG::MPI::DatatypesLocal(sol(i), CommunicatorLocal(sol(i)), true)));

  for (grid_iter=finer_grids.begin(); grid_iter!=finer_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  for (grid_iter=coarser_grids.begin(); grid_iter!=coarser_grids.end(); ++grid_iter)
    datatypes_local.insert(std::make_pair(KeyUnsorted(*grid_iter->second, 0), VMG::MPI::DatatypesLocal(*grid_iter->second, CommunicatorLocal(*grid_iter->second), true)));

  if (MG::GetFactory().TestObject("PARTICLE_NEAR_FIELD_CELLS"))
    datatypes_local.insert(std::make_pair(KeyUnsorted(comm.GetParticleGrid(), 0), VMG::MPI::DatatypesLocal(comm.GetParticleGrid(), CommunicatorLocal(comm.GetParticleGrid()), true)));

  /*
   * Create two grid datatypes
   */
  for (int i=sol.MinLevel(); i<sol.MaxLevel(); ++i) {
    AddDatatypeGlobal(sol(i), CoarserGrid(sol(i+1)), 0);
    AddDatatypeGlobal(CoarserGrid(sol(i+1)), sol(i), 1);
  }

  for (int i=sol.MaxLevel(); i>sol.MinLevel(); --i) {
    AddDatatypeGlobal(sol(i), FinerGrid(sol(i-1)), 0);
    AddDatatypeGlobal(FinerGrid(sol(i-1)), sol(i), 1);
  }

  InitializeBoundaryValues();
}

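/**
 * Creates a Cartesian communicator containing all processes that hold a
 * non-empty part of at least one of the given grids and stores it under the
 * level of grid_1. Processes without data receive MPI_COMM_NULL.
 */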
void VMG::MPI::Settings::CreateGlobalCommunicator(MPI_Comm& comm_global, const Grid* grid_1, const Grid* grid_2, const Grid* grid_3)
{
  int rank;
  MPI_Comm comm_new;

  const bool in_communicator = (grid_1->Global().LocalSize().Product() > 0) ||
                               (grid_2 && grid_2->Global().LocalSize().Product() > 0) ||
                               (grid_3 && grid_3->Global().LocalSize().Product() > 0);

  MPI_Comm_rank(comm_global, &rank);

  if (in_communicator) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  } else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  communicators_global.insert(std::make_pair(grid_1->Level(), comm_new));
}

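/**
 * Creates a Cartesian communicator for all processes that hold a non-empty
 * part of the given grid. Communicators congruent to an already existing one
 * are reused so that each unique communicator is stored (and later freed)
 * exactly once.
 */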
void VMG::MPI::Settings::CreateLocalCommunicator(MPI_Comm& comm_global, const Grid& grid)
{
  int rank, comm_equal;
  MPI_Comm comm_new;
  std::set<MPI_Comm>::iterator iter;

  MPI_Comm_rank(comm_global, &rank);

  if (grid.Global().LocalSize().Product() > 0) {
    Index dims, periods, coords;
    MPI_Comm comm_temp;
    MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
    MPI_Comm_split(comm_global, 1, rank, &comm_temp);
    dims = GlobalDims(comm_temp, coords);
    MPI_Cart_create(comm_temp, 3, dims.vec(), periods.vec(), 0, &comm_new);
    MPI_Comm_free(&comm_temp);
  } else {
    MPI_Comm_split(comm_global, MPI_UNDEFINED, rank, &comm_new);
  }

  if (comm_new != MPI_COMM_NULL) {
    for (iter=communicators_local_unique.begin(); iter!=communicators_local_unique.end(); ++iter) {
      if (*iter != MPI_COMM_NULL) {
        MPI_Comm_compare(comm_new, *iter, &comm_equal);
        assert(comm_equal != MPI_SIMILAR);
        if (comm_equal == MPI_IDENT || comm_equal == MPI_CONGRUENT) {
          MPI_Comm_free(&comm_new);
          comm_new = *iter;
          break;
        }
      }
    }
  }

  std::pair<std::set<MPI_Comm>::iterator, bool> insert_result = communicators_local_unique.insert(comm_new);
  communicators_local.insert(std::make_pair(KeyUnsorted(grid, 0), *insert_result.first));
}

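/**
 * Computes the send and receive datatypes for the transfer between grid_old
 * and grid_new. Each process publishes via MPI_Allgather which part of the
 * global grid it owns and which part it needs; datatypes are created for
 * every non-empty overlap.
 */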
void VMG::MPI::Settings::AddDatatypeGlobal(const Grid& grid_old, const Grid& grid_new, const int& direction)
{
  MPI_Comm comm = CommunicatorGlobal(grid_old);

  // Insert into map
  std::pair< std::map<VMG::MPI::KeyUnsorted, VMG::MPI::DatatypesGlobal>::iterator, bool > insert_result =
    datatypes_global.insert(std::make_pair(VMG::MPI::KeyUnsorted(grid_old, grid_new, direction), VMG::MPI::DatatypesGlobal()));
  VMG::MPI::DatatypesGlobal& dt_global = insert_result.first->second;
  bool dt_is_new = insert_result.second;

  if (comm != MPI_COMM_NULL) {

    Index begin, end, offset_old, offset_new;
    int rank, size;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    std::vector<int> buffer;
    buffer.resize(6*size);

    // Compute local offset for ghost cells
    for (int i=0; i<3; ++i) {
      offset_old[i] = (grid_old.Local().HaloSize1()[i] > 0 ? grid_old.Local().Begin()[i] : 0);
      offset_new[i] = (grid_new.Local().HaloSize1()[i] > 0 ? grid_new.Local().Begin()[i] : 0);
    }

    // Publish which grid part this process can offer
    if (&grid_old == &grid_new) {
      for (int i=0; i<6; ++i)
        buffer[6*rank+i] = 0;
    } else {
      for (int i=0; i<3; ++i) {
        buffer[6*rank+i] = grid_old.Global().LocalBegin()[i];
        buffer[6*rank+i+3] = grid_old.Global().LocalEnd()[i];
      }
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who offers a useful grid part
      for (int i=0; i<size; ++i) {
        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());
        end = end.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process has a useful part
          dt_global.Receive().push_back(VMG::MPI::Datatype(grid_new.Local().SizeTotal(),
                                                           end - begin,
                                                           begin - grid_new.Global().LocalBegin() + offset_new,
                                                           i, 0, 0, true));
        }
      }
    }

    // Publish which grid parts this process needs
    for (int i=0; i<3; ++i) {
      buffer[6*rank+i] = grid_new.Global().LocalBegin()[i];
      buffer[6*rank+i+3] = grid_new.Global().LocalEnd()[i];
    }

    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &buffer.front(), 6, MPI_INT, comm);

    if (dt_is_new) {

      // Decide who needs a part of my grid
      for (int i=0; i<size; ++i) {

        if ((i == rank) && (&grid_old == &grid_new))
          continue;

        for (int j=0; j<3; ++j) {
          begin[j] = buffer[6*i+j];
          end[j] = buffer[6*i+j+3];
        }

        begin = begin.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());
        end = end.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());

        if ((end-begin).Product() > 0) {
          // This process needs one of my parts
          dt_global.Send().push_back(VMG::MPI::Datatype(grid_old.Local().SizeTotal(),
                                                        end - begin,
                                                        begin - grid_old.Global().LocalBegin() + offset_old,
                                                        i, 0, 0, true));
        }
      }
    }
  }
}

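/**
 * Returns a committed subarray datatype for the region [begin, end) within a
 * local array of extent size_local, creating and caching it on first use.
 */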
MPI_Datatype& VMG::MPI::Settings::Datatype(const Index& begin, const Index& end,
                                           const Index& size_local, const Index& size_global,
                                           const int& level)
{
  KeyUnsorted k(begin, end, size_local, size_global, level, 0);
  std::map<KeyUnsorted, MPI_Datatype>::iterator iter = datatypes.find(k);

  if (iter != datatypes.end())
    return iter->second;

  MPI_Datatype dt;
  Index sizes = size_local;
  Index subsizes = end - begin;
  Index starts = begin;

  MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt);
  MPI_Type_commit(&dt);

  return datatypes.insert(std::make_pair(k, dt)).first->second;
}

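/**
 * Computes the extent of the process grid spanned by the processes in comm
 * by counting the distinct Cartesian coordinates in each dimension.
 */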
Index VMG::MPI::Settings::GlobalDims(MPI_Comm comm, Index pos)
{
  std::set<int> unique_set[3];
  Index dims;

  int size;
  MPI_Comm_size(comm, &size);

  int* coordinates = new int[3*size];

  MPI_Allgather(pos.vec(), 3, MPI_INT, coordinates, 3, MPI_INT, comm);

  for (int i=0; i<size; ++i)
    for (int j=0; j<3; ++j)
      unique_set[j].insert(coordinates[3*i+j]);

  for (int j=0; j<3; ++j)
    dims[j] = static_cast<int>(unique_set[j].size());

  delete [] coordinates;

  return dims;
}

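/**
 * Determines for each registered boundary value the rank of the process whose
 * local domain on the globally finest level contains it.
 */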
void VMG::MPI::Settings::InitializeBoundaryValues()
{
  assert(bv_ranks.size() == 0);

  if (MG::GetFactory().TestObject("BOUNDARY_VALUE_SETTER")) {

    Index coord;

    const int level_index = MG::GetRhs()->MaxLevel() - MG::GetRhs()->GlobalMaxLevel();
    const std::vector<BoundaryValue>& bvs = MG::GetBoundaryValueSetter()->BoundaryValues();
    const std::map<Index, std::vector<GlobalIndices> >& global = MG::GetComm()->DecomposedGlobal();

    assert(global.find(0)->second[level_index].BoundaryType() == GlobalMax);

    MPI_Comm comm = CommunicatorLocal((*MG::GetRhs())(MG::GetRhs()->GlobalMaxLevel()));

    bv_ranks.reserve(bvs.size());

    for (std::vector<BoundaryValue>::const_iterator iter_b = bvs.begin(); iter_b != bvs.end(); ++iter_b) {
      for (std::map<Index, std::vector<GlobalIndices> >::const_iterator iter_g = global.begin(); iter_g != global.end(); ++iter_g) {
        if (iter_b->GetIndex().IsComponentwiseGreaterOrEqual(iter_g->second[level_index].LocalBegin()) &&
            iter_b->GetIndex().IsComponentwiseLess(iter_g->second[level_index].LocalEnd())) {
          bv_ranks.push_back(0);
          coord = iter_g->first;
          MPI_Cart_rank(comm, coord.vec(), &bv_ranks.back());
          break;
        }
      }
    }
  }
}

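/**
 * Returns a printable summary of all global and local datatypes.
 */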
std::string VMG::MPI::Settings::ToString() const
{
  std::stringstream str;
  std::map<KeyUnsorted, VMG::MPI::DatatypesGlobal>::const_iterator iter_global;
  std::map<KeyUnsorted, VMG::MPI::DatatypesLocal>::const_iterator iter_local;

  str << "#### MPI_SETTINGS ####" << std::endl;

  for (iter_global = datatypes_global.begin(); iter_global != datatypes_global.end(); ++iter_global)
    str << iter_global->second << std::endl;

  for (iter_local = datatypes_local.begin(); iter_local != datatypes_local.end(); ++iter_local)
    str << iter_local->second << std::endl;

  str << "######################";

  return str.str();
}

std::ostream& VMG::MPI::operator<<(std::ostream& out, const VMG::MPI::Settings& settings)
{
  return out << settings.ToString();
}

VMG::MPI::Settings::Settings()
{
}

VMG::MPI::Settings::~Settings()
{
  std::map<int, MPI_Comm>::iterator iter_comm_global;
  for (iter_comm_global=communicators_global.begin(); iter_comm_global!=communicators_global.end(); ++iter_comm_global)
    if (iter_comm_global->second != MPI_COMM_NULL)
      MPI_Comm_free(&iter_comm_global->second);

  /*
   * We simply copied some communicators so we have to make sure that we free
   * each communicator exactly once
   */
  std::set<MPI_Comm>::iterator iter_comm_set;
  for (iter_comm_set=communicators_local_unique.begin(); iter_comm_set!=communicators_local_unique.end(); ++iter_comm_set)
    if (*iter_comm_set != MPI_COMM_NULL) {
      MPI_Comm comm_temp = *iter_comm_set;
      MPI_Comm_free(&comm_temp);
    }

  std::map<KeyUnsorted, MPI_Datatype>::iterator iter_dt;
  for (iter_dt=datatypes.begin(); iter_dt!=datatypes.end(); ++iter_dt)
    if (iter_dt->second != MPI_DATATYPE_NULL)
      MPI_Type_free(&iter_dt->second);

  std::map<const Grid*, Grid*>::iterator iter_grid;
  for (iter_grid=finer_grids.begin(); iter_grid!=finer_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;

  for (iter_grid=coarser_grids.begin(); iter_grid!=coarser_grids.end(); ++iter_grid)
    if (iter_grid->second->Father() == NULL)
      delete iter_grid->second;
}