source: src/comm/comm_mpi.cpp@ ac6d04

Last change on this file was ac6d04, checked in by Julian Iseringhausen <isering@…>, 14 years ago

Merge recent changes of the vmg library into ScaFaCos.

Includes a fix for the communication problems on Jugene.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1666 5161e1c8-67bf-11de-9fd5-51895aff932f

1/**
2 * @file comm_mpi.cpp
3 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
4 * @date Wed Jun 16 13:21:06 2010
5 *
6 * @brief Class for MPI-based communication.
7 *
8 */
9
10#ifdef HAVE_CONFIG_H
11#include <config.h>
12#endif
13
14#ifdef HAVE_MPI
15
16#include <mpi.h>
17#ifdef HAVE_MARMOT
18#include <enhancempicalls.h>
19#include <sourceinfompicalls.h>
20#endif
21
22#ifdef HAVE_BOOST_FILESYSTEM
23#include <boost/filesystem.hpp>
24namespace fs = boost::filesystem;
25#endif
26
27#ifdef HAVE_VTK
28#include <vtkAbstractArray.h>
29#include <vtkImageData.h>
30#include <vtkPointData.h>
31#include <vtkSmartPointer.h>
32#include <vtkXMLImageDataWriter.h>
33#endif
34
35#include <cstring>
36#include <sstream>
37
38#include "base/helper.hpp"
39#include "base/linked_cell_list.hpp"
40#include "base/tuple.hpp"
41#include "comm/comm_mpi.hpp"
42#include "comm/mpi/datatypes_local.hpp"
43#include "grid/grid.hpp"
44#include "grid/multigrid.hpp"
45#include "grid/tempgrid.hpp"
46#include "mg.hpp"
47#include "base/timer.hpp"
48static char print_buffer[512];
49
50using namespace VMG;
51
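// Helper loops over a set of precomputed MPI datatypes: post non-blocking
// sends/receives (plain or buffered) for one grid, and afterwards either
// overwrite (GridReplace) or accumulate (GridSum) the received buffers
// back into the grid.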
52void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
53{
54 for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
55 iter->Isend(grid, tag_start, comm, Request());
56}
57
58void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
59{
60 for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
61 iter->Irecv(grid, tag_start, comm, Request());
62}
63
64void CommMPI::IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
65{
66 for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
67 iter->IsendBuffered(grid, tag_start, comm, Request());
68}
69
70void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
71{
72 for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
73 iter->IrecvBuffered(tag_start, comm, Request());
74}
75
76void CommMPI::ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
77{
78 for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!= types.end(); ++iter)
79 iter->GridReplace(grid);
80}
81
82void CommMPI::AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
83{
84 for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!= types.end(); ++iter)
85 iter->GridSum(grid);
86}
87
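// Transfer grid data between two (possibly differently distributed) grids
// using the global datatypes prepared in settings. CommSubgrid overwrites
// the target values, CommAddSubgrid accumulates them.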
88void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
89{
90 MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
91 if (comm != MPI_COMM_NULL) {
92 VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
93 IrecvAllBuffered(datatypes.Receive(), comm, 0411);
94 IsendAllBuffered(grid_old, datatypes.Send(), comm, 0411);
95 WaitAll();
96 ReplaceBufferAll(grid_new, datatypes.Receive());
97 }
98}
99
100void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
101{
102 MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
103 if (comm != MPI_COMM_NULL) {
104 VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
105 IrecvAllBuffered(datatypes.Receive(), comm, 1806);
106 IsendAllBuffered(grid_old, datatypes.Send(), comm, 1806);
107 WaitAll();
108 AddBufferAll(grid_new, datatypes.Receive());
109 }
110}
111
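// Halo exchange towards the ghost layers: receive into the halo datatypes,
// send the near-boundary data, then overwrite the local halo values.
// The AsyncStart/AsyncFinish pair splits this into a non-blocking start
// phase and a completion phase.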
112void CommMPI::CommToGhosts(Grid& grid)
113{
114 MPI_Comm comm = settings.CommunicatorLocal(grid);
115 if (comm != MPI_COMM_NULL) {
116 VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
117 IrecvAllBuffered(types.Halo(), comm, 2310);
118 IsendAllBuffered(grid, types.NB(), comm, 2310);
119 WaitAll();
120 ReplaceBufferAll(grid, types.Halo());
121 }
122}
123
124void CommMPI::CommToGhostsAsyncStart(Grid& grid)
125{
126 MPI_Comm comm = settings.CommunicatorLocal(grid);
127 if (comm != MPI_COMM_NULL) {
128 VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
129 IrecvAllBuffered(types.Halo(), comm, 2412);
130 IsendAllBuffered(grid, types.NB(), comm, 2412);
131 TestAll();
132 }
133}
134
135void CommMPI::CommToGhostsAsyncFinish(Grid& grid)
136{
137 WaitAll();
138 ReplaceBufferAll(grid, settings.DatatypesLocal(grid).Halo());
139}
140
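// Reverse halo exchange: send the local halo contents back to the owning
// neighbors and add them onto the interior points, presumably to collect
// contributions that were written into ghost cells.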
141void CommMPI::CommFromGhosts(Grid& grid)
142{
143 MPI_Comm comm = settings.CommunicatorLocal(grid);
144 if (comm != MPI_COMM_NULL) {
145 VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
146 IrecvAllBuffered(types.NB(), comm, 1337);
147 IsendAllBuffered(grid, types.Halo(), comm, 1337);
148 WaitAll();
149 AddBufferAll(grid, types.NB());
150 }
151}
152
153void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
154{
155 MPI_Comm comm = settings.CommunicatorLocal(grid);
156 if (comm != MPI_COMM_NULL) {
157 VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
158 IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
159 IsendAllBuffered(grid, types.Halo(), comm, 0xc0ffee);
160 TestAll();
161 }
162}
163
164void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
165{
166 WaitAll();
167 AddBufferAll(grid, settings.DatatypesLocal(grid).NB());
168}
169
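// Redistribute particles to the ranks owning the grid region they fall into:
// gather all local grid extents, bin each particle by its grid index,
// exchange per-rank counts with MPI_Alltoall, then ship positions, charges
// and original indices with non-blocking point-to-point messages.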
170void CommMPI::CommParticles(const Grid& grid, std::list<Particle::Particle>& particles)
171{
172 Factory& factory = MG::GetFactory();
173
174 const vmg_int& num_particles_local = factory.GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
175 vmg_float* x = factory.GetObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY");
176 vmg_float* q = factory.GetObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY");
177
178 int rank, size;
179 MPI_Comm_rank(comm_global, &rank);
180 MPI_Comm_size(comm_global, &size);
181
182#ifndef VMG_ONE_SIDED
183 vmg_int* receiver;
184 if (!factory.TestObject("PARTICLE_RECEIVER_ARRAY"))
185 new ObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY", size);
186 receiver = factory.GetObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY");
187#endif
188
189 Index index;
190 std::vector<int> global_extent(6*size);
191 std::vector<int> send_sizes(size);
192 std::vector<int> recv_sizes(size);
193 std::vector<Index> begin_remote(size);
194 std::vector<Index> end_remote(size);
195 std::vector< std::vector<vmg_float> > send_buffer_x(size);
196 std::vector< std::vector<vmg_float> > send_buffer_q(size);
197 std::vector< std::vector<vmg_int> > send_buffer_ind(size);
198 std::vector< std::vector<vmg_float> > recv_buffer_x(size);
199 std::vector< std::vector<vmg_float> > recv_buffer_q(size);
200 std::vector< std::vector<vmg_int> > recv_buffer_ind(size);
201 MPI_Datatype dt_temp;
202
203 std::memcpy(&global_extent[6*rank], grid.Global().LocalBegin().vec(), 3*sizeof(int));
204 std::memcpy(&global_extent[6*rank+3], grid.Global().LocalEnd().vec(), 3*sizeof(int));
205
206 MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &global_extent.front(), 6, MPI_INT, comm_global);
207
208 for (int i=0; i<size; ++i) {
209 begin_remote[i] = static_cast<Index>(&global_extent[6*i]);
210 end_remote[i] = static_cast<Index>(&global_extent[6*i+3]);
211 }
212
213 for (int i=0; i<num_particles_local; ++i) {
214 index = static_cast<Index>((Vector(&x[3*i]) - grid.Extent().Begin()) / grid.Extent().MeshWidth());
215 for (int j=0; j<size; ++j)
216 if (index.IsInBounds(begin_remote[j], end_remote[j])) {
217 send_buffer_x[j].push_back(x[3*i+0]);
218 send_buffer_x[j].push_back(x[3*i+1]);
219 send_buffer_x[j].push_back(x[3*i+2]);
220 send_buffer_q[j].push_back(q[i]);
221 send_buffer_ind[j].push_back(i);
222 break;
223 }
224 }
225
226 /*
227 * Communicate which process gets how many particles
228 */
229 for (int i=0; i<size; ++i)
230 send_sizes[i] = send_buffer_q[i].size();
231
232 MPI_Alltoall(&send_sizes.front(), 1, MPI_INT, &recv_sizes.front(), 1, MPI_INT, comm_global);
233
234 /*
235 * Send particles
236 */
237 for (int i=0; i<size; ++i) {
238
239 if (!send_buffer_q[i].empty()) {
240 MPI_Isend(&send_buffer_x[i].front(), send_buffer_x[i].size(), MPI_DOUBLE, i, 0, comm_global, &Request());
241 MPI_Isend(&send_buffer_q[i].front(), send_buffer_q[i].size(), MPI_DOUBLE, i, 1, comm_global, &Request());
242 MPI_Isend(&send_buffer_ind[i].front(), send_buffer_ind[i].size(), MPI_INT, i, 2, comm_global, &Request());
243 }
244
245#ifndef VMG_ONE_SIDED
246 receiver[i] = send_buffer_q[i].size();
247#endif
248 }
249
250 /*
251 * Receive particles
252 */
253 for (int i=0; i<size; ++i) {
254
255 if (recv_sizes[i] > 0) {
256
257 recv_buffer_x[i].resize(3*recv_sizes[i]);
258 recv_buffer_q[i].resize(recv_sizes[i]);
259 recv_buffer_ind[i].resize(recv_sizes[i]);
260
261 MPI_Irecv(&recv_buffer_x[i].front(), 3*recv_sizes[i], MPI_DOUBLE, i, 0, comm_global, &Request());
262 MPI_Irecv(&recv_buffer_q[i].front(), recv_sizes[i], MPI_DOUBLE, i, 1, comm_global, &Request());
263 MPI_Irecv(&recv_buffer_ind[i].front(), recv_sizes[i], MPI_INT, i, 2, comm_global, &Request());
264
265 }
266
267 }
268
269 WaitAll();
270
271 particles.clear();
272
273 for (int i=0; i<size; ++i)
274 for (int j=0; j<recv_sizes[i]; ++j)
275 particles.push_back(Particle::Particle(&recv_buffer_x[i][3*j], recv_buffer_q[i][j], 0.0, 0.0, i, recv_buffer_ind[i][j]));
276}
277
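// Return the computed potentials to the ranks the particles came from,
// matched via the per-rank receive counts recorded in CommParticles.
// A one-sided (MPI_Put) variant exists but is currently disabled.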
278void CommMPI::CommParticlesBack(std::list<Particle::Particle>& particles)
279{
280 std::list<Particle::Particle>::iterator iter;
281
282#ifdef VMG_ONE_SIDED
283 // if (!win_created) {
284 // vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
285 // const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
286 // MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
287 // win_created = true;
288 // }
289
290 // MPI_Win_fence(MPI_MODE_NOPRECEDE, win);
291
292 // for (iter=particles.begin(); iter!=particles.end(); ++iter)
293 // MPI_Put(&iter->Pot(), 1, MPI_DOUBLE, iter->Rank(), iter->Index(), 1, MPI_DOUBLE, win);
294
295 // MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);
296#else
297 int rank, size;
298 MPI_Comm_rank(comm_global, &rank);
299 MPI_Comm_size(comm_global, &size);
300
301 std::vector< std::vector<vmg_float> > send_buffer_pot(size);
302 std::vector< std::vector<vmg_float> > recv_buffer_pot(size);
303 std::vector< std::vector<vmg_int> > send_buffer_index(size);
304 std::vector< std::vector<vmg_int> > recv_buffer_index(size);
305
306 vmg_int* size_receive = MG::GetFactory().GetObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY");
307 vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
308
309 // Build send buffer
310 for (iter=particles.begin(); iter!=particles.end(); ++iter) {
311 send_buffer_pot[iter->Rank()].push_back(iter->Pot());
312 send_buffer_index[iter->Rank()].push_back(iter->Index());
313 }
314
315 // Send potentials
316 for (int i=0; i<size; ++i) {
317 if (!send_buffer_pot[i].empty()) {
318 MPI_Isend(&send_buffer_pot[i].front(), send_buffer_pot[i].size(), MPI_DOUBLE, i, 699+rank, comm_global, &Request());
319 MPI_Isend(&send_buffer_index[i].front(), send_buffer_index[i].size(), MPI_INT, i, 32111+rank, comm_global, &Request());
320 }
321 }
322
323 //Receive potentials
324 for (int i=0; i<size; ++i) {
325 if (size_receive[i] > 0) {
326 recv_buffer_pot[i].resize(size_receive[i]);
327 recv_buffer_index[i].resize(size_receive[i]);
328 MPI_Irecv(&recv_buffer_pot[i].front(), size_receive[i], MPI_DOUBLE, i, 699+i, comm_global, &Request());
329 MPI_Irecv(&recv_buffer_index[i].front(), size_receive[i], MPI_INT, i, 32111+i, comm_global, &Request());
330 }
331 }
332
333 WaitAll();
334
335 // Add potential values
336 for (int i=0; i<size; ++i)
337 for (unsigned int j=0; j<size_receive[i]; ++j)
338 p[recv_buffer_index[i][j]] = recv_buffer_pot[i][j];
339#endif
340
341}
342
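// Copy particles from the near-boundary cells of the linked-cell list into
// the halo cells of neighboring ranks. Positions are shifted by the domain
// extent, apparently to account for periodic wrap-around; each payload
// message is preceded by a message carrying its size.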
343void CommMPI::CommLCListToGhosts(const Grid& grid, Particle::LinkedCellList& lc)
344{
345 VMG::MPI::DatatypesLocal types(lc, comm_global);
346 std::vector< std::vector<vmg_float> > send_buffer(types.NB().size());
347 std::vector< std::vector<vmg_float> > recv_buffer(types.Halo().size());
348 std::vector<vmg_int> send_size(types.NB().size());
349 vmg_int recv_size;
350 std::list<Particle::Particle*>::iterator iter;
351 Index ind;
352
353 lc.ClearHalo();
354
355 for (int i=0; i<3; ++i) {
356 for (Grid::iterator iter=lc.Iterators().Halo1()[i].Begin(); iter!=lc.Iterators().Halo1()[i].End(); ++iter)
357 assert(lc(*iter).size() == 0);
358 for (Grid::iterator iter=lc.Iterators().Halo2()[i].Begin(); iter!=lc.Iterators().Halo2()[i].End(); ++iter)
359 assert(lc(*iter).size() == 0);
360 }
361
362 for (unsigned int i=0; i<types.NB().size(); ++i)
363 if (types.NB()[i].Feasible()) {
364 for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
365 for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
366 for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
367 for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter) {
368 if (grid.Global().LocalBegin()[i] == 0)
369 for (int j=0; j<3; ++j)
370 send_buffer[i].push_back((*iter)->Pos()[j] + (i==j ? grid.Extent().Size()[j] : 0.0));
371 else
372 for (int j=0; j<3; ++j)
373 send_buffer[i].push_back((*iter)->Pos()[j]);
374
375 send_buffer[i].push_back((*iter)->Charge());
376 }
377 send_size[i] = send_buffer[i].size();
378 MPI_Isend(&send_size[i], 1, MPI_INT, types.NB()[i].Rank(), 2048+types.NB()[i].TagSend(), comm_global, &Request());
379
380 if (send_size[i] > 0)
381 MPI_Isend(&send_buffer[i].front(), send_size[i], MPI_DOUBLE,
382 types.NB()[i].Rank(), 4096+types.NB()[i].TagSend(),
383 comm_global, &Request());
384 }
385
386 for (unsigned int i=0; i<types.Halo().size(); ++i)
387 if (types.Halo()[i].Feasible()) {
388 MPI_Recv(&recv_size, 1, MPI_INT, types.Halo()[i].Rank(), 2048+types.Halo()[i].TagReceive(), comm_global, MPI_STATUS_IGNORE);
389 if (recv_size > 0) {
390 recv_buffer[i].resize(recv_size);
391 MPI_Irecv(&recv_buffer[i].front(), recv_size, MPI_DOUBLE,
392 types.Halo()[i].Rank(), 4096+types.Halo()[i].TagReceive(),
393 comm_global, &Request());
394 }
395 }
396
397 WaitAll();
398
399 for (unsigned int i=0; i<recv_buffer.size(); ++i)
400 for (unsigned int j=0; j<recv_buffer[i].size(); j+=4)
401 lc.AddParticleToHalo(&recv_buffer[i][j], recv_buffer[i][j+3]);
402}
403
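// Send the potentials stored in the halo cells back to the neighbor owning
// those cells and add them onto the local particles. This relies on the
// matching halo/interior cells holding the same number of particles in the
// same order (the size part is checked by the asserts below).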
404void CommMPI::CommLCListFromGhosts(const Grid& grid, Particle::LinkedCellList& lc)
405{
406 VMG::MPI::DatatypesLocal types(lc, comm_global);
407 std::vector< std::vector<vmg_float> > send_buffer(types.NB().size());
408 std::vector< std::vector<vmg_float> > recv_buffer(types.Halo().size());
409 std::vector<vmg_int> send_size(types.NB().size());
410 vmg_int recv_size;
411 std::list<Particle::Particle*>::iterator iter;
412 Index ind;
413
414 for (unsigned int i=0; i<types.Halo().size(); ++i) {
415 assert(types.Halo()[i].Subsizes().X() == types.NB()[i].Subsizes().X());
416 assert(types.Halo()[i].Subsizes().Y() == types.NB()[i].Subsizes().Y());
417 assert(types.Halo()[i].Subsizes().Z() == types.NB()[i].Subsizes().Z());
418
419 for (ind.X() = 0; ind.X() < types.Halo()[i].Subsizes().X(); ++ind.X())
420 for (ind.Y() = 0; ind.Y() < types.Halo()[i].Subsizes().Y(); ++ind.Y())
421 for (ind.Z() = 0; ind.Z() < types.Halo()[i].Subsizes().Z(); ++ind.Z())
422 assert(lc(ind+types.Halo()[i].Starts()).size() == lc(ind+types.NB()[i].Starts()).size());
423 }
424
425 for (unsigned int i=0; i<types.Halo().size(); ++i)
426 if (types.Halo()[i].Feasible()) {
427
428 for (ind.X() = types.Halo()[i].Starts().X(); ind.X() < types.Halo()[i].Starts().X()+types.Halo()[i].Subsizes().X(); ++ind.X())
429 for (ind.Y() = types.Halo()[i].Starts().Y(); ind.Y() < types.Halo()[i].Starts().Y()+types.Halo()[i].Subsizes().Y(); ++ind.Y())
430 for (ind.Z() = types.Halo()[i].Starts().Z(); ind.Z() < types.Halo()[i].Starts().Z()+types.Halo()[i].Subsizes().Z(); ++ind.Z())
431 for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter)
432 send_buffer[i].push_back((*iter)->Pot());
433
434 MPI_Isend(&send_buffer[i].front(), send_buffer[i].size(), MPI_DOUBLE, types.Halo()[i].Rank(), 342523+types.Halo()[i].TagSend(), comm_global, &Request());
435 }
436
437 for (unsigned int i=0; i<types.NB().size(); ++i)
438 if (types.NB()[i].Feasible()) {
439
440 recv_size = 0;
441 for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
442 for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
443 for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
444 recv_size += lc(ind).size();
445
446 if (recv_size > 0) {
447 recv_buffer[i].resize(recv_size);
448 MPI_Irecv(&recv_buffer[i].front(), recv_size, MPI_DOUBLE, types.NB()[i].Rank(), 342523+types.NB()[i].TagReceive(), comm_global, &Request());
449 }
450 }
451
452 WaitAll();
453
454 for (unsigned int i=0; i<types.NB().size(); ++i) {
455 unsigned int c=0;
456 if (types.NB()[i].Feasible()) {
457 for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
458 for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
459 for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
460 for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter)
461 (*iter)->Pot() += recv_buffer[i][c++];
462 }
463 }
464}
465
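// Reduction wrappers: the GlobalSum* functions reduce over the global
// communicator, the LevelSum* functions over the per-level communicator of
// the given grid; the *Root variants leave the result on rank 0 only.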
466vmg_float CommMPI::GlobalSum(const vmg_float& value)
467{
468 vmg_float result = value;
469 MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
470 return result;
471}
472
473vmg_float CommMPI::GlobalSumRoot(const vmg_float& value)
474{
475 vmg_float recv_buffer = value;
476 vmg_float send_buffer = value;
477 MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
478 return recv_buffer;
479}
480
481void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
482{
483 MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
484}
485
486vmg_int CommMPI::GlobalSum(const vmg_int& value)
487{
488 vmg_int result = value;
489 MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm_global);
490 return result;
491}
492
493vmg_int CommMPI::GlobalSumRoot(const vmg_int& value)
494{
495 vmg_int recv_buffer = value;
496 vmg_int send_buffer = value;
497 MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm_global);
498 return recv_buffer;
499}
500
501void CommMPI::GlobalSumArray(vmg_int* array, const vmg_int& size)
502{
503 MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm_global);
504}
505
506vmg_float CommMPI::LevelSum(const Grid& grid, const vmg_float& value)
507{
508 vmg_float result = value;
509 MPI_Comm comm = settings.CommunicatorLocal(grid);
510 assert(comm != MPI_COMM_NULL);
511 MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
512 return result;
513}
514
515vmg_float CommMPI::LevelSumRoot(const Grid& grid, const vmg_float& value)
516{
517 vmg_float recv_buffer = value;
518 vmg_float send_buffer = value;
519 MPI_Comm comm = settings.CommunicatorLocal(grid);
520 assert(comm != MPI_COMM_NULL);
521 MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
522 return recv_buffer;
523}
524
525void CommMPI::LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size)
526{
527 MPI_Comm comm = settings.CommunicatorLocal(grid);
528 assert(comm != MPI_COMM_NULL);
529 MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm);
530}
531
532vmg_int CommMPI::LevelSum(const Grid& grid, const vmg_int& value)
533{
534 vmg_int result = value;
535 MPI_Comm comm = settings.CommunicatorLocal(grid);
536 assert(comm != MPI_COMM_NULL);
537 MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm);
538 return result;
539}
540
541vmg_int CommMPI::LevelSumRoot(const Grid& grid, const vmg_int& value)
542{
543 vmg_int recv_buffer = value;
544 vmg_int send_buffer = value;
545 MPI_Comm comm = settings.CommunicatorLocal(grid);
546 assert(comm != MPI_COMM_NULL);
547 MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm);
548 return recv_buffer;
549}
550
551void CommMPI::LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size)
552{
553 MPI_Comm comm = settings.CommunicatorLocal(grid);
554 assert(comm != MPI_COMM_NULL);
555 MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm);
556}
557
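// printf-style logging helpers; every line is prefixed with the global MPI
// rank, and PrintStringOnce only prints on rank 0.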
558void CommMPI::PrintString(const char* format, ...)
559{
560 va_list args;
561 va_start(args, format);
562 vsnprintf(print_buffer, sizeof(print_buffer), format, args);
563 printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
564 va_end(args);
565}
566
567void CommMPI::PrintStringOnce(const char* format, ...)
568{
569 if (GlobalRank() == 0) {
570 va_list args;
571 va_start(args, format);
572 vsnprintf(print_buffer, sizeof(print_buffer), format, args);
573 printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
574 va_end(args);
575 }
576}
577
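// XML output: PrintXML writes header and payload from the calling rank only
// (MPI_COMM_SELF), while PrintXMLAll writes collectively, with rank 0
// contributing the header and all ranks appending their data in rank order.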
578void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
579{
580 MPI_File file;
581 std::stringstream path, xml_header;
582
583 pugi::xml_document doc;
584 pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
585 node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
586 doc.save(xml_header);
587
588 path << OutputPath() << filename;
589
590 char* filename_array = Helper::GetCharArray(path.str());
591 char* xml_header_array = Helper::GetCharArray(xml_header.str());
592 char* str_array = Helper::GetCharArray(xml_data);
593
594 MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
595 MPI_File_set_size(file, 0);
596 MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
597 MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
598 MPI_File_close(&file);
599
600 delete [] filename_array;
601 delete [] xml_header_array;
602 delete [] str_array;
603}
604
605void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
606{
607 MPI_File file;
608 std::stringstream path;
609
610 path << OutputPath() << filename;
611
612 char* filename_array = Helper::GetCharArray(path.str());
613 char* str_array = Helper::GetCharArray(xml_data);
614
615 MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
616 MPI_File_set_size(file, 0);
617
618 if (GlobalRank() == 0) {
619 std::stringstream xml_header;
620 pugi::xml_document doc;
621 pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
622 node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
623 doc.save(xml_header);
624
625 char* xml_header_array = Helper::GetCharArray(xml_header.str());
626
627 MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
628
629 delete [] xml_header_array;
630 }
631
632 MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
633 MPI_File_close(&file);
634
635 delete [] filename_array;
636 delete [] str_array;
637}
638
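// Append a human-readable dump of the global and per-rank grid geometry to
// the given file, using ordered collective writes so the output appears in
// rank order.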
639void CommMPI::PrintGridInformation(const Grid& grid, char* filename, const std::string& name)
640{
641 std::stringstream buf;
642 MPI_File file;
643 int rank, size;
644 int size_local, size_local_max;
645
646 MPI_Comm comm = settings.CommunicatorGlobal(grid);
647 MPI_Comm comm_local = settings.CommunicatorLocal(grid);
648
649 if (comm_local != MPI_COMM_NULL)
650 MPI_Comm_size(comm_local, &size_local);
651 else
652 size_local = 0;
653
654 if (comm != MPI_COMM_NULL) {
655
656 MPI_Reduce(&size_local, &size_local_max, 1, MPI_INT, MPI_MAX, 0, comm);
657
658 MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);
659
660 MPI_Comm_rank(comm, &rank);
661 MPI_Comm_size(comm, &size);
662
663 if (rank == 0) {
664
665 buf << "###########################################################" << std::endl
666 << "GLOBAL INFORMATION:" << std::endl
667 << " NAME: " << name << std::endl
668 << " LEVEL: " << grid.Level() << std::endl
669 << " COMM_SIZE_GLOBAL: " << size << std::endl
670 << " COMM_SIZE_LOCAL: " << size_local_max << std::endl
671 << " GLOBAL:" << std::endl
672 << " GLOBAL_FINER_BEGIN: " << grid.Global().GlobalFinerBegin() << std::endl
673 << " GLOBAL_FINER_END: " << grid.Global().GlobalFinerEnd() << std::endl
674 << " GLOBAL_FINER_SIZE: " << grid.Global().GlobalFinerSize() << std::endl
675 << " FINEST_ABS_BEGIN: " << grid.Global().FinestAbsBegin() << std::endl
676 << " FINEST_ABS_END: " << grid.Global().FinestAbsEnd() << std::endl
677 << " FINEST_ABS_SIZE: " << grid.Global().FinestAbsSize() << std::endl
678 << " GLOBAL_SIZE: " << grid.Global().GlobalSize() << std::endl
679 << " EXTENT:" << std::endl
680 << " BEGIN: " << grid.Extent().Begin() << std::endl
681 << " END: " << grid.Extent().End() << std::endl
682 << " SIZE: " << grid.Extent().Size() << std::endl
683 << " MESH_WIDTH: " << grid.Extent().MeshWidth() << std::endl
684 << std::endl
685 << "LOCAL INFORMATION:" << std::endl;
686 }
687
688 buf << "RANK " << rank << ":" << std::endl
689 << " GLOBAL:" << std::endl
690 << " LOCAL_BEGIN: " << grid.Global().LocalBegin() << std::endl
691 << " LOCAL_END: " << grid.Global().LocalEnd() << std::endl
692 << " LOCAL_SIZE: " << grid.Global().LocalSize() << std::endl
693 << " LOCAL_FINER_BEGIN: " << grid.Global().LocalFinerBegin() << std::endl
694 << " LOCAL_FINER_END: " << grid.Global().LocalFinerEnd() << std::endl
695 << " LOCAL_FINER_SIZE: " << grid.Global().LocalFinerSize() << std::endl
696 << " BOUNDARY_TYPE: " << grid.Global().BoundaryType() << std::endl
697 << " LOCAL:" << std::endl
698 << " BEGIN: " << grid.Local().Begin() << std::endl
699 << " END: " << grid.Local().End() << std::endl
700 << " SIZE: " << grid.Local().Size() << std::endl
701 << " SIZE_TOTAL: " << grid.Local().SizeTotal() << std::endl
702 << " HALO_BEGIN_1: " << grid.Local().HaloBegin1() << std::endl
703 << " HALO_END_1: " << grid.Local().HaloEnd1() << std::endl
704 << " HALO_SIZE_1: " << grid.Local().HaloSize1() << std::endl
705 << " HALO_BEGIN_2: " << grid.Local().HaloBegin2() << std::endl
706 << " HALO_END_2: " << grid.Local().HaloEnd2() << std::endl
707 << " HALO_SIZE_2: " << grid.Local().HaloSize2() << std::endl
708 << " BOUNDARY_BEGIN_1: " << grid.Local().BoundaryBegin1() << std::endl
709 << " BOUNDARY_END_1: " << grid.Local().BoundaryEnd1() << std::endl
710 << " BOUNDARY_SIZE_1: " << grid.Local().BoundarySize1() << std::endl
711 << " BOUNDARY_BEGIN_2: " << grid.Local().BoundaryBegin2() << std::endl
712 << " BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
713 << " BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl
714 << " FINER_BEGIN: " << grid.Local().FinerBeginFoo() << std::endl
715 << " FINER_END: " << grid.Local().FinerEndFoo() << std::endl
716 << " FINER_SIZE: " << grid.Local().FinerSizeFoo() << std::endl;
717
718 if (rank == size-1)
719 buf << "###########################################################" << std::endl;
720
721 char* char_buf = Helper::GetCharArray(buf.str());
722 MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
723 delete [] char_buf;
724
725 MPI_File_close(&file);
726
727 }
728}
729
730void CommMPI::PrintAllSettings()
731{
732 std::stringstream buf;
733 MPI_File file;
734
735 const Multigrid& mg = *MG::GetSol();
736
737 buf << OutputPath() << "settings.txt";
738 char *filename = Helper::GetCharArray(buf.str());
739
740 MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
741 MPI_File_set_size(file, 0);
742 MPI_File_close(&file);
743
744 for (int i=mg.MinLevel(); i<=mg.MaxLevel(); ++i)
745 PrintGridInformation(mg(i), filename, "MULTIGRID");
746
747 for (int i=mg.MinLevel()+1; i<=mg.MaxLevel(); ++i)
748 PrintGridInformation(settings.CoarserGrid(mg(i)), filename, "COARSER_GRID");
749
750 for (int i=mg.MinLevel(); i<mg.MaxLevel(); ++i)
751 PrintGridInformation(settings.FinerGrid(mg(i)), filename, "FINER_GRID");
752
753 delete [] filename;
754
755}
756
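// Write the grid to disk: as a per-rank VTK image (.vti) if VTK support is
// compiled in, otherwise as plain ASCII values wrapped into VTK XML files by
// CreateOutputFiles.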
757void CommMPI::PrintGrid(Grid& grid, const char* information)
758{
759 int output_count = OutputCount();
760
761#ifdef HAVE_VTK
762
763 if (settings.CommunicatorLocal(grid) != MPI_COMM_NULL) {
764
765 Index end, end_global;
766
767 for (int i=0; i<3; ++i) {
768 end[i] = grid.Local().End()[i];
769 end_global[i] = grid.Global().LocalEnd()[i];
770 }
771
772 vtkSmartPointer<vtkImageData> image = vtkSmartPointer<vtkImageData>::New();
773 image->SetExtent(grid.Global().LocalBegin().X(), end_global.X()-1,
774 grid.Global().LocalBegin().Y(), end_global.Y()-1,
775 grid.Global().LocalBegin().Z(), end_global.Z()-1);
776 image->SetSpacing(grid.Extent().MeshWidth().vec());
777 image->SetOrigin(grid.Extent().Begin().vec());
778 image->SetScalarTypeToDouble();
779 image->SetNumberOfScalarComponents(1);
780 image->AllocateScalars();
781 image->GetPointData()->GetScalars()->SetName(information);
782
783 Index i;
784 for (i.X()=grid.Local().Begin().X(); i.X()<end.X(); ++i.X())
785 for (i.Y()=grid.Local().Begin().Y(); i.Y()<end.Y(); ++i.Y())
786 for (i.Z()=grid.Local().Begin().Z(); i.Z()<end.Z(); ++i.Z())
787 image->SetScalarComponentFromDouble(i.X() - grid.Local().Begin().X() + grid.Global().LocalBegin().X(),
788 i.Y() - grid.Local().Begin().Y() + grid.Global().LocalBegin().Y(),
789 i.Z() - grid.Local().Begin().Z() + grid.Global().LocalBegin().Z(),
790 0, grid.GetVal(i));
791
792 image->Update();
793
794 int rank, size;
795 MPI_Comm_rank(comm_global, &rank);
796 MPI_Comm_size(comm_global, &size);
797
798 std::stringstream filename;
799 filename << OutputPath() << std::setw(4) << std::setfill('0') << output_count << "_" << rank << ".vti";
800
801 vtkSmartPointer<vtkXMLImageDataWriter> writer = vtkSmartPointer<vtkXMLImageDataWriter>::New();
802 writer->SetFileName(filename.str().c_str());
803 writer->SetCompressorTypeToNone();
804 writer->SetDataModeToAscii();
805 writer->SetInput(image);
806 writer->Update();
807 writer->Write();
808
809 }
810
811#else /* HAVE_VTK */
812 Index i;
813 std::stringstream buf;
814
815 Index begin, end;
816 Index begin_local, end_local, begin_global, end_global;
817
818 CommToGhosts(grid);
819
820 for (int i=0; i<3; ++i) {
821 end[i] = grid.Local().End()[i] + (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 0 : grid.Local().HaloSize1()[i]);
822 end_local[i] = grid.Global().LocalEnd()[i] - (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 1 : 0);
823 }
824
825 begin = grid.Local().Begin();
826 begin_local = grid.Global().LocalBegin();
827 begin_global = 0;
828 end_global = grid.Global().GlobalSize()-1;
829
830 for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
831 for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
832 for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
833 buf << std::scientific << grid.GetVal(i) << " ";
834
835 CreateOutputFiles(grid, buf, information,
836 begin_global, end_global,
837 begin_local, end_local,
838 output_count);
839#endif /* HAVE_VTK */
840}
841
842void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
843{
844 TempGrid *temp = MG::GetTempGrid();
845 temp->SetProperties(sol);
846 temp->ImportFromResidual(sol, rhs);
847 PrintGrid(*temp, information);
848}
849
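// Glue for the non-VTK output path: create the parallel .pvti index file and
// a serial .vti file per rank, then append the serialized grid data.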
850void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
851 const Index& begin_global, const Index& end_global,
852 const Index& begin_local, const Index& end_local,
853 const int& output_count)
854{
855 MPI_Comm comm = settings.CommunicatorGlobal(grid);
856
857 if (comm != MPI_COMM_NULL) {
858
859 MPI_File file;
860 std::string conv_information = Helper::ReplaceWhitespaces(information, "_");
861
862 CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
863 begin_global, end_global, begin_local, end_local);
864
865 file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
866 begin_global, end_global, begin_local, end_local);
867
868 char *char_buf = Helper::GetCharArray(serial_data.str());
869 MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
870 delete [] char_buf;
871
872 FinalizeSerialOutputFile(file);
873
874 }
875}
876
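// Write the .pvti master file: rank 0 contributes header and footer via
// shared writes, and every rank appends its own <Piece> entry (if its local
// extent is non-empty) with an ordered collective write.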
877void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
878 const int& output_count, const char* information,
879 const Index& begin_global, const Index& end_global,
880 const Index& begin_local, const Index& end_local)
881{
882 int rank;
883 MPI_File file;
884 char parallel_filename[513], serial_filename[513];
885 std::stringstream buf;
886
887 MPI_Comm_rank(comm, &rank);
888
889 sprintf(parallel_filename, "%s%04d.pvti", OutputPath().c_str(), output_count);
890 sprintf(serial_filename, "%04d_%d.vti", output_count, rank);
891
892 MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
893 MPI_File_set_size(file, 0);
894
895 if (rank == 0) {
896
897 buf << "<?xml version=\"1.0\"?>" << std::endl
898 << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
899 << " <PImageData WholeExtent=\"";
900
901 for (int i=0; i<3; ++i)
902 buf << begin_global[i] << " " << end_global[i] << " ";
903
904 buf << "\"" << std::endl
905 << " GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";
906
907 for (int i=0; i<3; ++i)
908 buf << grid.Extent().MeshWidth()[i] << " ";
909
910 buf << "\">" << std::endl
911 << " <PPointData Scalars=\"" << information << "\">" << std::endl
912 << " <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
913 << " </PPointData>" << std::endl;
914
915 char* char_buf = Helper::GetCharArray(buf.str());
916
917 MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
918
919 delete [] char_buf;
920 }
921
922 buf.str("");
923
924 if ((end_local-begin_local).Product() > 0) {
925 buf << " <Piece Extent=\"";
926
927 for (int i=0; i<3; ++i)
928 buf << begin_local[i] << " " << end_local[i] << " ";
929
930 buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
931 }
932
933 char* char_buf = Helper::GetCharArray(buf.str());
934
935 MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
936
937 delete [] char_buf;
938
939 if (rank == 0) {
940
941 buf.str("");
942
943 buf << " </PImageData>" << std::endl
944 << "</VTKFile>" << std::endl;
945
946 char* char_buf = Helper::GetCharArray(buf.str());
947
948 MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
949
950 delete [] char_buf;
951 }
952
953 MPI_File_close(&file);
954}
955
956MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
957 const int& output_count, const char* information,
958 const Index& begin_global, const Index& end_global,
959 const Index& begin_local, const Index& end_local)
960{
961 char serial_filename[513];
962 int rank;
963 MPI_File file;
964 std::stringstream buf;
965
966 MPI_Comm_rank(comm_global, &rank);
967
968 sprintf(serial_filename, "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);
969
970 MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
971
972 buf << "<?xml version=\"1.0\"?>" << std::endl
973 << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
974 << " <ImageData WholeExtent=\"";
975
976 for (int i=0; i<3; ++i)
977 buf << begin_global[i] << " " << end_global[i] << " ";
978
979 buf << "\"" << std::endl
980 << " Origin=\"0 0 0\" Spacing=\"";
981
982 for (int i=0; i<3; ++i)
983 buf << grid.Extent().MeshWidth()[i] << " ";
984
985 buf << "\">" << std::endl
986 << " <Piece Extent=\"";
987
988 for (int i=0; i<3; ++i)
989 buf << begin_local[i] << " " << end_local[i] << " ";
990
991 buf << "\">" << std::endl
992 << " <PointData Scalars=\"" << information << "\">" << std::endl
993 << " <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
994 << " ";
995
996 char* char_buf = Helper::GetCharArray(buf.str());
997 MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
998 delete [] char_buf;
999
1000 return file;
1001}
1002
1003void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
1004{
1005 std::stringstream buf;
1006
1007 buf << std::endl
1008 << " </DataArray>" << std::endl
1009 << " </PointData>" << std::endl
1010 << " </Piece>" << std::endl
1011 << " </ImageData>" << std::endl
1012 << "</VTKFile>" << std::endl;
1013
1014 char* char_buf = Helper::GetCharArray(buf.str());
1015 MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
1016 delete [] char_buf;
1017
1018 MPI_File_close(&file);
1019}
1020
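// Rank/size/coordinate accessors for the global Cartesian communicator and
// for the per-level communicator associated with a grid.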
1021int CommMPI::GlobalRank() const
1022{
1023 int rank;
1024 MPI_Comm_rank(comm_global, &rank);
1025 return rank;
1026}
1027
1028int CommMPI::GlobalSize() const
1029{
1030 int size;
1031 MPI_Comm_size(comm_global, &size);
1032 return size;
1033}
1034
1035Index CommMPI::GlobalPos() const
1036{
1037 Index dims, periods, coords;
1038 MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
1039 return coords;
1040}
1041
1042Index CommMPI::GlobalProcs() const
1043{
1044 Index dims, periods, coords;
1045 MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
1046 return dims;
1047}
1048
1049int CommMPI::Rank(const Grid& grid) const
1050{
1051 int rank;
1052 MPI_Comm comm = settings.CommunicatorLocal(grid);
1053 assert(comm != MPI_COMM_NULL);
1054 MPI_Comm_rank(comm, &rank);
1055 return rank;
1056}
1057
1058int CommMPI::Size(const Grid& grid) const
1059{
1060 int size;
1061 MPI_Comm comm = settings.CommunicatorLocal(grid);
1062 assert(comm != MPI_COMM_NULL);
1063 MPI_Comm_size(comm, &size);
1064 return size;
1065}
1066
1067Index CommMPI::Pos(const Grid& grid) const
1068{
1069 Index dims, periods, coords;
1070 MPI_Comm comm = settings.CommunicatorLocal(grid);
1071 assert(comm != MPI_COMM_NULL);
1072 MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
1073 return coords;
1074}
1075
1076Index CommMPI::Procs(const Grid& grid) const
1077{
1078 Index dims, periods, coords;
1079 MPI_Comm comm = settings.CommunicatorLocal(grid);
1080 assert(comm != MPI_COMM_NULL);
1081 MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
1082 return dims;
1083}
1084
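// Set up the global communicator: adopt the incoming communicator if it
// already carries a Cartesian topology, otherwise create a 3D Cartesian
// communicator with periodicity derived from the boundary conditions.
// Also creates the "no_locks" MPI_Info used by the one-sided code path.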
1085void CommMPI::InitCommMPI(const MPI_Comm& comm)
1086{
1087 int status, size, rank;
1088 int dims[3] = {0, 0, 0};
1089 int periods[3];
1090
1091 for (int i=0; i<3; ++i)
1092 periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);
1093
1094 MPI_Comm_size(comm, &size);
1095 MPI_Comm_rank(comm, &rank);
1096
1097 MPI_Topo_test(comm, &status);
1098
1099 if (status == MPI_CART) {
1100
1101 comm_global = comm;
1102
1103 }else {
1104
1105 MPI_Dims_create(size, 3, dims);
1106
1107 MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);
1108
1109 }
1110
1111 MPI_Info_create(&info);
1112 char key[] = "no_locks";
1113 char val[] = "true";
1114 MPI_Info_set(info, key, val);
1115
1116}
1117
1118CommMPI::~CommMPI()
1119{
1120 MPI_Comm_free(&comm_global);
1121#ifdef VMG_ONE_SIDED
1122 if (win_created)
1123 MPI_Win_free(&win);
1124#endif
1125 MPI_Info_free(&info);
1126}
1127
1128Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
1129{
1130 return settings.CoarserGrid(multigrid(multigrid.Level()));
1131}
1132
1133Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
1134{
1135 return settings.FinerGrid(multigrid(multigrid.Level()));
1136}
1137
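// Create a timestamped output directory (suffixed with a counter if it
// already exists) on rank 0 and broadcast its path to all ranks; without
// Boost.Filesystem the current working directory is used instead.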
1138std::string CommMPI::CreateOutputDirectory()
1139{
1140#ifdef HAVE_BOOST_FILESYSTEM
1141 std::string path, unique_path;
1142 std::stringstream unique_suffix;
1143 int suffix_counter = 0;
1144 char buffer[129];
1145 time_t rawtime;
1146 struct tm *timeinfo;
1147 int path_size;
1148 char* path_array;
1149
1150 if (GlobalRank() == 0) {
1151
1152 time(&rawtime);
1153 timeinfo = localtime(&rawtime);
1154 strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
1155 path = buffer;
1156
1157 if (!fs::exists("output"))
1158 fs::create_directory("output");
1159
1160 unique_path = path;
1161
1162 while (fs::exists(unique_path.c_str())) {
1163
1164 unique_suffix.str("");
1165 unique_suffix << "_" << suffix_counter++ << "/";
1166
1167 unique_path = path;
1168 unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());
1169
1170 }
1171
1172 fs::create_directory(unique_path.c_str());
1173
1174 path_size = unique_path.size() + 1;
1175 path_array = Helper::GetCharArray(unique_path);
1176
1177 MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
1178
1179 }else {
1180
1181 MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
1182 path_array = new char[path_size];
1183
1184 }
1185
1186 MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);
1187
1188 unique_path = path_array;
1189
1190 delete [] path_array;
1191
1192 return unique_path;
1193
1194#else
1195
1196 return "./";
1197
1198#endif
1199}
1200
1201
1202#endif /* HAVE_MPI */