source: src/comm/comm_mpi.cpp@06e153

Last change on this file since 06e153 was f003a9, checked in by Julian Iseringhausen <isering@…>, 14 years ago

Refactored vmg in order to separate the core library and the particle simulation part properly.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1798 5161e1c8-67bf-11de-9fd5-51895aff932f

  • Property mode set to 100644
File size: 28.4 KB
/**
 * @file comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Wed Jun 16 13:21:06 2010
 *
 * @brief Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#ifdef HAVE_VTK
#include <vtkAbstractArray.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#endif

#include <cassert>  // assert() in the level-wise reductions
#include <cstdarg>  // va_list in PrintString/PrintStringOnce
#include <cstdio>   // printf, vsnprintf, sprintf
#include <cstring>
#include <ctime>    // time, localtime, strftime in CreateOutputDirectory
#include <iomanip>  // std::setw, std::setfill in PrintGrid
#include <sstream>

#include "base/helper.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "comm/mpi/datatypes_local.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"
#include "base/timer.hpp"

static char print_buffer[512];

using namespace VMG;

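/*
 * Helpers that post one non-blocking MPI operation per communication
 * datatype. After the requests have completed, ReplaceBufferAll copies
 * the received buffers into the grid, AddBufferAll accumulates them.
 */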
void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Isend(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Irecv(grid, tag_start, comm, Request());
}

void CommMPI::IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IsendBuffered(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IrecvBuffered(tag_start, comm, Request());
}

void CommMPI::ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridReplace(grid);
}

void CommMPI::AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridSum(grid);
}

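/*
 * Transfer between two grids on the global communicator of the source
 * grid, using the datatypes precomputed in settings for the given
 * direction. CommSubgrid overwrites the destination values,
 * CommAddSubgrid accumulates into them.
 */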
void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 0411);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 0411);
    WaitAll();
    ReplaceBufferAll(grid_new, datatypes.Receive());
  }
}

void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 1806);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 1806);
    WaitAll();
    AddBufferAll(grid_new, datatypes.Receive());
  }
}

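/*
 * Halo exchange on the level-local communicator. CommToGhosts fills the
 * ghost (halo) layers with the neighbors' boundary data, CommFromGhosts
 * sends the ghost values back and adds them to the owning ranks. The
 * Async variants split each exchange into a start phase (post requests)
 * and a finish phase (wait and apply buffers).
 */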
void CommMPI::CommToGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2310);
    IsendAllBuffered(grid, types.NB(), comm, 2310);
    WaitAll();
    ReplaceBufferAll(grid, types.Halo());
  }
}

void CommMPI::CommToGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2412);
    IsendAllBuffered(grid, types.NB(), comm, 2412);
    TestAll();
  }
}

void CommMPI::CommToGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  ReplaceBufferAll(grid, settings.DatatypesLocal(grid).Halo());
}

void CommMPI::CommFromGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 1337);
    IsendAllBuffered(grid, types.Halo(), comm, 1337);
    WaitAll();
    AddBufferAll(grid, types.NB());
  }
}

void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
    IsendAllBuffered(grid, types.Halo(), comm, 0xc0ffee);
    TestAll();
  }
}

void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  AddBufferAll(grid, settings.DatatypesLocal(grid).NB());
}

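/*
 * Scalar and array reductions. The Global* functions reduce over the
 * global communicator, the Level* functions over the communicator of
 * the grid's level. The *Root variants (MPI_Reduce) leave the result on
 * rank 0 only, the others use MPI_Allreduce.
 */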
vmg_float CommMPI::GlobalSum(const vmg_float& value)
{
  vmg_float result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
  return result;
}

vmg_float CommMPI::GlobalSumRoot(const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

vmg_int CommMPI::GlobalSum(const vmg_int& value)
{
  vmg_int result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm_global);
  return result;
}

vmg_int CommMPI::GlobalSumRoot(const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm_global);
}

vmg_float CommMPI::GlobalMax(const vmg_float& value)
{
  vmg_float result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_MAX, comm_global);
  return result;
}

vmg_float CommMPI::GlobalMaxRoot(const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_MAX, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalMaxArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_MAX, comm_global);
}

vmg_int CommMPI::GlobalMax(const vmg_int& value)
{
  vmg_int result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_MAX, comm_global);
  return result;
}

vmg_int CommMPI::GlobalMaxRoot(const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_MAX, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalMaxArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_MAX, comm_global);
}

vmg_float CommMPI::LevelSum(const Grid& grid, const vmg_float& value)
{
  vmg_float result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
  return result;
}

vmg_float CommMPI::LevelSumRoot(const Grid& grid, const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm);
}

vmg_int CommMPI::LevelSum(const Grid& grid, const vmg_int& value)
{
  vmg_int result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm);
  return result;
}

vmg_int CommMPI::LevelSumRoot(const Grid& grid, const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm);
}

void CommMPI::PrintString(const char* format, ...)
{
  va_list args;
  va_start(args, format);
  // vsnprintf instead of vsprintf so a long message cannot overflow print_buffer
  vsnprintf(print_buffer, sizeof(print_buffer), format, args);
  printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
  va_end(args);
}

void CommMPI::PrintStringOnce(const char* format, ...)
{
  if (GlobalRank() == 0) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

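/*
 * XML output via MPI-IO. PrintXML writes header and payload from the
 * calling rank only (MPI_COMM_SELF); PrintXMLAll lets every rank append
 * its data in rank order, with rank 0 contributing the header.
 */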
void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}

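/*
 * Appends a human-readable summary of the grid decomposition to a shared
 * text file: rank 0 writes the global section, then every rank appends
 * its local index ranges via MPI_File_write_ordered.
 */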
void CommMPI::PrintGridInformation(const Grid& grid, char* filename, const std::string& name)
{
  std::stringstream buf;
  MPI_File file;
  int rank, size;
  int size_local, size_local_max;

  MPI_Comm comm = settings.CommunicatorGlobal(grid);
  MPI_Comm comm_local = settings.CommunicatorLocal(grid);

  if (comm_local != MPI_COMM_NULL)
    MPI_Comm_size(comm_local, &size_local);
  else
    size_local = 0;

  if (comm != MPI_COMM_NULL) {

    MPI_Reduce(&size_local, &size_local_max, 1, MPI_INT, MPI_MAX, 0, comm);

    MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (rank == 0) {

      buf << "###########################################################" << std::endl
          << "GLOBAL INFORMATION:" << std::endl
          << " NAME: " << name << std::endl
          << " LEVEL: " << grid.Level() << std::endl
          << " COMM_SIZE_GLOBAL: " << size << std::endl
          << " COMM_SIZE_LOCAL: " << size_local_max << std::endl
          << " GLOBAL:" << std::endl
          << " GLOBAL_FINER_BEGIN: " << grid.Global().GlobalFinerBegin() << std::endl
          << " GLOBAL_FINER_END: " << grid.Global().GlobalFinerEnd() << std::endl
          << " GLOBAL_FINER_SIZE: " << grid.Global().GlobalFinerSize() << std::endl
          << " FINEST_ABS_BEGIN: " << grid.Global().FinestAbsBegin() << std::endl
          << " FINEST_ABS_END: " << grid.Global().FinestAbsEnd() << std::endl
          << " FINEST_ABS_SIZE: " << grid.Global().FinestAbsSize() << std::endl
          << " GLOBAL_SIZE: " << grid.Global().GlobalSize() << std::endl
          << " EXTENT:" << std::endl
          << " BEGIN: " << grid.Extent().Begin() << std::endl
          << " END: " << grid.Extent().End() << std::endl
          << " SIZE: " << grid.Extent().Size() << std::endl
          << " MESH_WIDTH: " << grid.Extent().MeshWidth() << std::endl
          << std::endl
          << "LOCAL INFORMATION:" << std::endl;
    }

    buf << "RANK " << rank << ":" << std::endl
        << " GLOBAL:" << std::endl
        << " LOCAL_BEGIN: " << grid.Global().LocalBegin() << std::endl
        << " LOCAL_END: " << grid.Global().LocalEnd() << std::endl
        << " LOCAL_SIZE: " << grid.Global().LocalSize() << std::endl
        << " LOCAL_FINER_BEGIN: " << grid.Global().LocalFinerBegin() << std::endl
        << " LOCAL_FINER_END: " << grid.Global().LocalFinerEnd() << std::endl
        << " LOCAL_FINER_SIZE: " << grid.Global().LocalFinerSize() << std::endl
        << " BOUNDARY_TYPE: " << grid.Global().BoundaryType() << std::endl
        << " LOCAL:" << std::endl
        << " BEGIN: " << grid.Local().Begin() << std::endl
        << " END: " << grid.Local().End() << std::endl
        << " SIZE: " << grid.Local().Size() << std::endl
        << " SIZE_TOTAL: " << grid.Local().SizeTotal() << std::endl
        << " HALO_BEGIN_1: " << grid.Local().HaloBegin1() << std::endl
        << " HALO_END_1: " << grid.Local().HaloEnd1() << std::endl
        << " HALO_SIZE_1: " << grid.Local().HaloSize1() << std::endl
        << " HALO_BEGIN_2: " << grid.Local().HaloBegin2() << std::endl
        << " HALO_END_2: " << grid.Local().HaloEnd2() << std::endl
        << " HALO_SIZE_2: " << grid.Local().HaloSize2() << std::endl
        << " BOUNDARY_BEGIN_1: " << grid.Local().BoundaryBegin1() << std::endl
        << " BOUNDARY_END_1: " << grid.Local().BoundaryEnd1() << std::endl
        << " BOUNDARY_SIZE_1: " << grid.Local().BoundarySize1() << std::endl
        << " BOUNDARY_BEGIN_2: " << grid.Local().BoundaryBegin2() << std::endl
        << " BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
        << " BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl
        << " FINER_BEGIN: " << grid.Local().FinerBegin() << std::endl
        << " FINER_END: " << grid.Local().FinerEnd() << std::endl
        << " FINER_SIZE: " << grid.Local().FinerSize() << std::endl;

    if (rank == size-1)
      buf << "###########################################################" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;

  const Multigrid& mg = *MG::GetSol();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg.MinLevel(); i<=mg.MaxLevel(); ++i)
    PrintGridInformation(mg(i), filename, "MULTIGRID");

  for (int i=mg.MinLevel()+1; i<=mg.MaxLevel(); ++i)
    PrintGridInformation(settings.CoarserGrid(mg(i)), filename, "COARSER_GRID");

  for (int i=mg.MinLevel(); i<mg.MaxLevel(); ++i)
    PrintGridInformation(settings.FinerGrid(mg(i)), filename, "FINER_GRID");

  delete [] filename;
}

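/*
 * Grid output for debugging and visualization. With VTK support each
 * rank writes its local block as a .vti image file; without VTK the
 * values are dumped as ASCII and assembled by CreateOutputFiles into
 * VTK-compatible XML files via MPI-IO. PrintDefect prints the residual
 * of sol/rhs through a temporary grid.
 */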
void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  int output_count = OutputCount();

#ifdef HAVE_VTK

  if (settings.CommunicatorLocal(grid) != MPI_COMM_NULL) {

    Index end, end_global;

    for (int i=0; i<3; ++i) {
      end[i] = grid.Local().End()[i];
      end_global[i] = grid.Global().LocalEnd()[i];
    }

    vtkSmartPointer<vtkImageData> image = vtkSmartPointer<vtkImageData>::New();
    image->SetExtent(grid.Global().LocalBegin().X(), end_global.X()-1,
                     grid.Global().LocalBegin().Y(), end_global.Y()-1,
                     grid.Global().LocalBegin().Z(), end_global.Z()-1);
    image->SetSpacing(grid.Extent().MeshWidth().vec());
    image->SetOrigin(grid.Extent().Begin().vec());
    image->SetScalarTypeToDouble();
    image->SetNumberOfScalarComponents(1);
    image->AllocateScalars();
    image->GetPointData()->GetScalars()->SetName(information);

    Index i;
    for (i.X()=grid.Local().Begin().X(); i.X()<end.X(); ++i.X())
      for (i.Y()=grid.Local().Begin().Y(); i.Y()<end.Y(); ++i.Y())
        for (i.Z()=grid.Local().Begin().Z(); i.Z()<end.Z(); ++i.Z())
          image->SetScalarComponentFromDouble(i.X() - grid.Local().Begin().X() + grid.Global().LocalBegin().X(),
                                              i.Y() - grid.Local().Begin().Y() + grid.Global().LocalBegin().Y(),
                                              i.Z() - grid.Local().Begin().Z() + grid.Global().LocalBegin().Z(),
                                              0, grid.GetVal(i));

    image->Update();

    int rank, size;
    MPI_Comm_rank(comm_global, &rank);
    MPI_Comm_size(comm_global, &size);

    std::stringstream filename;
    filename << OutputPath() << std::setw(4) << std::setfill('0') << output_count << "_" << rank << ".vti";

    vtkSmartPointer<vtkXMLImageDataWriter> writer = vtkSmartPointer<vtkXMLImageDataWriter>::New();
    writer->SetFileName(filename.str().c_str());
    writer->SetCompressorTypeToNone();
    writer->SetDataModeToAscii();
    writer->SetInput(image);
    writer->Update();
    writer->Write();

  }

#else /* HAVE_VTK */
  Index i;
  std::stringstream buf;

  Index begin, end;
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int i=0; i<3; ++i) {
    end[i] = grid.Local().End()[i] + (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 0 : grid.Local().HaloSize1()[i]);
    end_local[i] = grid.Global().LocalEnd()[i] - (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 1 : 0);
  }

  begin = grid.Local().Begin();
  begin_local = grid.Global().LocalBegin();
  begin_global = 0;
  end_global = grid.Global().GlobalSize()-1;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local,
                    output_count);
#endif /* HAVE_VTK */
}

void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);
  PrintGrid(*temp, information);
}

void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local,
                                const int& output_count)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

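/*
 * Writes the .pvti master file that references the per-rank .vti pieces:
 * rank 0 contributes the header and footer, and every rank with a
 * non-empty local piece appends its <Piece> entry in rank order.
 */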
void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  sprintf(parallel_filename, "%s%04d.pvti", OutputPath().c_str(), output_count);
  sprintf(serial_filename, "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << " <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] << " ";

    buf << "\"" << std::endl
        << " GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << " <PPointData Scalars=\"" << information << "\">" << std::endl
        << " <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << " </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");

  if ((end_local-begin_local).Product() > 0) {
    buf << " <Piece Extent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_local[i] << " " << end_local[i] << " ";

    buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
  }

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << " </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  sprintf(serial_filename, "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << " <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] << " ";

  buf << "\"" << std::endl
      << " Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << " <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] << " ";

  buf << "\">" << std::endl
      << " <PointData Scalars=\"" << information << "\">" << std::endl
      << " <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << " ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << " </DataArray>" << std::endl
      << " </PointData>" << std::endl
      << " </Piece>" << std::endl
      << " </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

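/*
 * Rank, size, and Cartesian position/extent queries for the global
 * communicator (Global*) and for a grid's level-local communicator.
 */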
int CommMPI::GlobalRank() const
{
  int rank;
  MPI_Comm_rank(comm_global, &rank);
  return rank;
}

int CommMPI::GlobalSize() const
{
  int size;
  MPI_Comm_size(comm_global, &size);
  return size;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

int CommMPI::Rank(const Grid& grid) const
{
  int rank;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_rank(comm, &rank);
  return rank;
}

int CommMPI::Size(const Grid& grid) const
{
  int size;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_size(comm, &size);
  return size;
}

Index CommMPI::Pos(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::Procs(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

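/*
 * Sets up the global communicator. If the supplied communicator already
 * carries a Cartesian topology it is used directly; otherwise the process
 * count is factored into a 3D grid (an explicit power-of-two split when
 * possible, MPI_Dims_create otherwise) and a new Cartesian communicator
 * is created. Also creates the MPI info object with the "no_locks" hint.
 */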
void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    comm_global = comm;

  } else {

    const int log2 = Helper::log_2(size);

    if (Helper::intpow(2, log2) == size) {
      for (int i=0; i<3; ++i)
        dims[i] = Helper::intpow(2, log2 / 3 + (log2%3 > i ? 1 : 0));
    } else {
      MPI_Dims_create(size, 3, dims);
    }

#ifdef DEBUG_OUTPUT
    if (rank == 0)
      std::printf("Dims: %d %d %d\n", dims[0], dims[1], dims[2]);
#endif

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);
}

CommMPI::~CommMPI()
{
  MPI_Comm_free(&comm_global);
#ifdef VMG_ONE_SIDED
  if (win_created)
    MPI_Win_free(&win);
#endif
  MPI_Info_free(&info);
}

Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  return settings.CoarserGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  return settings.FinerGrid(multigrid(multigrid.Level()));
}

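/*
 * With Boost.Filesystem, rank 0 creates a unique timestamped directory
 * below ./output and broadcasts its path to all ranks; without
 * Boost.Filesystem all output goes to the current working directory.
 */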
std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  } else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}


#endif /* HAVE_MPI */