source: src/comm/comm_mpi.cpp@d23483

Last change on this file since d23483 was 0bd47e, checked in by Julian Iseringhausen <isering@…>, 13 years ago

Added MPI classes output.

/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Wed Jun 16 13:21:06 2010
 *
 * @brief Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#ifdef HAVE_VTK
#include <vtkAbstractArray.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#endif

// Standard headers used directly in this file
// (assert, va_list/printf, strftime, setw/setfill).
#include <cassert>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iomanip>
#include <sstream>

#include "base/helper.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "comm/mpi/datatypes_local.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"
#include "base/timer.hpp"

// Shared scratch buffer for the formatted output of Print/PrintOnce.
static char print_buffer[512];

using namespace VMG;

void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Isend(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Irecv(grid, tag_start, comm, Request());
}

void CommMPI::IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IsendBuffered(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IrecvBuffered(tag_start, comm, Request());
}

void CommMPI::ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridReplace(grid);
}

void CommMPI::AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridSum(grid);
}

void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 0411);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 0411);
    WaitAll();
    ReplaceBufferAll(grid_new, datatypes.Receive());
  }
}

void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 1806);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 1806);
    WaitAll();
    AddBufferAll(grid_new, datatypes.Receive());
  }
}
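
// All grid-to-grid transfers above follow the same non-blocking pattern:
// post every buffered receive first, then every buffered send, wait for
// completion, and finally unpack the receive buffers into the target grid
// (replacing values in CommSubgrid, accumulating them in CommAddSubgrid).
// A minimal sketch of the same idea in plain MPI terms (hypothetical
// buffers and counts, not part of this class):
//
//   MPI_Request req[2];
//   MPI_Irecv(recv_buf, count, MPI_DOUBLE, src, tag, comm, &req[0]);
//   MPI_Isend(send_buf, count, MPI_DOUBLE, dst, tag, comm, &req[1]);
//   MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
//   /* unpack recv_buf into the destination grid */
//
// Posting receives before sends keeps messages out of MPI's
// unexpected-message queue and can avoid extra internal copies.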

void CommMPI::CommToGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2310);
    IsendAllBuffered(grid, types.NB(), comm, 2310);
    WaitAll();
    ReplaceBufferAll(grid, types.Halo());
  }
}

void CommMPI::CommToGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2412);
    IsendAllBuffered(grid, types.NB(), comm, 2412);
    TestAll();
  }
}

void CommMPI::CommToGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  ReplaceBufferAll(grid, settings.DatatypesLocal(grid).Halo());
}
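
// CommToGhostsAsyncStart/Finish split the halo exchange so that interior
// work can overlap communication. A typical call sequence (hypothetical
// caller code, assuming ComputeInterior/ComputeBoundary routines exist):
//
//   comm.CommToGhostsAsyncStart(grid);  // post Irecv/Isend, return at once
//   ComputeInterior(grid);              // needs no halo values
//   comm.CommToGhostsAsyncFinish(grid); // WaitAll + unpack halos
//   ComputeBoundary(grid);              // may now read ghost cells
//
// CommFromGhosts below is the transpose operation: halo contributions are
// sent back and accumulated into the owning rank's interior points.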

void CommMPI::CommFromGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 1337);
    IsendAllBuffered(grid, types.Halo(), comm, 1337);
    WaitAll();
    AddBufferAll(grid, types.NB());
  }
}

void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
    IsendAllBuffered(grid, types.Halo(), comm, 0xc0ffee);
    TestAll();
  }
}

void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  AddBufferAll(grid, settings.DatatypesLocal(grid).NB());
}

void CommMPI::Barrier()
{
  MPI_Barrier(comm_global);
}

vmg_float CommMPI::GlobalSum(vmg_float value)
{
  vmg_float result = 0;
  MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
  return result;
}

vmg_float CommMPI::GlobalSumRoot(vmg_float value)
{
  vmg_float result = 0;
  MPI_Reduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
  return result;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_float& value)
{
  MPI_Bcast(&value, 1, MPI_DOUBLE, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_float& value, vmg_float* array)
{
  MPI_Gather(&value, 1, MPI_DOUBLE, array, 1, MPI_DOUBLE, 0, comm_global);
}

vmg_int CommMPI::GlobalSum(vmg_int value)
{
  vmg_int result = 0;
  MPI_Allreduce(&value, &result, 1, MPI_INT, MPI_SUM, comm_global);
  return result;
}

vmg_int CommMPI::GlobalSumRoot(vmg_int value)
{
  vmg_int result = 0;
  MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, comm_global);
  return result;
}

void CommMPI::GlobalSumArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm_global);
}

vmg_float CommMPI::GlobalMax(vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_MAX, comm_global);
  return result;
}

vmg_float CommMPI::GlobalMaxRoot(vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Reduce(&value, &result, 1, MPI_DOUBLE, MPI_MAX, 0, comm_global);
  return result;
}

void CommMPI::GlobalMaxArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_MAX, comm_global);
}

vmg_int CommMPI::GlobalMax(vmg_int value)
{
  vmg_int result = 0;
  MPI_Allreduce(&value, &result, 1, MPI_INT, MPI_MAX, comm_global);
  return result;
}

vmg_int CommMPI::GlobalMaxRoot(vmg_int value)
{
  vmg_int result = 0;
  MPI_Reduce(&value, &result, 1, MPI_INT, MPI_MAX, 0, comm_global);
  return result;
}

void CommMPI::GlobalMaxArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_MAX, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_int& value)
{
  MPI_Bcast(&value, 1, MPI_INT, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_int& value, vmg_int* array)
{
  MPI_Gather(&value, 1, MPI_INT, array, 1, MPI_INT, 0, comm_global);
}

void CommMPI::GlobalBroadcast(char* str)
{
  int size = std::strlen(str) + 1;
  MPI_Bcast(&size, 1, MPI_INT, 0, comm_global);
  MPI_Bcast(str, size, MPI_CHAR, 0, comm_global);
}
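
// Note on GlobalBroadcast(char*): the string length (including the
// terminating '\0') is broadcast first, then the characters, so the buffer
// passed on every non-root rank must be large enough to hold rank 0's
// string. A usage sketch (hypothetical caller code):
//
//   char msg[128] = "";                          // same capacity on all ranks
//   if (comm.GlobalRank() == 0)
//     snprintf(msg, sizeof(msg), "cycle %d done", cycle);
//   comm.GlobalBroadcast(msg);                   // msg now identical everywhere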

vmg_float CommMPI::LevelSum(const Grid& grid, vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
  return result;
}

vmg_float CommMPI::LevelSumRoot(const Grid& grid, vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
  return result;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm);
}

vmg_int CommMPI::LevelSum(const Grid& grid, vmg_int value)
{
  vmg_int result = 0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(&value, &result, 1, MPI_INT, MPI_SUM, comm);
  return result;
}

vmg_int CommMPI::LevelSumRoot(const Grid& grid, vmg_int value)
{
  vmg_int result = 0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, comm);
  return result;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm);
}

void CommMPI::Print(const OutputLevel level, const char* format, ...)
{
  bool print = (level == Output);

#ifdef OUTPUT_INFO
  print |= (level == Info);
#endif
#ifdef OUTPUT_DEBUG
  print |= (level == Debug);
#endif
#ifdef OUTPUT_TIMING
  print |= (level == Timing);
#endif

  if (print) {
    va_list args;
    va_start(args, format);
    // vsnprintf instead of vsprintf: truncate long messages rather than
    // overflow print_buffer.
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("vmg: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

void CommMPI::PrintOnce(const OutputLevel level, const char* format, ...)
{
  bool print = (level == Output);

#ifdef OUTPUT_INFO
  print |= (level == Info);
#endif
#ifdef OUTPUT_DEBUG
  print |= (level == Debug);
#endif
#ifdef OUTPUT_TIMING
  print |= (level == Timing);
#endif

  if (GlobalRank() == 0 && print) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("vmg: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}
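
// PrintXMLAll relies on the MPI-IO shared file pointer: rank 0 first
// appends the XML header with MPI_File_write_shared, then all ranks call
// the collective MPI_File_write_ordered, which appends each rank's data in
// rank order behind the header. Both routines advance the same shared
// pointer, so the code needs no explicit offsets.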

void CommMPI::PrintGridInformation(const Grid& grid, char* filename, const std::string& name)
{
  std::stringstream buf;
  MPI_File file;
  int rank, size;
  int size_local, size_local_max;

  MPI_Comm comm = settings.CommunicatorGlobal(grid);
  MPI_Comm comm_local = settings.CommunicatorLocal(grid);

  if (comm_local != MPI_COMM_NULL)
    MPI_Comm_size(comm_local, &size_local);
  else
    size_local = 0;

  if (comm != MPI_COMM_NULL) {

    MPI_Reduce(&size_local, &size_local_max, 1, MPI_INT, MPI_MAX, 0, comm);

    MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (rank == 0) {

      buf << "###########################################################" << std::endl
          << "GLOBAL INFORMATION:" << std::endl
          << "  NAME: " << name << std::endl
          << "  LEVEL: " << grid.Level() << std::endl
          << "  COMM_SIZE_GLOBAL: " << size << std::endl
          << "  COMM_SIZE_LOCAL: " << size_local_max << std::endl
          << "  GLOBAL:" << std::endl
          << "    GLOBAL_BEGIN: " << grid.Global().GlobalBegin() << std::endl
          << "    GLOBAL_END: " << grid.Global().GlobalEnd() << std::endl
          << "    GLOBAL_SIZE: " << grid.Global().GlobalSizeNew() << std::endl
          << "    GLOBAL_BEGIN_FINEST: " << grid.Global().GlobalBeginFinest() << std::endl
          << "    GLOBAL_END_FINEST: " << grid.Global().GlobalEndFinest() << std::endl
          << "    GLOBAL_SIZE_FINEST: " << grid.Global().GlobalSizeFinest() << std::endl
          << "  EXTENT:" << std::endl
          << "    BEGIN: " << grid.Extent().Begin() << std::endl
          << "    END: " << grid.Extent().End() << std::endl
          << "    SIZE: " << grid.Extent().Size() << std::endl
          << "    MESH_WIDTH: " << grid.Extent().MeshWidth() << std::endl
          << std::endl
          << "LOCAL INFORMATION:" << std::endl;
    }

    buf << "RANK " << rank << ":" << std::endl
        << "  GLOBAL:" << std::endl
        << "    LOCAL_BEGIN: " << grid.Global().LocalBegin() << std::endl
        << "    LOCAL_END: " << grid.Global().LocalEnd() << std::endl
        << "    LOCAL_SIZE: " << grid.Global().LocalSize() << std::endl
        << "    BOUNDARY_TYPE: " << grid.Global().BoundaryType() << std::endl
        << "  LOCAL:" << std::endl
        << "    BEGIN: " << grid.Local().Begin() << std::endl
        << "    END: " << grid.Local().End() << std::endl
        << "    SIZE: " << grid.Local().Size() << std::endl
        << "    SIZE_TOTAL: " << grid.Local().SizeTotal() << std::endl
        << "    HALO_BEGIN_1: " << grid.Local().HaloBegin1() << std::endl
        << "    HALO_END_1: " << grid.Local().HaloEnd1() << std::endl
        << "    HALO_SIZE_1: " << grid.Local().HaloSize1() << std::endl
        << "    HALO_BEGIN_2: " << grid.Local().HaloBegin2() << std::endl
        << "    HALO_END_2: " << grid.Local().HaloEnd2() << std::endl
        << "    HALO_SIZE_2: " << grid.Local().HaloSize2() << std::endl
        << "    BOUNDARY_BEGIN_1: " << grid.Local().BoundaryBegin1() << std::endl
        << "    BOUNDARY_END_1: " << grid.Local().BoundaryEnd1() << std::endl
        << "    BOUNDARY_SIZE_1: " << grid.Local().BoundarySize1() << std::endl
        << "    BOUNDARY_BEGIN_2: " << grid.Local().BoundaryBegin2() << std::endl
        << "    BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
        << "    BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl;

    if (rank == size-1)
      buf << "###########################################################" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintDatatypes(char* filename)
{
  std::stringstream buf;
  MPI_File file;

  if (comm_global != MPI_COMM_NULL) {

    MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    buf << settings << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;

  const Multigrid& mg = *MG::GetSol();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg.MinLevel(); i<=mg.MaxLevel(); ++i)
    PrintGridInformation(mg(i), filename, "MULTIGRID");

  for (int i=mg.MinLevel()+1; i<=mg.MaxLevel(); ++i)
    PrintGridInformation(settings.CoarserGrid(mg(i)), filename, "COARSER_GRID");

  for (int i=mg.MinLevel(); i<mg.MaxLevel(); ++i)
    PrintGridInformation(settings.FinerGrid(mg(i)), filename, "FINER_GRID");

  PrintDatatypes(filename);

  delete [] filename;
}

void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  int output_count = OutputCount();

#ifdef HAVE_VTK

  if (settings.CommunicatorLocal(grid) != MPI_COMM_NULL) {

    vtkSmartPointer<vtkImageData> image = vtkSmartPointer<vtkImageData>::New();
    image->SetExtent(grid.Global().LocalBegin().X(), grid.Global().LocalEnd().X()-1,
                     grid.Global().LocalBegin().Y(), grid.Global().LocalEnd().Y()-1,
                     grid.Global().LocalBegin().Z(), grid.Global().LocalEnd().Z()-1);

    image->SetSpacing(grid.Extent().MeshWidth().vec());
    image->SetOrigin(grid.Extent().Begin().vec());
    image->SetScalarTypeToDouble();
    image->SetNumberOfScalarComponents(1);
    image->AllocateScalars();
    image->GetPointData()->GetScalars()->SetName(information);

    Index i;
    const Index offset = grid.Global().LocalBegin() - grid.Local().Begin();

    for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
      for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
        for (i.Z()=grid.Local().Begin().Z(); i.Z()<grid.Local().End().Z(); ++i.Z())
          image->SetScalarComponentFromDouble(i.X() + offset.X(),
                                              i.Y() + offset.Y(),
                                              i.Z() + offset.Z(),
                                              0, grid.GetVal(i));

    image->Update();

    int rank, size;
    MPI_Comm_rank(comm_global, &rank);
    MPI_Comm_size(comm_global, &size);

    std::stringstream filename;
    filename << OutputPath() << std::setw(4) << std::setfill('0') << output_count << "_" << rank << ".vti";

    vtkSmartPointer<vtkXMLImageDataWriter> writer = vtkSmartPointer<vtkXMLImageDataWriter>::New();
    writer->SetFileName(filename.str().c_str());
    writer->SetCompressorTypeToNone();
    writer->SetDataModeToAscii();
    writer->SetInput(image);
    writer->Update();
    writer->Write();

  }

#else /* HAVE_VTK */
  Index i;
  std::stringstream buf;

  Index begin, end;
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  // Adjacent pieces in the assembled image must share their boundary
  // plane, so ranks not touching the global boundary also write the first
  // ghost plane (just filled by CommToGhosts above). The loop variable is
  // named dim to avoid shadowing the Index i declared earlier.
  for (int dim=0; dim<3; ++dim) {
    end[dim] = grid.Local().End()[dim] + (grid.Global().LocalEnd()[dim] == grid.Global().GlobalSize()[dim] ? 0 : grid.Local().HaloSize1()[dim]);
    end_local[dim] = grid.Global().LocalEnd()[dim] - (grid.Global().LocalEnd()[dim] == grid.Global().GlobalSize()[dim] ? 1 : 0);
  }

  begin = grid.Local().Begin();
  begin_local = grid.Global().LocalBegin();
  begin_global = 0;
  end_global = grid.Global().GlobalSize()-1;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local,
                    output_count);
#endif /* HAVE_VTK */
}

void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);
  PrintGrid(*temp, information);
}

void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local,
                                const int& output_count)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  // snprintf instead of sprintf: guard against overflowing the fixed-size
  // filename buffers.
  snprintf(parallel_filename, sizeof(parallel_filename), "%s%04d.pvti", OutputPath().c_str(), output_count);
  snprintf(serial_filename, sizeof(serial_filename), "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << "  <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] << " ";

    buf << "\"" << std::endl
        << "              GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << "    <PPointData Scalars=\"" << information << "\">" << std::endl
        << "      <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << "    </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");

  if ((end_local-begin_local).Product() > 0) {
    buf << "    <Piece Extent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_local[i] << " " << end_local[i] << " ";

    buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
  }

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << "  </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}
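
// For illustration, the assembled .pvti index file looks roughly like this
// (hypothetical extents, spacing, and scalar name; two pieces from a run
// split along X):
//
//   <?xml version="1.0"?>
//   <VTKFile type="PImageData" version="0.1" byte_order="LittleEndian">
//     <PImageData WholeExtent="0 64 0 64 0 64 "
//                 GhostLevel="0" Origin="0 0 0" Spacing="0.015625 0.015625 0.015625 ">
//       <PPointData Scalars="residual">
//         <PDataArray type="Float32" Name="residual"/>
//       </PPointData>
//       <Piece Extent="0 32 0 64 0 64 " Source="0000_0.vti"/>
//       <Piece Extent="32 64 0 64 0 64 " Source="0000_1.vti"/>
//     </PImageData>
//   </VTKFile>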

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  snprintf(serial_filename, sizeof(serial_filename), "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << "  <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] << " ";

  buf << "\"" << std::endl
      << "             Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << "    <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] << " ";

  buf << "\">" << std::endl
      << "      <PointData Scalars=\"" << information << "\">" << std::endl
      << "        <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << "          ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}
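
// CreateSerialOutputFile intentionally returns the still-open handle: the
// caller (CreateOutputFiles) streams the grid values into it and then calls
// FinalizeSerialOutputFile, which writes the closing XML tags and closes
// the file.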

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << "        </DataArray>" << std::endl
      << "      </PointData>" << std::endl
      << "    </Piece>" << std::endl
      << "  </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

int CommMPI::GlobalRank() const
{
  int rank;
  MPI_Comm_rank(comm_global, &rank);
  return rank;
}

int CommMPI::GlobalSize() const
{
  int size;
  MPI_Comm_size(comm_global, &size);
  return size;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

int CommMPI::Rank(const Grid& grid) const
{
  int rank;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_rank(comm, &rank);
  return rank;
}

int CommMPI::Size(const Grid& grid) const
{
  int size;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_size(comm, &size);
  return size;
}

Index CommMPI::Pos(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::Procs(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    comm_global = comm;

  } else {

    const int log2 = Helper::log_2(size);

    if (Helper::intpow(2, log2) == size) {
      for (int i=0; i<3; ++i)
        dims[i] = Helper::intpow(2, log2 / 3 + (log2%3 > i ? 1 : 0));
    } else {
      MPI_Dims_create(size, 3, dims);
    }

#ifdef OUTPUT_DEBUG
    if (rank == 0)
      std::printf("Process grid: %d %d %d\n", dims[0], dims[1], dims[2]);
#endif

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);
}
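
// Worked example for the power-of-two branch above: with size = 32 we get
// log2 = 5, so dims[i] = 2^(5/3 + (5%3 > i ? 1 : 0)) = 2^2, 2^2, 2^1, i.e.
// a 4 x 4 x 2 process grid (product 32). The extra factors of two go to
// the leading dimensions; sizes that are not powers of two fall back to
// MPI_Dims_create.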

CommMPI::~CommMPI()
{
  MPI_Comm_free(&comm_global);
#ifdef VMG_ONE_SIDED
  if (win_created)
    MPI_Win_free(&win);
#endif
  MPI_Info_free(&info);
}

Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  return settings.CoarserGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  return settings.FinerGrid(multigrid(multigrid.Level()));
}

std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  } else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}
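
// The output directory name is created only on rank 0 and then distributed
// with the usual two-step protocol: broadcast the length first so the other
// ranks can allocate a receive buffer, then broadcast the characters
// themselves (the same pattern as GlobalBroadcast(char*) above, but with
// per-rank allocation instead of a caller-provided buffer).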

#endif /* HAVE_MPI */