source: src/comm/comm_mpi.cpp@ 290aa3

Last change on this file since 290aa3 was 290aa3, checked in by Julian Iseringhausen <isering@…>, 13 years ago

Bugfix in CommMPI::GlobalBroadcast(char* str).

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1874 5161e1c8-67bf-11de-9fd5-51895aff932f
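For reference, as committed CommMPI::GlobalBroadcast(char* str) first broadcasts the string length (including the terminating NUL) from rank 0 and then the characters themselves, so every receiving rank knows how many bytes follow. A minimal usage sketch (the calling code, the comm object name and the buffer size are hypothetical; only GlobalRank() and GlobalBroadcast(char*) are taken from the class below):

  char path[1024] = "";                  // large enough (and initialized) on every rank
  if (comm.GlobalRank() == 0)
    std::strcpy(path, "output/run_0/");  // only rank 0 holds the real content
  comm.GlobalBroadcast(path);            // afterwards every rank holds the same string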

/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file   comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date   Wed Jun 16 13:21:06 2010
 *
 * @brief  Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#ifdef HAVE_VTK
#include <vtkAbstractArray.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#endif

#include <cassert>   // assert() on communicator validity
#include <cstdarg>   // va_list in PrintString/PrintStringOnce
#include <cstdio>    // printf/sprintf/vsprintf
#include <cstring>
#include <ctime>     // time/localtime/strftime in CreateOutputDirectory
#include <iomanip>   // std::setw/std::setfill in PrintGrid
#include <sstream>

#include "base/helper.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "comm/mpi/datatypes_local.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"
#include "base/timer.hpp"

static char print_buffer[512];

using namespace VMG;

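/*
 * The helpers below post non-blocking sends/receives for a whole set of
 * derived MPI datatypes. The "Buffered" variants (IsendBuffered/IrecvBuffered)
 * go through the datatypes' internal buffers, which are merged back into the
 * grid afterwards via ReplaceBufferAll() (overwrite) or AddBufferAll()
 * (accumulate). Completion is awaited collectively via WaitAll() or polled
 * via TestAll().
 */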
void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Isend(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Irecv(grid, tag_start, comm, Request());
}

void CommMPI::IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IsendBuffered(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IrecvBuffered(tag_start, comm, Request());
}

void CommMPI::ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridReplace(grid);
}

void CommMPI::AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridSum(grid);
}

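/*
 * Inter-grid transfer: copy (CommSubgrid) or accumulate (CommAddSubgrid)
 * data from grid_old into grid_new using the globally precomputed datatypes.
 * The literal message tags (0411 is an octal constant, 1806, ...) merely keep
 * the message streams of the individual operations apart.
 */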
void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 0411);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 0411);
    WaitAll();
    ReplaceBufferAll(grid_new, datatypes.Receive());
  }
}

void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 1806);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 1806);
    WaitAll();
    AddBufferAll(grid_new, datatypes.Receive());
  }
}

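/*
 * Halo exchange within one level: CommToGhosts() overwrites the local halo
 * with neighbour data (GridReplace), CommFromGhosts() sends the halo back and
 * accumulates it onto the neighbours' points (GridSum). The Async variants
 * split the exchange into a Start phase (post the requests) and a Finish
 * phase (wait and merge), so computation can overlap communication.
 */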
void CommMPI::CommToGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2310);
    IsendAllBuffered(grid, types.NB(), comm, 2310);
    WaitAll();
    ReplaceBufferAll(grid, types.Halo());
  }
}

void CommMPI::CommToGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2412);
    IsendAllBuffered(grid, types.NB(), comm, 2412);
    TestAll();
  }
}

void CommMPI::CommToGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  ReplaceBufferAll(grid, settings.DatatypesLocal(grid).Halo());
}

void CommMPI::CommFromGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 1337);
    IsendAllBuffered(grid, types.Halo(), comm, 1337);
    WaitAll();
    AddBufferAll(grid, types.NB());
  }
}

void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
    IsendAllBuffered(grid, types.Halo(), comm, 0xc0ffee);
    TestAll();
  }
}

void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  AddBufferAll(grid, settings.DatatypesLocal(grid).NB());
}

void CommMPI::Barrier()
{
  MPI_Barrier(comm_global);
}

vmg_float CommMPI::GlobalSum(const vmg_float& value)
{
  vmg_float result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
  return result;
}

vmg_float CommMPI::GlobalSumRoot(const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_float& value)
{
  MPI_Bcast(&value, 1, MPI_DOUBLE, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_float& value, vmg_float* array)
{
  MPI_Gather(&value, 1, MPI_DOUBLE, array, 1, MPI_DOUBLE, 0, comm_global);
}

vmg_int CommMPI::GlobalSum(const vmg_int& value)
{
  vmg_int result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm_global);
  return result;
}

vmg_int CommMPI::GlobalSumRoot(const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm_global);
}

vmg_float CommMPI::GlobalMax(const vmg_float& value)
{
  vmg_float result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_MAX, comm_global);
  return result;
}

vmg_float CommMPI::GlobalMaxRoot(const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_MAX, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalMaxArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_MAX, comm_global);
}

vmg_int CommMPI::GlobalMax(const vmg_int& value)
{
  vmg_int result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_MAX, comm_global);
  return result;
}

vmg_int CommMPI::GlobalMaxRoot(const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_MAX, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalMaxArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_MAX, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_int& value)
{
  MPI_Bcast(&value, 1, MPI_INT, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_int& value, vmg_int* array)
{
  MPI_Gather(&value, 1, MPI_INT, array, 1, MPI_INT, 0, comm_global);
}

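/*
 * Broadcasts a NUL-terminated string from rank 0. The length (including the
 * terminator) is broadcast first so that the other ranks know how many
 * characters follow; the caller must provide a sufficiently large buffer on
 * every rank.
 */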
void CommMPI::GlobalBroadcast(char* str)
{
  int size = std::strlen(str) + 1;
  MPI_Bcast(&size, 1, MPI_INT, 0, comm_global);
  MPI_Bcast(str, size, MPI_CHAR, 0, comm_global);
}

vmg_float CommMPI::LevelSum(const Grid& grid, const vmg_float& value)
{
  vmg_float result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
  return result;
}

vmg_float CommMPI::LevelSumRoot(const Grid& grid, const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm);
}

vmg_int CommMPI::LevelSum(const Grid& grid, const vmg_int& value)
{
  vmg_int result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm);
  return result;
}

vmg_int CommMPI::LevelSumRoot(const Grid& grid, const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm);
}

void CommMPI::PrintString(const char* format, ...)
{
  va_list args;
  va_start(args, format);
  vsprintf(print_buffer, format, args);
  printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
  va_end(args);
}

void CommMPI::PrintStringOnce(const char* format, ...)
{
  if (GlobalRank() == 0) {
    va_list args;
    va_start(args, format);
    vsprintf(print_buffer, format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}

void CommMPI::PrintGridInformation(const Grid& grid, char* filename, const std::string& name)
{
  std::stringstream buf;
  MPI_File file;
  int rank, size;
  int size_local, size_local_max;

  MPI_Comm comm = settings.CommunicatorGlobal(grid);
  MPI_Comm comm_local = settings.CommunicatorLocal(grid);

  if (comm_local != MPI_COMM_NULL)
    MPI_Comm_size(comm_local, &size_local);
  else
    size_local = 0;

  if (comm != MPI_COMM_NULL) {

    MPI_Reduce(&size_local, &size_local_max, 1, MPI_INT, MPI_MAX, 0, comm);

    MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (rank == 0) {

      buf << "###########################################################" << std::endl
          << "GLOBAL INFORMATION:" << std::endl
          << " NAME: " << name << std::endl
          << " LEVEL: " << grid.Level() << std::endl
          << " COMM_SIZE_GLOBAL: " << size << std::endl
          << " COMM_SIZE_LOCAL: " << size_local_max << std::endl
          << " GLOBAL:" << std::endl
          << " GLOBAL_FINER_BEGIN: " << grid.Global().GlobalFinerBegin() << std::endl
          << " GLOBAL_FINER_END: " << grid.Global().GlobalFinerEnd() << std::endl
          << " GLOBAL_FINER_SIZE: " << grid.Global().GlobalFinerSize() << std::endl
          << " FINEST_ABS_BEGIN: " << grid.Global().FinestAbsBegin() << std::endl
          << " FINEST_ABS_END: " << grid.Global().FinestAbsEnd() << std::endl
          << " FINEST_ABS_SIZE: " << grid.Global().FinestAbsSize() << std::endl
          << " GLOBAL_SIZE: " << grid.Global().GlobalSize() << std::endl
          << " EXTENT:" << std::endl
          << " BEGIN: " << grid.Extent().Begin() << std::endl
          << " END: " << grid.Extent().End() << std::endl
          << " SIZE: " << grid.Extent().Size() << std::endl
          << " MESH_WIDTH: " << grid.Extent().MeshWidth() << std::endl
          << std::endl
          << "LOCAL INFORMATION:" << std::endl;
    }

    buf << "RANK " << rank << ":" << std::endl
        << " GLOBAL:" << std::endl
        << " LOCAL_BEGIN: " << grid.Global().LocalBegin() << std::endl
        << " LOCAL_END: " << grid.Global().LocalEnd() << std::endl
        << " LOCAL_SIZE: " << grid.Global().LocalSize() << std::endl
        << " LOCAL_FINER_BEGIN: " << grid.Global().LocalFinerBegin() << std::endl
        << " LOCAL_FINER_END: " << grid.Global().LocalFinerEnd() << std::endl
        << " LOCAL_FINER_SIZE: " << grid.Global().LocalFinerSize() << std::endl
        << " BOUNDARY_TYPE: " << grid.Global().BoundaryType() << std::endl
        << " LOCAL:" << std::endl
        << " BEGIN: " << grid.Local().Begin() << std::endl
        << " END: " << grid.Local().End() << std::endl
        << " SIZE: " << grid.Local().Size() << std::endl
        << " SIZE_TOTAL: " << grid.Local().SizeTotal() << std::endl
        << " HALO_BEGIN_1: " << grid.Local().HaloBegin1() << std::endl
        << " HALO_END_1: " << grid.Local().HaloEnd1() << std::endl
        << " HALO_SIZE_1: " << grid.Local().HaloSize1() << std::endl
        << " HALO_BEGIN_2: " << grid.Local().HaloBegin2() << std::endl
        << " HALO_END_2: " << grid.Local().HaloEnd2() << std::endl
        << " HALO_SIZE_2: " << grid.Local().HaloSize2() << std::endl
        << " BOUNDARY_BEGIN_1: " << grid.Local().BoundaryBegin1() << std::endl
        << " BOUNDARY_END_1: " << grid.Local().BoundaryEnd1() << std::endl
        << " BOUNDARY_SIZE_1: " << grid.Local().BoundarySize1() << std::endl
        << " BOUNDARY_BEGIN_2: " << grid.Local().BoundaryBegin2() << std::endl
        << " BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
        << " BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl
        << " FINER_BEGIN: " << grid.Local().FinerBegin() << std::endl
        << " FINER_END: " << grid.Local().FinerEnd() << std::endl
        << " FINER_SIZE: " << grid.Local().FinerSize() << std::endl;

    if (rank == size-1)
      buf << "###########################################################" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;

  const Multigrid& mg = *MG::GetSol();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg.MinLevel(); i<=mg.MaxLevel(); ++i)
    PrintGridInformation(mg(i), filename, "MULTIGRID");

  for (int i=mg.MinLevel()+1; i<=mg.MaxLevel(); ++i)
    PrintGridInformation(settings.CoarserGrid(mg(i)), filename, "COARSER_GRID");

  for (int i=mg.MinLevel(); i<mg.MaxLevel(); ++i)
    PrintGridInformation(settings.FinerGrid(mg(i)), filename, "FINER_GRID");

  delete [] filename;

}

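/*
 * Grid output: with VTK support each rank writes its local part of the grid
 * to its own .vti image file; without VTK the values are streamed as plain
 * ASCII and assembled into parallel/serial output files by
 * CreateOutputFiles() further below.
 */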
void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  int output_count = OutputCount();

#ifdef HAVE_VTK

  if (settings.CommunicatorLocal(grid) != MPI_COMM_NULL) {

    Index end, end_global;

    for (int i=0; i<3; ++i) {
      end[i] = grid.Local().End()[i];
      end_global[i] = grid.Global().LocalEnd()[i];
    }

    vtkSmartPointer<vtkImageData> image = vtkSmartPointer<vtkImageData>::New();
    image->SetExtent(grid.Global().LocalBegin().X(), end_global.X()-1,
                     grid.Global().LocalBegin().Y(), end_global.Y()-1,
                     grid.Global().LocalBegin().Z(), end_global.Z()-1);
    image->SetSpacing(grid.Extent().MeshWidth().vec());
    image->SetOrigin(grid.Extent().Begin().vec());
    image->SetScalarTypeToDouble();
    image->SetNumberOfScalarComponents(1);
    image->AllocateScalars();
    image->GetPointData()->GetScalars()->SetName(information);

    Index i;
    for (i.X()=grid.Local().Begin().X(); i.X()<end.X(); ++i.X())
      for (i.Y()=grid.Local().Begin().Y(); i.Y()<end.Y(); ++i.Y())
        for (i.Z()=grid.Local().Begin().Z(); i.Z()<end.Z(); ++i.Z())
          image->SetScalarComponentFromDouble(i.X() - grid.Local().Begin().X() + grid.Global().LocalBegin().X(),
                                              i.Y() - grid.Local().Begin().Y() + grid.Global().LocalBegin().Y(),
                                              i.Z() - grid.Local().Begin().Z() + grid.Global().LocalBegin().Z(),
                                              0, grid.GetVal(i));

    image->Update();

    int rank, size;
    MPI_Comm_rank(comm_global, &rank);
    MPI_Comm_size(comm_global, &size);

    std::stringstream filename;
    filename << OutputPath() << std::setw(4) << std::setfill('0') << output_count << "_" << rank << ".vti";

    vtkSmartPointer<vtkXMLImageDataWriter> writer = vtkSmartPointer<vtkXMLImageDataWriter>::New();
    writer->SetFileName(filename.str().c_str());
    writer->SetCompressorTypeToNone();
    writer->SetDataModeToAscii();
    writer->SetInput(image);
    writer->Update();
    writer->Write();

  }

#else /* HAVE_VTK */
  Index i;
  std::stringstream buf;

  Index begin, end;
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int i=0; i<3; ++i) {
    end[i] = grid.Local().End()[i] + (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 0 : grid.Local().HaloSize1()[i]);
    end_local[i] = grid.Global().LocalEnd()[i] - (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 1 : 0);
  }

  begin = grid.Local().Begin();
  begin_local = grid.Global().LocalBegin();
  begin_global = 0;
  end_global = grid.Global().GlobalSize()-1;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local,
                    output_count);
#endif /* HAVE_VTK */
}

void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);
  PrintGrid(*temp, information);
}

void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local,
                                const int& output_count)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

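/*
 * Writes the .pvti master file that stitches the per-rank .vti pieces
 * together: rank 0 writes the XML header and footer, and every rank appends
 * its own <Piece> entry (skipped if its local extent is empty).
 */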
void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  sprintf(parallel_filename, "%s%04d.pvti", OutputPath().c_str(), output_count);
  sprintf(serial_filename, "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << " <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] << " ";

    buf << "\"" << std::endl
        << " GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << " <PPointData Scalars=\"" << information << "\">" << std::endl
        << " <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << " </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");

  if ((end_local-begin_local).Product() > 0) {
    buf << " <Piece Extent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_local[i] << " " << end_local[i] << " ";

    buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
  }

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << " </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  sprintf(serial_filename, "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << " <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] << " ";

  buf << "\"" << std::endl
      << " Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << " <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] << " ";

  buf << "\">" << std::endl
      << " <PointData Scalars=\"" << information << "\">" << std::endl
      << " <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << " ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << " </DataArray>" << std::endl
      << " </PointData>" << std::endl
      << " </Piece>" << std::endl
      << " </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

int CommMPI::GlobalRank() const
{
  int rank;
  MPI_Comm_rank(comm_global, &rank);
  return rank;
}

int CommMPI::GlobalSize() const
{
  int size;
  MPI_Comm_size(comm_global, &size);
  return size;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

int CommMPI::Rank(const Grid& grid) const
{
  int rank;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_rank(comm, &rank);
  return rank;
}

int CommMPI::Size(const Grid& grid) const
{
  int size;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_size(comm, &size);
  return size;
}

Index CommMPI::Pos(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::Procs(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

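/*
 * Communicator setup: if the supplied communicator already carries a
 * Cartesian topology it is used directly; otherwise a 3D Cartesian process
 * grid is created (power-of-two sizes are factored by hand, everything else
 * is left to MPI_Dims_create). Periodicity follows the boundary conditions.
 */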
void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    comm_global = comm;

  } else {

    const int log2 = Helper::log_2(size);

    if (Helper::intpow(2, log2) == size) {
      for (int i=0; i<3; ++i)
        dims[i] = Helper::intpow(2, log2 / 3 + (log2%3 > i ? 1 : 0));
    } else {
      MPI_Dims_create(size, 3, dims);
    }

#ifdef DEBUG_OUTPUT
    if (rank == 0)
      std::printf("Dims: %d %d %d\n", dims[0], dims[1], dims[2]);
#endif

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);

}

CommMPI::~CommMPI()
{
  MPI_Comm_free(&comm_global);
#ifdef VMG_ONE_SIDED
  if (win_created)
    MPI_Win_free(&win);
#endif
  MPI_Info_free(&info);
}

Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  return settings.CoarserGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  return settings.FinerGrid(multigrid(multigrid.Level()));
}

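/*
 * With Boost.Filesystem available, rank 0 creates a timestamped directory
 * under ./output/ (suffixed with a counter if it already exists) and
 * broadcasts the resulting path to all other ranks; without Boost.Filesystem
 * the current working directory is used.
 */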
std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  } else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}


#endif /* HAVE_MPI */