source: src/comm/comm_mpi.cpp@ 2d4211

Last change on this file since 2d4211 was 2a5451, checked in by Julian Iseringhausen <isering@…>, 14 years ago

vmg now succeeds on test_comm.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1816 5161e1c8-67bf-11de-9fd5-51895aff932f

/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file   comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date   Wed Jun 16 13:21:06 2010
 *
 * @brief  Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#ifdef HAVE_VTK
#include <vtkAbstractArray.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#endif

// <cassert>, <cstdarg>, <cstdio>, <ctime> and <iomanip> are included
// explicitly for assert, the varargs logging helpers, the timestamped
// output directory and the stream formatting used below.
#include <cassert>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iomanip>
#include <sstream>

#include "base/helper.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "comm/mpi/datatypes_local.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"
#include "base/timer.hpp"

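// Shared formatting buffer for the PrintString* helpers below. A single
// static buffer of fixed size, so these helpers are not thread-safe.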
static char print_buffer[512];

using namespace VMG;

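/*
 * The *All helpers post one non-blocking send or receive per datatype in the
 * given list. Each operation registers its MPI_Request via Request();
 * completion is handled collectively by a later WaitAll()/TestAll() call.
 */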
void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Isend(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Irecv(grid, tag_start, comm, Request());
}

void CommMPI::IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IsendBuffered(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IrecvBuffered(tag_start, comm, Request());
}

void CommMPI::ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridReplace(grid);
}

void CommMPI::AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridSum(grid);
}

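/*
 * Copy (CommSubgrid) or accumulate (CommAddSubgrid) grid data between two,
 * possibly differently distributed, grids using the precomputed global
 * datatypes. The integer literals passed as message tags (0411, 1806, ...)
 * are arbitrary per-operation tags; note that 0411 is an octal literal.
 */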
void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 0411);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 0411);
    WaitAll();
    ReplaceBufferAll(grid_new, datatypes.Receive());
  }
}

void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 1806);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 1806);
    WaitAll();
    AddBufferAll(grid_new, datatypes.Receive());
  }
}

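/*
 * Halo exchange: receive into the halo regions (types.Halo()) while sending
 * the matching boundary data (types.NB()) to the neighboring processes, then
 * copy the received buffers into the halo.
 */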
void CommMPI::CommToGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2310);
    IsendAllBuffered(grid, types.NB(), comm, 2310);
    WaitAll();
    ReplaceBufferAll(grid, types.Halo());
  }
}

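/*
 * Split-phase variants: *AsyncStart posts the non-blocking operations and
 * *AsyncFinish completes them and unpacks, allowing computation to overlap
 * with the communication in between.
 */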
void CommMPI::CommToGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2412);
    IsendAllBuffered(grid, types.NB(), comm, 2412);
    TestAll();
  }
}

void CommMPI::CommToGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  ReplaceBufferAll(grid, settings.DatatypesLocal(grid).Halo());
}

void CommMPI::CommFromGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 1337);
    IsendAllBuffered(grid, types.Halo(), comm, 1337);
    WaitAll();
    AddBufferAll(grid, types.NB());
  }
}

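/*
 * Portability note: the tag 0xc0ffee (12648430) exceeds the minimum tag range
 * guaranteed by the MPI standard (MPI_TAG_UB is only required to be at least
 * 32767), so this relies on the implementation offering a larger tag space.
 */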
void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
    IsendAllBuffered(grid, types.Halo(), comm, 0xc0ffee);
    TestAll();
  }
}

void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  AddBufferAll(grid, settings.DatatypesLocal(grid).NB());
}

void CommMPI::Barrier()
{
  MPI_Barrier(comm_global);
}

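/*
 * Global reductions over comm_global. These assume that vmg_float and vmg_int
 * are laid out as double and int respectively, matching the MPI_DOUBLE and
 * MPI_INT datatypes passed below.
 */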
vmg_float CommMPI::GlobalSum(const vmg_float& value)
{
  vmg_float result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
  return result;
}

vmg_float CommMPI::GlobalSumRoot(const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_float& value)
{
  MPI_Bcast(&value, 1, MPI_DOUBLE, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_float& value, vmg_float* array)
{
  MPI_Gather(&value, 1, MPI_DOUBLE, array, 1, MPI_DOUBLE, 0, comm_global);
}

vmg_int CommMPI::GlobalSum(const vmg_int& value)
{
  vmg_int result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm_global);
  return result;
}

vmg_int CommMPI::GlobalSumRoot(const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm_global);
}

vmg_float CommMPI::GlobalMax(const vmg_float& value)
{
  vmg_float result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_MAX, comm_global);
  return result;
}

vmg_float CommMPI::GlobalMaxRoot(const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_MAX, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalMaxArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_MAX, comm_global);
}

vmg_int CommMPI::GlobalMax(const vmg_int& value)
{
  vmg_int result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_MAX, comm_global);
  return result;
}

vmg_int CommMPI::GlobalMaxRoot(const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_MAX, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalMaxArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_MAX, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_int& value)
{
  MPI_Bcast(&value, 1, MPI_INT, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_int& value, vmg_int* array)
{
  MPI_Gather(&value, 1, MPI_INT, array, 1, MPI_INT, 0, comm_global);
}

void CommMPI::GlobalBroadcast(char* str)
{
  // Broadcast the length first: calling std::strlen on the receiving ranks
  // would measure their local buffer contents, and MPI_Bcast requires the
  // same count on all ranks.
  int str_size = static_cast<int>(std::strlen(str)) + 1;
  MPI_Bcast(&str_size, 1, MPI_INT, 0, comm_global);
  MPI_Bcast(str, str_size, MPI_CHAR, 0, comm_global);
}

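/*
 * Per-level reductions over the communicator associated with one grid level.
 * These must only be called on ranks that participate in the given level,
 * hence the asserts.
 */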
vmg_float CommMPI::LevelSum(const Grid& grid, const vmg_float& value)
{
  vmg_float result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
  return result;
}

vmg_float CommMPI::LevelSumRoot(const Grid& grid, const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm);
}

vmg_int CommMPI::LevelSum(const Grid& grid, const vmg_int& value)
{
  vmg_int result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm);
  return result;
}

vmg_int CommMPI::LevelSumRoot(const Grid& grid, const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm);
}

void CommMPI::PrintString(const char* format, ...)
{
  va_list args;
  va_start(args, format);
  // vsnprintf instead of vsprintf so long messages cannot overrun print_buffer.
  vsnprintf(print_buffer, sizeof(print_buffer), format, args);
  printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
  va_end(args);
}

void CommMPI::PrintStringOnce(const char* format, ...)
{
  if (GlobalRank() == 0) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

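/*
 * XML output via MPI-IO. PrintXML writes the whole file from the calling rank
 * alone (MPI_COMM_SELF); PrintXMLAll lets rank 0 write the header and then
 * appends the data of all ranks in rank order via the shared file pointer.
 */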
void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}

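/*
 * Append a human-readable dump of one grid's decomposition to a shared text
 * file. All ranks of the grid's global communicator write their local block
 * in rank order via MPI_File_write_ordered.
 */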
void CommMPI::PrintGridInformation(const Grid& grid, char* filename, const std::string& name)
{
  std::stringstream buf;
  MPI_File file;
  int rank, size;
  int size_local, size_local_max;

  MPI_Comm comm = settings.CommunicatorGlobal(grid);
  MPI_Comm comm_local = settings.CommunicatorLocal(grid);

  if (comm_local != MPI_COMM_NULL)
    MPI_Comm_size(comm_local, &size_local);
  else
    size_local = 0;

  if (comm != MPI_COMM_NULL) {

    MPI_Reduce(&size_local, &size_local_max, 1, MPI_INT, MPI_MAX, 0, comm);

    MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (rank == 0) {

      buf << "###########################################################" << std::endl
          << "GLOBAL INFORMATION:" << std::endl
          << "  NAME: " << name << std::endl
          << "  LEVEL: " << grid.Level() << std::endl
          << "  COMM_SIZE_GLOBAL: " << size << std::endl
          << "  COMM_SIZE_LOCAL: " << size_local_max << std::endl
          << "  GLOBAL:" << std::endl
          << "    GLOBAL_FINER_BEGIN: " << grid.Global().GlobalFinerBegin() << std::endl
          << "    GLOBAL_FINER_END: " << grid.Global().GlobalFinerEnd() << std::endl
          << "    GLOBAL_FINER_SIZE: " << grid.Global().GlobalFinerSize() << std::endl
          << "    FINEST_ABS_BEGIN: " << grid.Global().FinestAbsBegin() << std::endl
          << "    FINEST_ABS_END: " << grid.Global().FinestAbsEnd() << std::endl
          << "    FINEST_ABS_SIZE: " << grid.Global().FinestAbsSize() << std::endl
          << "    GLOBAL_SIZE: " << grid.Global().GlobalSize() << std::endl
          << "  EXTENT:" << std::endl
          << "    BEGIN: " << grid.Extent().Begin() << std::endl
          << "    END: " << grid.Extent().End() << std::endl
          << "    SIZE: " << grid.Extent().Size() << std::endl
          << "    MESH_WIDTH: " << grid.Extent().MeshWidth() << std::endl
          << std::endl
          << "LOCAL INFORMATION:" << std::endl;
    }

    buf << "RANK " << rank << ":" << std::endl
        << "  GLOBAL:" << std::endl
        << "    LOCAL_BEGIN: " << grid.Global().LocalBegin() << std::endl
        << "    LOCAL_END: " << grid.Global().LocalEnd() << std::endl
        << "    LOCAL_SIZE: " << grid.Global().LocalSize() << std::endl
        << "    LOCAL_FINER_BEGIN: " << grid.Global().LocalFinerBegin() << std::endl
        << "    LOCAL_FINER_END: " << grid.Global().LocalFinerEnd() << std::endl
        << "    LOCAL_FINER_SIZE: " << grid.Global().LocalFinerSize() << std::endl
        << "    BOUNDARY_TYPE: " << grid.Global().BoundaryType() << std::endl
        << "  LOCAL:" << std::endl
        << "    BEGIN: " << grid.Local().Begin() << std::endl
        << "    END: " << grid.Local().End() << std::endl
        << "    SIZE: " << grid.Local().Size() << std::endl
        << "    SIZE_TOTAL: " << grid.Local().SizeTotal() << std::endl
        << "    HALO_BEGIN_1: " << grid.Local().HaloBegin1() << std::endl
        << "    HALO_END_1: " << grid.Local().HaloEnd1() << std::endl
        << "    HALO_SIZE_1: " << grid.Local().HaloSize1() << std::endl
        << "    HALO_BEGIN_2: " << grid.Local().HaloBegin2() << std::endl
        << "    HALO_END_2: " << grid.Local().HaloEnd2() << std::endl
        << "    HALO_SIZE_2: " << grid.Local().HaloSize2() << std::endl
        << "    BOUNDARY_BEGIN_1: " << grid.Local().BoundaryBegin1() << std::endl
        << "    BOUNDARY_END_1: " << grid.Local().BoundaryEnd1() << std::endl
        << "    BOUNDARY_SIZE_1: " << grid.Local().BoundarySize1() << std::endl
        << "    BOUNDARY_BEGIN_2: " << grid.Local().BoundaryBegin2() << std::endl
        << "    BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
        << "    BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl
        << "    FINER_BEGIN: " << grid.Local().FinerBegin() << std::endl
        << "    FINER_END: " << grid.Local().FinerEnd() << std::endl
        << "    FINER_SIZE: " << grid.Local().FinerSize() << std::endl;

    if (rank == size-1)
      buf << "###########################################################" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;

  const Multigrid& mg = *MG::GetSol();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg.MinLevel(); i<=mg.MaxLevel(); ++i)
    PrintGridInformation(mg(i), filename, "MULTIGRID");

  for (int i=mg.MinLevel()+1; i<=mg.MaxLevel(); ++i)
    PrintGridInformation(settings.CoarserGrid(mg(i)), filename, "COARSER_GRID");

  for (int i=mg.MinLevel(); i<mg.MaxLevel(); ++i)
    PrintGridInformation(settings.FinerGrid(mg(i)), filename, "FINER_GRID");

  delete [] filename;
}

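/*
 * Write one grid to disk for inspection. With VTK available, each rank writes
 * its local block as a serial .vti file (using the VTK 5-era image-data API,
 * e.g. SetScalarTypeToDouble/AllocateScalars); otherwise a plain-text
 * fallback serializes the values and CreateOutputFiles wraps them in XML.
 */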
void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  int output_count = OutputCount();

#ifdef HAVE_VTK

  if (settings.CommunicatorLocal(grid) != MPI_COMM_NULL) {

    Index end, end_global;

    for (int i=0; i<3; ++i) {
      end[i] = grid.Local().End()[i];
      end_global[i] = grid.Global().LocalEnd()[i];
    }

    vtkSmartPointer<vtkImageData> image = vtkSmartPointer<vtkImageData>::New();
    image->SetExtent(grid.Global().LocalBegin().X(), end_global.X()-1,
                     grid.Global().LocalBegin().Y(), end_global.Y()-1,
                     grid.Global().LocalBegin().Z(), end_global.Z()-1);
    image->SetSpacing(grid.Extent().MeshWidth().vec());
    image->SetOrigin(grid.Extent().Begin().vec());
    image->SetScalarTypeToDouble();
    image->SetNumberOfScalarComponents(1);
    image->AllocateScalars();
    image->GetPointData()->GetScalars()->SetName(information);

    Index i;
    for (i.X()=grid.Local().Begin().X(); i.X()<end.X(); ++i.X())
      for (i.Y()=grid.Local().Begin().Y(); i.Y()<end.Y(); ++i.Y())
        for (i.Z()=grid.Local().Begin().Z(); i.Z()<end.Z(); ++i.Z())
          image->SetScalarComponentFromDouble(i.X() - grid.Local().Begin().X() + grid.Global().LocalBegin().X(),
                                              i.Y() - grid.Local().Begin().Y() + grid.Global().LocalBegin().Y(),
                                              i.Z() - grid.Local().Begin().Z() + grid.Global().LocalBegin().Z(),
                                              0, grid.GetVal(i));

    image->Update();

    int rank, size;
    MPI_Comm_rank(comm_global, &rank);
    MPI_Comm_size(comm_global, &size);

    std::stringstream filename;
    filename << OutputPath() << std::setw(4) << std::setfill('0') << output_count << "_" << rank << ".vti";

    vtkSmartPointer<vtkXMLImageDataWriter> writer = vtkSmartPointer<vtkXMLImageDataWriter>::New();
    writer->SetFileName(filename.str().c_str());
    writer->SetCompressorTypeToNone();
    writer->SetDataModeToAscii();
    writer->SetInput(image);
    writer->Update();
    writer->Write();

  }

#else /* HAVE_VTK */
  Index i;
  std::stringstream buf;

  Index begin, end;
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int i=0; i<3; ++i) {
    end[i] = grid.Local().End()[i] + (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 0 : grid.Local().HaloSize1()[i]);
    end_local[i] = grid.Global().LocalEnd()[i] - (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 1 : 0);
  }

  begin = grid.Local().Begin();
  begin_local = grid.Global().LocalBegin();
  begin_global = 0;
  end_global = grid.Global().GlobalSize()-1;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local,
                    output_count);
#endif /* HAVE_VTK */
}

void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);
  PrintGrid(*temp, information);
}

void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local,
                                const int& output_count)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

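/*
 * Emit the .pvti master file that stitches the per-rank .vti pieces together:
 * rank 0 writes the header and footer via the shared file pointer, and every
 * rank contributes its <Piece> entry in rank order.
 */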
void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  sprintf(parallel_filename, "%s%04d.pvti", OutputPath().c_str(), output_count);
  sprintf(serial_filename, "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << "  <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] << " ";

    buf << "\"" << std::endl
        << "              GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << "    <PPointData Scalars=\"" << information << "\">" << std::endl
        << "      <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << "    </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");

  if ((end_local-begin_local).Product() > 0) {
    buf << "    <Piece Extent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_local[i] << " " << end_local[i] << " ";

    buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
  }

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << "  </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  sprintf(serial_filename, "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << "  <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] << " ";

  buf << "\"" << std::endl
      << "             Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << "    <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] << " ";

  buf << "\">" << std::endl
      << "      <PointData Scalars=\"" << information << "\">" << std::endl
      << "        <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << "          ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << "        </DataArray>" << std::endl
      << "      </PointData>" << std::endl
      << "    </Piece>" << std::endl
      << "  </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

int CommMPI::GlobalRank() const
{
  int rank;
  MPI_Comm_rank(comm_global, &rank);
  return rank;
}

int CommMPI::GlobalSize() const
{
  int size;
  MPI_Comm_size(comm_global, &size);
  return size;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

int CommMPI::Rank(const Grid& grid) const
{
  int rank;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_rank(comm, &rank);
  return rank;
}

int CommMPI::Size(const Grid& grid) const
{
  int size;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_size(comm, &size);
  return size;
}

Index CommMPI::Pos(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::Procs(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

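/*
 * Set up the global Cartesian communicator. If the supplied communicator
 * already carries a Cartesian topology it is adopted as-is; otherwise a 3D
 * topology is created, splitting power-of-two process counts manually and
 * falling back to MPI_Dims_create for all other counts.
 */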
void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    comm_global = comm;

  } else {

    const int log2 = Helper::log_2(size);

    if (Helper::intpow(2, log2) == size) {
      for (int i=0; i<3; ++i)
        dims[i] = Helper::intpow(2, log2 / 3 + (log2%3 > i ? 1 : 0));
    } else {
      MPI_Dims_create(size, 3, dims);
    }

#ifdef DEBUG_OUTPUT
    if (rank == 0)
      std::printf("Dims: %d %d %d\n", dims[0], dims[1], dims[2]);
#endif

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);
}

CommMPI::~CommMPI()
{
  MPI_Comm_free(&comm_global);
#ifdef VMG_ONE_SIDED
  if (win_created)
    MPI_Win_free(&win);
#endif
  MPI_Info_free(&info);
}

Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  return settings.CoarserGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  return settings.FinerGrid(multigrid(multigrid.Level()));
}

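/*
 * Create a timestamped output directory, appending a numeric suffix until the
 * name is unique, and broadcast the resulting path so that all ranks agree on
 * it. Without Boost.Filesystem the current working directory is used instead.
 */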
std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  } else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}

#endif /* HAVE_MPI */