source: src/comm/comm_mpi.cpp@ d6a338

Last change on this file since d6a338 was e58835d, checked in by Julian Iseringhausen <isering@…>, 13 years ago

Renamed GlobalIndices::GlobalSizeNew back to GlobalIndices::GlobalSize.

/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Wed Jun 16 13:21:06 2010
 *
 * @brief Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#ifdef HAVE_VTK
#include <vtkAbstractArray.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#endif

#include <cstring>
#include <sstream>

#include "base/helper.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "comm/mpi/datatypes_local.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"
#include "base/timer.hpp"

static char print_buffer[512];

using namespace VMG;

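// Helpers that post one non-blocking send or receive per precomputed MPI datatype.
// The *Buffered variants go through intermediate buffers instead of sending directly
// from the grid storage; ReplaceBufferAll/AddBufferAll copy or accumulate the received
// buffers back into the grid once the requests have completed.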
void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Isend(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Irecv(grid, tag_start, comm, Request());
}

void CommMPI::IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IsendBuffered(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IrecvBuffered(tag_start, comm, Request());
}

void CommMPI::ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!= types.end(); ++iter)
    iter->GridReplace(grid);
}

void CommMPI::AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!= types.end(); ++iter)
    iter->GridSum(grid);
}

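// Transfer the contents of grid_old to grid_new using the precomputed global
// datatypes. CommSubgrid overwrites the destination values, CommAddSubgrid
// accumulates into them.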
void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 0411);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 0411);
    WaitAll();
    ReplaceBufferAll(grid_new, datatypes.Receive());
  }
}

void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 1806);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 1806);
    WaitAll();
    AddBufferAll(grid_new, datatypes.Receive());
  }
}

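// Halo exchange with neighboring processes: CommToGhosts fills the local ghost
// (halo) layers from the neighbors, CommFromGhosts sends the local halo layers
// back and accumulates them into the receiving grids. The Async variants split
// the exchange into a start phase (post requests) and a finish phase (wait and
// unpack) so that communication can overlap with computation.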
void CommMPI::CommToGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2310);
    IsendAllBuffered(grid, types.NB(), comm, 2310);
    WaitAll();
    ReplaceBufferAll(grid, types.Halo());
  }
}

void CommMPI::CommToGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2412);
    IsendAllBuffered(grid, types.NB(), comm, 2412);
    TestAll();
  }
}

void CommMPI::CommToGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  ReplaceBufferAll(grid, settings.DatatypesLocal(grid).Halo());
}

void CommMPI::CommFromGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 1337);
    IsendAllBuffered(grid, types.Halo(), comm, 1337);
    WaitAll();
    AddBufferAll(grid, types.NB());
  }
}

void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
    IsendAllBuffered(grid, types.Halo(), comm, 0xc0ffee);
    TestAll();
  }
}

void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  AddBufferAll(grid, settings.DatatypesLocal(grid).NB());
}

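// Global reductions, broadcasts and gathers over the full Cartesian communicator.
// The *Root variants reduce onto rank 0 only; the array variants reduce in place.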
void CommMPI::Barrier()
{
  MPI_Barrier(comm_global);
}

vmg_float CommMPI::GlobalSum(vmg_float value)
{
  vmg_float result = 0;
  MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
  return result;
}

vmg_float CommMPI::GlobalSumRoot(vmg_float value)
{
  vmg_float result = 0;
  MPI_Reduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
  return result;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_float& value)
{
  MPI_Bcast(&value, 1, MPI_DOUBLE, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_float& value, vmg_float* array)
{
  MPI_Gather(&value, 1, MPI_DOUBLE, array, 1, MPI_DOUBLE, 0, comm_global);
}

vmg_int CommMPI::GlobalSum(vmg_int value)
{
  vmg_int result = 0;
  MPI_Allreduce(&value, &result, 1, MPI_INT, MPI_SUM, comm_global);
  return result;
}

vmg_int CommMPI::GlobalSumRoot(vmg_int value)
{
  vmg_int result = 0;
  MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, comm_global);
  return result;
}

void CommMPI::GlobalSumArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm_global);
}

vmg_float CommMPI::GlobalMax(vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_MAX, comm_global);
  return result;
}

vmg_float CommMPI::GlobalMaxRoot(vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Reduce(&value, &result, 1, MPI_DOUBLE, MPI_MAX, 0, comm_global);
  return result;
}

void CommMPI::GlobalMaxArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_MAX, comm_global);
}

vmg_int CommMPI::GlobalMax(vmg_int value)
{
  vmg_int result = 0;
  MPI_Allreduce(&value, &result, 1, MPI_INT, MPI_MAX, comm_global);
  return result;
}

vmg_int CommMPI::GlobalMaxRoot(vmg_int value)
{
  vmg_int result = 0;
  MPI_Reduce(&value, &result, 1, MPI_INT, MPI_MAX, 0, comm_global);
  return result;
}

void CommMPI::GlobalMaxArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_MAX, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_int& value)
{
  MPI_Bcast(&value, 1, MPI_INT, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_int& value, vmg_int* array)
{
  MPI_Gather(&value, 1, MPI_INT, array, 1, MPI_INT, 0, comm_global);
}

void CommMPI::GlobalBroadcast(char* str)
{
  int size = std::strlen(str) + 1;
  MPI_Bcast(&size, 1, MPI_INT, 0, comm_global);
  MPI_Bcast(str, size, MPI_CHAR, 0, comm_global);
}

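// Level-local reductions: these operate on the communicator associated with a
// single grid level and must only be called by processes that are part of that
// communicator (asserted below).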
vmg_float CommMPI::LevelSum(const Grid& grid, vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
  return result;
}

vmg_float CommMPI::LevelSumRoot(const Grid& grid, vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
  return result;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm);
}

vmg_int CommMPI::LevelSum(const Grid& grid, vmg_int value)
{
  vmg_int result = 0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(&value, &result, 1, MPI_INT, MPI_SUM, comm);
  return result;
}

vmg_int CommMPI::LevelSumRoot(const Grid& grid, vmg_int value)
{
  vmg_int result = 0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, comm);
  return result;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm);
}

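// Rank-local and rank-0 printf-style output. Which message classes are emitted
// is decided at compile time via the OUTPUT_INFO, OUTPUT_DEBUG and OUTPUT_TIMING macros.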
//TODO: Variable length buffer
void CommMPI::Print(const OutputLevel level, const char* format, ...)
{
  bool print = (level == Output);

#ifdef OUTPUT_INFO
  print |= (level == Info);
#endif
#ifdef OUTPUT_DEBUG
  print |= (level == Debug);
#endif
#ifdef OUTPUT_TIMING
  print |= (level == Timing);
#endif

  if (print) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("vmg: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

void CommMPI::PrintOnce(const OutputLevel level, const char* format, ...)
{
  bool print = (level == Output);

#ifdef OUTPUT_INFO
  print |= (level == Info);
#endif
#ifdef OUTPUT_DEBUG
  print |= (level == Debug);
#endif
#ifdef OUTPUT_TIMING
  print |= (level == Timing);
#endif

  if (GlobalRank() == 0 && print) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("vmg: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

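// XML output via MPI-IO: PrintXML writes a file from the calling rank alone
// (MPI_COMM_SELF), while PrintXMLAll writes one file collectively, with rank 0
// prepending a small header and all ranks appending their data in rank order.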
void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}

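// Append global and per-rank index information for a grid to a text file using
// collective, rank-ordered MPI-IO writes.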
void CommMPI::PrintGridInformation(const Grid& grid, char* filename, const std::string& name)
{
  std::stringstream buf;
  MPI_File file;
  int rank, size;
  int size_local, size_local_max;

  MPI_Comm comm = settings.CommunicatorGlobal(grid);
  MPI_Comm comm_local = settings.CommunicatorLocal(grid);

  if (comm_local != MPI_COMM_NULL)
    MPI_Comm_size(comm_local, &size_local);
  else
    size_local = 0;

  if (comm != MPI_COMM_NULL) {

    MPI_Reduce(&size_local, &size_local_max, 1, MPI_INT, MPI_MAX, 0, comm);

    MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (rank == 0) {

      buf << "###########################################################" << std::endl
          << "GLOBAL INFORMATION:" << std::endl
          << " NAME: " << name << std::endl
          << " LEVEL: " << grid.Level() << std::endl
          << " COMM_SIZE_GLOBAL: " << size << std::endl
          << " COMM_SIZE_LOCAL: " << size_local_max << std::endl
          << " GLOBAL:" << std::endl
          << " GLOBAL_BEGIN: " << grid.Global().GlobalBegin() << std::endl
          << " GLOBAL_END: " << grid.Global().GlobalEnd() << std::endl
          << " GLOBAL_SIZE: " << grid.Global().GlobalSize() << std::endl
          << " GLOBAL_BEGIN_FINEST: " << grid.Global().GlobalBeginFinest() << std::endl
          << " GLOBAL_END_FINEST: " << grid.Global().GlobalEndFinest() << std::endl
          << " GLOBAL_SIZE_FINEST: " << grid.Global().GlobalSizeFinest() << std::endl
          << " EXTENT:" << std::endl
          << " BEGIN: " << grid.Extent().Begin() << std::endl
          << " END: " << grid.Extent().End() << std::endl
          << " SIZE: " << grid.Extent().Size() << std::endl
          << " MESH_WIDTH: " << grid.Extent().MeshWidth() << std::endl
          << std::endl
          << "LOCAL INFORMATION:" << std::endl;
    }

    buf << "RANK " << rank << ":" << std::endl
        << " GLOBAL:" << std::endl
        << " LOCAL_BEGIN: " << grid.Global().LocalBegin() << std::endl
        << " LOCAL_END: " << grid.Global().LocalEnd() << std::endl
        << " LOCAL_SIZE: " << grid.Global().LocalSize() << std::endl
        << " BOUNDARY_TYPE: " << grid.Global().BoundaryType() << std::endl
        << " LOCAL:" << std::endl
        << " BEGIN: " << grid.Local().Begin() << std::endl
        << " END: " << grid.Local().End() << std::endl
        << " SIZE: " << grid.Local().Size() << std::endl
        << " SIZE_TOTAL: " << grid.Local().SizeTotal() << std::endl
        << " HALO_BEGIN_1: " << grid.Local().HaloBegin1() << std::endl
        << " HALO_END_1: " << grid.Local().HaloEnd1() << std::endl
        << " HALO_SIZE_1: " << grid.Local().HaloSize1() << std::endl
        << " HALO_BEGIN_2: " << grid.Local().HaloBegin2() << std::endl
        << " HALO_END_2: " << grid.Local().HaloEnd2() << std::endl
        << " HALO_SIZE_2: " << grid.Local().HaloSize2() << std::endl
        << " BOUNDARY_BEGIN_1: " << grid.Local().BoundaryBegin1() << std::endl
        << " BOUNDARY_END_1: " << grid.Local().BoundaryEnd1() << std::endl
        << " BOUNDARY_SIZE_1: " << grid.Local().BoundarySize1() << std::endl
        << " BOUNDARY_BEGIN_2: " << grid.Local().BoundaryBegin2() << std::endl
        << " BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
        << " BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl;

    if (rank == size-1)
      buf << "###########################################################" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintDatatypes(char* filename)
{
  std::stringstream buf;
  MPI_File file;

  if (comm_global != MPI_COMM_NULL) {

    MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    buf << settings << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;

  const Multigrid& mg = *MG::GetSol();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg.MinLevel(); i<=mg.MaxLevel(); ++i)
    PrintGridInformation(mg(i), filename, "MULTIGRID");

  for (int i=mg.MinLevel()+1; i<=mg.MaxLevel(); ++i)
    PrintGridInformation(settings.CoarserGrid(mg(i)), filename, "COARSER_GRID");

  for (int i=mg.MinLevel(); i<mg.MaxLevel(); ++i)
    PrintGridInformation(settings.FinerGrid(mg(i)), filename, "FINER_GRID");

  PrintDatatypes(filename);

  delete [] filename;

}

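// Write a grid to disk for visualization. With VTK support each rank writes its
// local part as a .vti image file; without VTK a plain ASCII dump is produced
// through CreateOutputFiles below.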
void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  int output_count = OutputCount();

#ifdef HAVE_VTK

  if (settings.CommunicatorLocal(grid) != MPI_COMM_NULL) {

    vtkSmartPointer<vtkImageData> image = vtkSmartPointer<vtkImageData>::New();
    image->SetExtent(grid.Global().LocalBegin().X(), grid.Global().LocalEnd().X()-1,
                     grid.Global().LocalBegin().Y(), grid.Global().LocalEnd().Y()-1,
                     grid.Global().LocalBegin().Z(), grid.Global().LocalEnd().Z()-1);

    image->SetSpacing(grid.Extent().MeshWidth().vec());
    image->SetOrigin(grid.Extent().Begin().vec());
    image->SetScalarTypeToDouble();
    image->SetNumberOfScalarComponents(1);
    image->AllocateScalars();
    image->GetPointData()->GetScalars()->SetName(information);

    Index i;
    Index begin, end, offset;

    for (int j=0; j<3; ++j) {
      if (grid.Local().BoundarySize1()[j] > 0) {
        begin[j] = grid.Local().BoundaryBegin1()[j];
        offset[j] = grid.Global().LocalBegin()[j];
      } else {
        begin[j] = grid.Local().Begin()[j];
        offset[j] = grid.Global().LocalBegin()[j] - grid.Local().Begin()[j];
      }
      end[j] = (grid.Local().BoundarySize2()[j] > 0 ? grid.Local().BoundaryEnd2()[j] : grid.Local().End()[j]);
    }

    for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
      for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
        for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
          image->SetScalarComponentFromDouble(i.X() + offset.X(),
                                              i.Y() + offset.Y(),
                                              i.Z() + offset.Z(),
                                              0, grid.GetVal(i));

    image->Update();

    int rank, size;
    MPI_Comm_rank(comm_global, &rank);
    MPI_Comm_size(comm_global, &size);

    std::stringstream filename;
    filename << OutputPath() << std::setw(4) << std::setfill('0') << output_count << "_" << rank << ".vti";

    vtkSmartPointer<vtkXMLImageDataWriter> writer = vtkSmartPointer<vtkXMLImageDataWriter>::New();
    writer->SetFileName(filename.str().c_str());
    writer->SetCompressorTypeToNone();
    writer->SetDataModeToAscii();
    writer->SetInput(image);
    writer->Update();
    writer->Write();

  }

//FIXME
#else /* HAVE_VTK */
  Index i;
  std::stringstream buf;

  Index begin, end;
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int i=0; i<3; ++i) {
    end[i] = grid.Local().End()[i] + (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 0 : grid.Local().HaloSize1()[i]);
    end_local[i] = grid.Global().LocalEnd()[i] - (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 1 : 0);
  }

  begin = grid.Local().Begin();
  begin_local = grid.Global().LocalBegin();
  begin_global = 0;
  end_global = grid.Global().GlobalSize()-1;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local,
                    output_count);
#endif /* HAVE_VTK */
}

void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);
  PrintGrid(*temp, information);
}

void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local,
                                const int& output_count)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

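// Write the .pvti master file that stitches the per-rank .vti pieces together.
// Rank 0 writes the header and footer; every rank contributes one <Piece> entry
// (ranks with an empty local extent are skipped).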
void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  sprintf(parallel_filename, "%s%04d.pvti", OutputPath().c_str(), output_count);
  sprintf(serial_filename, "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << " <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] << " ";

    buf << "\"" << std::endl
        << " GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << " <PPointData Scalars=\"" << information << "\">" << std::endl
        << " <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << " </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");

  if ((end_local-begin_local).Product() > 0) {
    buf << " <Piece Extent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_local[i] << " " << end_local[i] << " ";

    buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
  }

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << " </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  sprintf(serial_filename, "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << " <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] << " ";

  buf << "\"" << std::endl
      << " Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << " <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] << " ";

  buf << "\">" << std::endl
      << " <PointData Scalars=\"" << information << "\">" << std::endl
      << " <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << " ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << " </DataArray>" << std::endl
      << " </PointData>" << std::endl
      << " </Piece>" << std::endl
      << " </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

int CommMPI::GlobalRank() const
{
  int rank;
  MPI_Comm_rank(comm_global, &rank);
  return rank;
}

int CommMPI::GlobalSize() const
{
  int size;
  MPI_Comm_size(comm_global, &size);
  return size;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

int CommMPI::Rank(const Grid& grid) const
{
  int rank;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_rank(comm, &rank);
  return rank;
}

int CommMPI::Size(const Grid& grid) const
{
  int size;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_size(comm, &size);
  return size;
}

Index CommMPI::Pos(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::Procs(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

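// Set up the global communicator. If the given communicator already carries a
// Cartesian topology it is used as-is; otherwise a 3d process grid is chosen
// (a power-of-two factorization if possible, MPI_Dims_create otherwise) and a
// new Cartesian communicator is created.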
void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    comm_global = comm;

  } else {

    const int log2 = Helper::log_2(size);

    if (Helper::intpow(2, log2) == size) {
      for (int i=0; i<3; ++i)
        dims[i] = Helper::intpow(2, log2 / 3 + (log2%3 > i ? 1 : 0));
    } else {
      MPI_Dims_create(size, 3, dims);
    }

#ifdef OUTPUT_DEBUG
    if (rank == 0)
      std::printf("Process grid: %d %d %d\n", dims[0], dims[1], dims[2]);
#endif

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);

}

CommMPI::~CommMPI()
{
  MPI_Comm_free(&comm_global);
#ifdef VMG_ONE_SIDED
  if (win_created)
    MPI_Win_free(&win);
#endif
  MPI_Info_free(&info);
}

Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  return settings.CoarserGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  return settings.FinerGrid(multigrid(multigrid.Level()));
}

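// Create a time-stamped output directory on rank 0 and broadcast its path to all
// ranks. Falls back to the current directory if Boost.Filesystem is not available.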
std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  } else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}


#endif /* HAVE_MPI */