source: src/comm/comm_mpi.cpp @ d13e27

Last change on this file was d13e27, checked in by Julian Iseringhausen <isering@…>, 13 years ago

vmg: Work on output verbosity.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@2845 5161e1c8-67bf-11de-9fd5-51895aff932f

/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Wed Jun 16 13:21:06 2010
 *
 * @brief Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#ifdef HAVE_VTK
#include <vtkAbstractArray.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#endif

// Standard headers used directly in this file (assert, varargs formatting,
// printf/sprintf, strftime, setw/setfill); they may also arrive transitively
// via the project headers below.
#include <cassert>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iomanip>
#include <sstream>

#include "base/helper.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "comm/mpi/datatypes_local.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"
#include "base/timer.hpp"

static char print_buffer[512];

using namespace VMG;

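// Thin wrappers that post one non-blocking send/receive per precomputed MPI
// datatype, passing Request() as the request slot. The *Buffered variants go
// through the datatypes' internal pack buffers; ReplaceBufferAll and
// AddBufferAll copy resp. accumulate the received buffers back into the grid.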
void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Isend(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Irecv(grid, tag_start, comm, Request());
}

void CommMPI::IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IsendBuffered(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IrecvBuffered(tag_start, comm, Request());
}

void CommMPI::ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridReplace(grid);
}

void CommMPI::AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridSum(grid);
}

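// Copy grid data from grid_old into grid_new using the precomputed global
// datatypes for this grid pair and transfer direction; received pieces
// overwrite grid_new. CommAddSubgrid is identical except that the received
// pieces are added onto grid_new instead of replacing it.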
void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 0411);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 0411);
    WaitAll();
    ReplaceBufferAll(grid_new, datatypes.Receive());
  }
}

void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 1806);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 1806);
    WaitAll();
    AddBufferAll(grid_new, datatypes.Receive());
  }
}

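// Halo exchange within one level: post receives for the halo regions, send
// the matching NB regions, wait, then overwrite the halo cells with the
// received buffers. The Async pair splits this into posting the requests
// (with a TestAll to progress them) and waiting/unpacking.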
void CommMPI::CommToGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2310);
    IsendAllBuffered(grid, types.NB(), comm, 2310);
    WaitAll();
    ReplaceBufferAll(grid, types.Halo());
  }
}

void CommMPI::CommToGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2412);
    IsendAllBuffered(grid, types.NB(), comm, 2412);
    TestAll();
  }
}

void CommMPI::CommToGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  ReplaceBufferAll(grid, settings.DatatypesLocal(grid).Halo());
}

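// Reverse halo exchange: the halo regions are sent back to the owning ranks
// and accumulated onto their NB regions. Again the Async pair separates
// posting the requests from completing and accumulating them.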
void CommMPI::CommFromGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 1337);
    IsendAllBuffered(grid, types.Halo(), comm, 1337);
    WaitAll();
    AddBufferAll(grid, types.NB());
  }
}

void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
    IsendAllBuffered(grid, types.Halo(), comm, 0xc0ffee);
    TestAll();
  }
}

void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  AddBufferAll(grid, settings.DatatypesLocal(grid).NB());
}

void CommMPI::Barrier()
{
  MPI_Barrier(comm_global);
}

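// Reductions, broadcasts and gathers over the global communicator. The plain
// versions return the result on every rank (MPI_Allreduce), the *Root
// variants only on rank 0 (MPI_Reduce), and the array versions reduce in
// place. The char* broadcast first broadcasts the string length from rank 0.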
vmg_float CommMPI::GlobalSum(vmg_float value)
{
  vmg_float result = 0;
  MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
  return result;
}

vmg_float CommMPI::GlobalSumRoot(vmg_float value)
{
  vmg_float result = 0;
  MPI_Reduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
  return result;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_float& value)
{
  MPI_Bcast(&value, 1, MPI_DOUBLE, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_float& value, vmg_float* array)
{
  MPI_Gather(&value, 1, MPI_DOUBLE, array, 1, MPI_DOUBLE, 0, comm_global);
}

vmg_int CommMPI::GlobalSum(vmg_int value)
{
  vmg_int result = 0;
  MPI_Allreduce(&value, &result, 1, MPI_INT, MPI_SUM, comm_global);
  return result;
}

vmg_int CommMPI::GlobalSumRoot(vmg_int value)
{
  vmg_int result = 0;
  MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, comm_global);
  return result;
}

void CommMPI::GlobalSumArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm_global);
}

vmg_float CommMPI::GlobalMax(vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_MAX, comm_global);
  return result;
}

vmg_float CommMPI::GlobalMaxRoot(vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Reduce(&value, &result, 1, MPI_DOUBLE, MPI_MAX, 0, comm_global);
  return result;
}

void CommMPI::GlobalMaxArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_MAX, comm_global);
}

vmg_int CommMPI::GlobalMax(vmg_int value)
{
  vmg_int result = 0;
  MPI_Allreduce(&value, &result, 1, MPI_INT, MPI_MAX, comm_global);
  return result;
}

vmg_int CommMPI::GlobalMaxRoot(vmg_int value)
{
  vmg_int result = 0;
  MPI_Reduce(&value, &result, 1, MPI_INT, MPI_MAX, 0, comm_global);
  return result;
}

void CommMPI::GlobalMaxArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_MAX, comm_global);
}

void CommMPI::GlobalBroadcast(vmg_int& value)
{
  MPI_Bcast(&value, 1, MPI_INT, 0, comm_global);
}

void CommMPI::GlobalGather(vmg_int& value, vmg_int* array)
{
  MPI_Gather(&value, 1, MPI_INT, array, 1, MPI_INT, 0, comm_global);
}

void CommMPI::GlobalBroadcast(char* str)
{
  int size = std::strlen(str) + 1;
  MPI_Bcast(&size, 1, MPI_INT, 0, comm_global);
  MPI_Bcast(str, size, MPI_CHAR, 0, comm_global);
}

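// Level-local reductions: same pattern as the Global* routines above, but
// restricted to the communicator associated with the given grid's level.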
vmg_float CommMPI::LevelSum(const Grid& grid, vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
  return result;
}

vmg_float CommMPI::LevelSumRoot(const Grid& grid, vmg_float value)
{
  vmg_float result = 0.0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
  return result;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm);
}

vmg_int CommMPI::LevelSum(const Grid& grid, vmg_int value)
{
  vmg_int result = 0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(&value, &result, 1, MPI_INT, MPI_SUM, comm);
  return result;
}

vmg_int CommMPI::LevelSumRoot(const Grid& grid, vmg_int value)
{
  vmg_int result = 0;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, comm);
  return result;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm);
}

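// printf-style output filtered by OutputLevel: Output is always printed,
// Info/Debug/Timing only if the corresponding OUTPUT_* macro is defined at
// compile time. Print writes on every rank, PrintOnce only on global rank 0.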
void CommMPI::Print(const OutputLevel level, const char* format, ...)
{
  bool print = (level == Output);

#ifdef OUTPUT_INFO
  print |= (level == Info);
#endif
#ifdef OUTPUT_DEBUG
  print |= (level == Debug);
#endif
#ifdef OUTPUT_TIMING
  print |= (level == Timing);
#endif

  if (print) {
    va_list args;
    va_start(args, format);
    // Bounded write so long messages cannot overflow the fixed-size buffer.
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

void CommMPI::PrintOnce(const OutputLevel level, const char* format, ...)
{
  bool print = (level == Output);

#ifdef OUTPUT_INFO
  print |= (level == Info);
#endif
#ifdef OUTPUT_DEBUG
  print |= (level == Debug);
#endif
#ifdef OUTPUT_TIMING
  print |= (level == Timing);
#endif

  if (GlobalRank() == 0 && print) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

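// XML output via MPI-IO. Both routines prepend a small header holding the
// number of processes; PrintXML writes the whole file from the calling rank
// (MPI_COMM_SELF), while PrintXMLAll is collective: rank 0 writes the header
// and all ranks append their data in rank order (MPI_File_write_ordered).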
void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}

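// Append a human-readable dump of the grid decomposition (global extents,
// per-rank local/halo/boundary index ranges) to the given file via ordered
// MPI-IO writes. PrintAllSettings truncates "settings.txt" and dumps every
// multigrid level together with the auxiliary coarser/finer grids.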
void CommMPI::PrintGridInformation(const Grid& grid, char* filename, const std::string& name)
{
  std::stringstream buf;
  MPI_File file;
  int rank, size;
  int size_local, size_local_max;

  MPI_Comm comm = settings.CommunicatorGlobal(grid);
  MPI_Comm comm_local = settings.CommunicatorLocal(grid);

  if (comm_local != MPI_COMM_NULL)
    MPI_Comm_size(comm_local, &size_local);
  else
    size_local = 0;

  if (comm != MPI_COMM_NULL) {

    MPI_Reduce(&size_local, &size_local_max, 1, MPI_INT, MPI_MAX, 0, comm);

    MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (rank == 0) {

      buf << "###########################################################" << std::endl
          << "GLOBAL INFORMATION:" << std::endl
          << " NAME: " << name << std::endl
          << " LEVEL: " << grid.Level() << std::endl
          << " COMM_SIZE_GLOBAL: " << size << std::endl
          << " COMM_SIZE_LOCAL: " << size_local_max << std::endl
          << " GLOBAL:" << std::endl
          << " GLOBAL_FINER_BEGIN: " << grid.Global().GlobalFinerBegin() << std::endl
          << " GLOBAL_FINER_END: " << grid.Global().GlobalFinerEnd() << std::endl
          << " GLOBAL_FINER_SIZE: " << grid.Global().GlobalFinerSize() << std::endl
          << " FINEST_ABS_BEGIN: " << grid.Global().FinestAbsBegin() << std::endl
          << " FINEST_ABS_END: " << grid.Global().FinestAbsEnd() << std::endl
          << " FINEST_ABS_SIZE: " << grid.Global().FinestAbsSize() << std::endl
          << " GLOBAL_SIZE: " << grid.Global().GlobalSize() << std::endl
          << " EXTENT:" << std::endl
          << " BEGIN: " << grid.Extent().Begin() << std::endl
          << " END: " << grid.Extent().End() << std::endl
          << " SIZE: " << grid.Extent().Size() << std::endl
          << " MESH_WIDTH: " << grid.Extent().MeshWidth() << std::endl
          << std::endl
          << "LOCAL INFORMATION:" << std::endl;
    }

    buf << "RANK " << rank << ":" << std::endl
        << " GLOBAL:" << std::endl
        << " LOCAL_BEGIN: " << grid.Global().LocalBegin() << std::endl
        << " LOCAL_END: " << grid.Global().LocalEnd() << std::endl
        << " LOCAL_SIZE: " << grid.Global().LocalSize() << std::endl
        << " LOCAL_FINER_BEGIN: " << grid.Global().LocalFinerBegin() << std::endl
        << " LOCAL_FINER_END: " << grid.Global().LocalFinerEnd() << std::endl
        << " LOCAL_FINER_SIZE: " << grid.Global().LocalFinerSize() << std::endl
        << " BOUNDARY_TYPE: " << grid.Global().BoundaryType() << std::endl
        << " LOCAL:" << std::endl
        << " BEGIN: " << grid.Local().Begin() << std::endl
        << " END: " << grid.Local().End() << std::endl
        << " SIZE: " << grid.Local().Size() << std::endl
        << " SIZE_TOTAL: " << grid.Local().SizeTotal() << std::endl
        << " HALO_BEGIN_1: " << grid.Local().HaloBegin1() << std::endl
        << " HALO_END_1: " << grid.Local().HaloEnd1() << std::endl
        << " HALO_SIZE_1: " << grid.Local().HaloSize1() << std::endl
        << " HALO_BEGIN_2: " << grid.Local().HaloBegin2() << std::endl
        << " HALO_END_2: " << grid.Local().HaloEnd2() << std::endl
        << " HALO_SIZE_2: " << grid.Local().HaloSize2() << std::endl
        << " BOUNDARY_BEGIN_1: " << grid.Local().BoundaryBegin1() << std::endl
        << " BOUNDARY_END_1: " << grid.Local().BoundaryEnd1() << std::endl
        << " BOUNDARY_SIZE_1: " << grid.Local().BoundarySize1() << std::endl
        << " BOUNDARY_BEGIN_2: " << grid.Local().BoundaryBegin2() << std::endl
        << " BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
        << " BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl
        << " FINER_BEGIN: " << grid.Local().FinerBegin() << std::endl
        << " FINER_END: " << grid.Local().FinerEnd() << std::endl
        << " FINER_SIZE: " << grid.Local().FinerSize() << std::endl;

    if (rank == size-1)
      buf << "###########################################################" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;

  const Multigrid& mg = *MG::GetSol();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg.MinLevel(); i<=mg.MaxLevel(); ++i)
    PrintGridInformation(mg(i), filename, "MULTIGRID");

  for (int i=mg.MinLevel()+1; i<=mg.MaxLevel(); ++i)
    PrintGridInformation(settings.CoarserGrid(mg(i)), filename, "COARSER_GRID");

  for (int i=mg.MinLevel(); i<mg.MaxLevel(); ++i)
    PrintGridInformation(settings.FinerGrid(mg(i)), filename, "FINER_GRID");

  delete [] filename;

}

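// Write the grid values for visualization, numbered by OutputCount(). With
// VTK support each participating rank writes its local block to a .vti image
// file; without VTK the values are serialized as ASCII and handed to
// CreateOutputFiles, which emits a .pvti meta file plus per-rank pieces.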
void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  int output_count = OutputCount();

#ifdef HAVE_VTK

  if (settings.CommunicatorLocal(grid) != MPI_COMM_NULL) {

    Index end, end_global;

    for (int i=0; i<3; ++i) {
      end[i] = grid.Local().End()[i];
      end_global[i] = grid.Global().LocalEnd()[i];
    }

    vtkSmartPointer<vtkImageData> image = vtkSmartPointer<vtkImageData>::New();
    image->SetExtent(grid.Global().LocalBegin().X(), end_global.X()-1,
                     grid.Global().LocalBegin().Y(), end_global.Y()-1,
                     grid.Global().LocalBegin().Z(), end_global.Z()-1);
    image->SetSpacing(grid.Extent().MeshWidth().vec());
    image->SetOrigin(grid.Extent().Begin().vec());
    image->SetScalarTypeToDouble();
    image->SetNumberOfScalarComponents(1);
    image->AllocateScalars();
    image->GetPointData()->GetScalars()->SetName(information);

    Index i;
    for (i.X()=grid.Local().Begin().X(); i.X()<end.X(); ++i.X())
      for (i.Y()=grid.Local().Begin().Y(); i.Y()<end.Y(); ++i.Y())
        for (i.Z()=grid.Local().Begin().Z(); i.Z()<end.Z(); ++i.Z())
          image->SetScalarComponentFromDouble(i.X() - grid.Local().Begin().X() + grid.Global().LocalBegin().X(),
                                              i.Y() - grid.Local().Begin().Y() + grid.Global().LocalBegin().Y(),
                                              i.Z() - grid.Local().Begin().Z() + grid.Global().LocalBegin().Z(),
                                              0, grid.GetVal(i));

    image->Update();

    int rank, size;
    MPI_Comm_rank(comm_global, &rank);
    MPI_Comm_size(comm_global, &size);

    std::stringstream filename;
    filename << OutputPath() << std::setw(4) << std::setfill('0') << output_count << "_" << rank << ".vti";

    vtkSmartPointer<vtkXMLImageDataWriter> writer = vtkSmartPointer<vtkXMLImageDataWriter>::New();
    writer->SetFileName(filename.str().c_str());
    writer->SetCompressorTypeToNone();
    writer->SetDataModeToAscii();
    writer->SetInput(image);
    writer->Update();
    writer->Write();

  }

#else /* HAVE_VTK */
  Index i;
  std::stringstream buf;

  Index begin, end;
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int i=0; i<3; ++i) {
    end[i] = grid.Local().End()[i] + (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 0 : grid.Local().HaloSize1()[i]);
    end_local[i] = grid.Global().LocalEnd()[i] - (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 1 : 0);
  }

  begin = grid.Local().Begin();
  begin_local = grid.Global().LocalBegin();
  begin_global = 0;
  end_global = grid.Global().GlobalSize()-1;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local,
                    output_count);
#endif /* HAVE_VTK */
}

void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);
  PrintGrid(*temp, information);
}

void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local,
                                const int& output_count)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

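// Hand-written VTK XML output used by the non-VTK code path above. The .pvti
// file references one <Piece> per rank (empty pieces are skipped); the serial
// per-rank file gets its ImageData header here and is returned open so the
// caller can append the point data before FinalizeSerialOutputFile closes the
// XML elements and the file.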
void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  sprintf(parallel_filename, "%s%04d.pvti", OutputPath().c_str(), output_count);
  sprintf(serial_filename, "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << " <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] << " ";

    buf << "\"" << std::endl
        << " GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << " <PPointData Scalars=\"" << information << "\">" << std::endl
        << " <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << " </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");

  if ((end_local-begin_local).Product() > 0) {
    buf << " <Piece Extent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_local[i] << " " << end_local[i] << " ";

    buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
  }

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << " </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  sprintf(serial_filename, "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << " <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] << " ";

  buf << "\"" << std::endl
      << " Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << " <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] << " ";

  buf << "\">" << std::endl
      << " <PointData Scalars=\"" << information << "\">" << std::endl
      << " <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << " ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << " </DataArray>" << std::endl
      << " </PointData>" << std::endl
      << " </Piece>" << std::endl
      << " </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

int CommMPI::GlobalRank() const
{
  int rank;
  MPI_Comm_rank(comm_global, &rank);
  return rank;
}

int CommMPI::GlobalSize() const
{
  int size;
  MPI_Comm_size(comm_global, &size);
  return size;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

int CommMPI::Rank(const Grid& grid) const
{
  int rank;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_rank(comm, &rank);
  return rank;
}

int CommMPI::Size(const Grid& grid) const
{
  int size;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_size(comm, &size);
  return size;
}

Index CommMPI::Pos(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::Procs(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

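// Set up the global communicator: if the supplied communicator already has a
// Cartesian topology it is used as is, otherwise a 3D process grid is chosen
// (split by hand for power-of-two sizes, via MPI_Dims_create otherwise) with
// periodicity taken from the boundary conditions, and MPI_Cart_create builds
// comm_global. An MPI Info object with the "no_locks" hint is created as well.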
void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    comm_global = comm;

  } else {

    const int log2 = Helper::log_2(size);

    if (Helper::intpow(2, log2) == size) {
      for (int i=0; i<3; ++i)
        dims[i] = Helper::intpow(2, log2 / 3 + (log2%3 > i ? 1 : 0));
    } else {
      MPI_Dims_create(size, 3, dims);
    }

#ifdef OUTPUT_DEBUG
    if (rank == 0)
      std::printf("Process grid: %d %d %d\n", dims[0], dims[1], dims[2]);
#endif

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);

}

CommMPI::~CommMPI()
{
  MPI_Comm_free(&comm_global);
#ifdef VMG_ONE_SIDED
  if (win_created)
    MPI_Win_free(&win);
#endif
  MPI_Info_free(&info);
}

Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  return settings.CoarserGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  return settings.FinerGrid(multigrid(multigrid.Level()));
}

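// Create a timestamped directory ./output/YYYY_MM_DD_HH_MM_SS/ (suffixed with
// a counter if it already exists) on rank 0 and broadcast the resulting path
// to all ranks. Without Boost.Filesystem the current directory is returned.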
std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  } else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}


#endif /* HAVE_MPI */