source: src/comm/comm_mpi.hpp@ 5ba22b

Last change on this file since 5ba22b was a72216, checked in by Olaf Lenz <olenz@…>, 13 years ago

Fixed permissions.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@2428 5161e1c8-67bf-11de-9fd5-51895aff932f

  • Property mode set to 100644
File size: 6.3 KB
Line 
/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file   comm_mpi.hpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date   Wed Jun 16 13:21:06 2010
 *
 * @brief  Class for MPI-based communication.
 */
27
28#ifndef COMM_MPI_HPP_
29#define COMM_MPI_HPP_
30
31#ifndef HAVE_MPI
32#error You need MPI in order to compile CommMPI
33#endif
34
35#include <cstdarg>
36#include <cstdio>
37#include <list>
38#include <map>
39#include <vector>
40
41#include "base/has_tempgrids.hpp"
42#include "comm/comm.hpp"
43#include "comm/mpi/settings.hpp"
44#include "comm/mpi/has_request_vec.hpp"
45
namespace VMG
{

// Forward declarations: keeps this header's include list small.
class DomainDecomposition;
class GridIteratorSet;
class TempGrid;

/**
 * @brief MPI-based implementation of the abstract Comm interface.
 *
 * Provides ghost-cell exchange (synchronous and asynchronous), subgrid
 * copy/add redistribution, global and per-level reductions, and various
 * (parallel) output helpers on top of an MPI communicator.  Temporary
 * grids and outstanding MPI requests are managed through the
 * HasTempGrids / HasRequestVec mixin base classes.
 *
 * This header requires MPI (it #errors out if HAVE_MPI is not defined);
 * optional one-sided communication support is compiled in when
 * VMG_ONE_SIDED is defined.
 */
class CommMPI : public Comm, public HasTempGrids, public HasRequestVec
{
public:
  /**
   * @brief Construct on a user-supplied MPI communicator.
   *
   * @param boundary   Boundary conditions, forwarded to the Comm base.
   * @param domain_dec Domain decomposition strategy (pointer stored by the
   *                   base class; ownership semantics defined in comm.hpp).
   * @param mpi_comm   Communicator to operate on; InitCommMPI (defined in
   *                   the .cpp) derives the internal comm_global from it.
   * @param register_  Forwarded to Comm; presumably registers this instance
   *                   with a global factory/registry -- confirm in comm.hpp.
   */
  CommMPI(const Boundary& boundary, DomainDecomposition* domain_dec, const MPI_Comm& mpi_comm, bool register_ = true) :
    Comm(boundary, domain_dec, register_)
#ifdef VMG_ONE_SIDED
    ,win_created(false)  // no MPI window allocated yet
#endif
  {
    InitCommMPI(mpi_comm);
  }

  /**
   * @brief Convenience constructor: identical to the one above, but
   *        operates on MPI_COMM_WORLD.
   */
  CommMPI(const Boundary& boundary, DomainDecomposition* domain_dec, bool register_ = true) :
    Comm(boundary, domain_dec, register_)
#ifdef VMG_ONE_SIDED
    ,win_created(false)  // no MPI window allocated yet
#endif
  {
    InitCommMPI(MPI_COMM_WORLD);
  }

  // Virtual: CommMPI is used polymorphically through Comm.
  virtual ~CommMPI();

  // Navigate between multigrid levels (definitions in comm_mpi.cpp).
  Grid& GetCoarserGrid(Multigrid& multigrid);
  Grid& GetFinerGrid(Multigrid& multigrid);

  // Synchronous ghost-cell exchange and subgrid redistribution.
  // CommSubgrid overwrites grid_new's data, CommAddSubgrid accumulates
  // into it; `direction` selects the transfer direction.
  void CommFromGhosts(Grid& grid);
  void CommToGhosts(Grid& grid);
  void CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction);
  void CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction);

  // Asynchronous ghost exchange, split into a start (post sends/receives)
  // and a finish (complete outstanding requests) phase.
  void CommToGhostsAsyncStart(Grid& grid);
  void CommToGhostsAsyncFinish(Grid& grid);
  void CommFromGhostsAsyncStart(Grid& grid);
  void CommFromGhostsAsyncFinish(Grid& grid);

  /// Synchronize all processes of the global communicator.
  void Barrier();

  // Global collective reductions over comm_global (floating-point).
  // The *Root variants deliver the result on the root rank only.
  vmg_float GlobalSum(vmg_float value);
  vmg_float GlobalSumRoot(vmg_float value);
  void GlobalSumArray(vmg_float* array, const vmg_int& size);
  vmg_float GlobalMax(vmg_float value);
  vmg_float GlobalMaxRoot(vmg_float value);
  void GlobalMaxArray(vmg_float* array, const vmg_int& size);
  void GlobalBroadcast(vmg_float& value);
  void GlobalGather(vmg_float& value, vmg_float* array);

  // Global collective reductions over comm_global (integer overloads).
  vmg_int GlobalSum(vmg_int value);
  vmg_int GlobalSumRoot(vmg_int value);
  void GlobalSumArray(vmg_int* array, const vmg_int& size);
  vmg_int GlobalMax(vmg_int value);
  vmg_int GlobalMaxRoot(vmg_int value);
  void GlobalMaxArray(vmg_int* array, const vmg_int& size);
  void GlobalBroadcast(vmg_int& value);
  void GlobalGather(vmg_int& value, vmg_int* array);

  /// Broadcast a C string; `str` must be large enough on all ranks.
  void GlobalBroadcast(char* str);

  // Reductions restricted to the communicator associated with the
  // grid's level (floating-point).
  vmg_float LevelSum(const Grid& grid, vmg_float value);
  vmg_float LevelSumRoot(const Grid& grid, vmg_float value);
  void LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size);

  // Level-restricted reductions (integer overloads).
  vmg_int LevelSum(const Grid& grid, vmg_int value);
  vmg_int LevelSumRoot(const Grid& grid, vmg_int value);
  void LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size);

  // Output helpers. PrintString* use printf-style varargs; the *Once /
  // root-only semantics are implemented in the .cpp.
  void PrintString(const char* format, ...);
  void PrintStringOnce(const char* format, ...);
  void PrintXML(const std::string& filename, const std::string& xml_data);
  void PrintXMLAll(const std::string& filename, const std::string& xml_data);
  void PrintAllSettings();
  void PrintGrid(Grid& grid, const char* information);
  void PrintDefect(Grid& sol, Grid& rhs, const char* information);

  // Rank/size/topology queries on the global communicator.
  virtual int GlobalRank() const;
  virtual int GlobalSize() const;
  virtual Index GlobalPos() const;
  virtual Index GlobalProcs() const;

  // Rank/size/topology queries on the grid's level communicator.
  virtual int Rank(const Grid& grid) const;
  virtual int Size(const Grid& grid) const;
  virtual Index Pos(const Grid& grid) const;
  virtual Index Procs(const Grid& grid) const;

  /**
   * @brief Finish initialization once the solution and right-hand-side
   *        multigrid hierarchies exist.
   *
   * Delegates to VMG::MPI::Settings to precompute communication metadata
   * (datatypes, communicators, ...) for both hierarchies on comm_global.
   */
  void PostInit(Multigrid& sol, Multigrid& rhs)
  {
    settings.ComputeSettings(sol, rhs, comm_global);
  }

  // Debug hooks: intentionally no-ops here; subclasses may override.
  virtual void DebugPrintError(const Grid& sol, const char* information) {}
  virtual void DebugPrintErrorNorm(Grid& sol) {}
  virtual void DebugPrintGridStructure(Multigrid& multigrid) {}

private:
  /// Shared constructor body: sets up comm_global etc. from `comm`.
  void InitCommMPI(const MPI_Comm& comm);

  // Grid-output plumbing used by PrintGrid/PrintDefect (MPI-IO based).
  void CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
			 const Index& begin_global, const Index& end_global,
			 const Index& begin_local, const Index& end_local,
			 const int& output_count);

  void CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
				const int& output_count, const char* information,
				const Index& begin_global, const Index& end_global,
				const Index& begin_local, const Index& end_local);

  MPI_File CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
				  const int& output_count, const char* information,
				  const Index& begin_global, const Index& end_global,
				  const Index& begin_local, const Index& end_local);

  /// Close/finalize a file opened by CreateSerialOutputFile.
  void FinalizeSerialOutputFile(MPI_File& file);

  /// @return Path of the directory output files are written to.
  std::string CreateOutputDirectory();

protected:
  // Post nonblocking sends/receives for all halo datatypes; `tag_start`
  // offsets the MPI tags so concurrent exchanges do not collide.
  void IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start);
  void IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start);

  // Buffered variants: data is staged through intermediate buffers held
  // by the datatype objects rather than sent in place.
  void IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start);
  void IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start);

  // Unpack received buffers into the grid: Replace overwrites the target
  // cells, Add accumulates into them.
  void ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types);
  void AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types);

  /// Write diagnostic information about `grid` to `filename`.
  void PrintGridInformation(const Grid& grid, char* filename, const std::string& name);

  VMG::MPI::Settings settings;  ///< Precomputed communication metadata (see PostInit).

  MPI_Comm comm_global;  ///< Communicator all global operations run on.
  MPI_Info info;         ///< MPI info object (hints), set up in InitCommMPI.

#ifdef VMG_ONE_SIDED
  bool win_created;  ///< True once `win` has been created and must be freed.
  MPI_Win win;       ///< Window for one-sided communication.
#endif

};

}
193
194#endif /* COMM_MPI_HPP_ */
Note: See TracBrowser for help on using the repository browser.