/**
 * @file comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Wed Jun 16 13:21:06 2010
 *
 * @brief Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#include <cassert>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <sstream>

#include "base/helper.hpp"
#include "base/linked_cell_list.hpp"
#include "base/timer.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"

static char print_buffer[512];

using namespace VMG;

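/*
 * Non-blocking point-to-point helpers. Each wrapper posts an MPI_Isend or
 * MPI_Irecv described by a precomputed VMG::MPI::Datatype (target rank,
 * committed subarray type, message tag) and registers the resulting request
 * via Request(); a subsequent ClearRequests() presumably waits for all
 * pending requests. Datatype::Feasible() skips empty transfers.
 */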
void CommMPI::Isend(Grid& grid, VMG::MPI::Datatype& type, const MPI_Comm& comm, const int& tag)
{
  if (type.Feasible())
    MPI_Isend(&grid(0), 1, type.Type(), type.Rank(), tag, comm, &Request());
}

void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (unsigned int i=0; i<types.size(); ++i)
    if (types[i].Feasible())
      MPI_Isend(&grid(0), 1, types[i].Type(), types[i].Rank(), tag_start+types[i].TagSend(), comm, &Request());
}

void CommMPI::Irecv(Grid& grid, VMG::MPI::Datatype& type, const MPI_Comm& comm, const int& tag)
{
  if (type.Feasible())
    MPI_Irecv(&grid(0), 1, type.Type(), type.Rank(), tag, comm, &Request());
}

void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (unsigned int i=0; i<types.size(); ++i)
    if (types[i].Feasible())
      MPI_Irecv(&grid(0), 1, types[i].Type(), types[i].Rank(), tag_start+types[i].TagReceive(), comm, &Request());
}

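/*
 * Buffered receive path: data are received into plain double buffers and
 * only later folded into the grid by AddBufferAll(). This enables the
 * additive exchanges (CommAddSubgrid, CommFromGhosts), since a receive with
 * a derived datatype can only overwrite grid values, not add to them.
 * AddBufferAll walks the same subarray (Starts/Subsizes) the datatype
 * describes, in the same row-major x/y/z order the sender used.
 */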
void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  receive_buffer.resize(types.size());
  for (unsigned int i=0; i<types.size(); ++i)
    if (types[i].Feasible()) {
      receive_buffer[i].resize(types[i].Size());
      MPI_Irecv(&receive_buffer[i].front(), types[i].Size(), MPI_DOUBLE,
                types[i].Rank(), tag_start+types[i].TagReceive(), comm, &Request());
    }
}

void CommMPI::AddBufferAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types)
{
  Index ind;

  for (unsigned int i=0; i<types.size(); ++i) {
    int c = 0;
    for (ind.X()=types[i].Starts().X(); ind.X()<types[i].Starts().X()+types[i].Subsizes().X(); ++ind.X())
      for (ind.Y()=types[i].Starts().Y(); ind.Y()<types[i].Starts().Y()+types[i].Subsizes().Y(); ++ind.Y())
        for (ind.Z()=types[i].Starts().Z(); ind.Z()<types[i].Starts().Z()+types[i].Subsizes().Z(); ++ind.Z())
          grid(ind) += receive_buffer[i][c++];
  }

  receive_buffer.clear();
}

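/*
 * Transfers between two distributions of a grid, used e.g. when moving to
 * the global coarse grid. Communicators and datatype lists are cached per
 * grid (pair) in `settings`; ranks that do not participate see
 * MPI_COMM_NULL. The tag bases (0411, 1806, 1337, ...) are arbitrary
 * constants; note that 0411, having a leading zero, is an octal literal.
 */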
void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new);
    IsendAll(grid_old, datatypes.Send(), comm, 0411);
    IrecvAll(grid_new, datatypes.Receive(), comm, 0411);
    ClearRequests();
  }
}

void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new);
    IsendAll(grid_old, datatypes.Send(), comm, 1806);
    IrecvAllBuffered(datatypes.Receive(), comm, 1806);
    ClearRequests();
    AddBufferAll(grid_new, datatypes.Receive());
  }
}

void CommMPI::CommFromGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);

  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IsendAll(grid, types.Halo(), comm, 1337);
    IrecvAllBuffered(types.NB(), comm, 1337);
    ClearRequests();
    AddBufferAll(grid, types.NB());
  }
}

void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);

  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    /* NB: 0xc0ffee exceeds the minimum guaranteed MPI_TAG_UB of 32767;
       common MPI implementations accept it, but it is not strictly portable. */
    IsendAll(grid, types.Halo(), comm, 0xc0ffee);
    IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
  }
}

void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
{
  VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
  ClearRequests();
  AddBufferAll(grid, types.NB());
}

void CommMPI::CommToGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);

  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IsendAll(grid, types.NB(), comm, 2310);
    IrecvAll(grid, types.Halo(), comm, 2310);
    ClearRequests();
  }
}

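/*
 * Red-black variant of the halo exchange, suitable for red-black smoothers:
 * only the points of one color are communicated. With the color of point
 * (x,y,z) given by (x+y+z) % 2, the z-indices of one color form a stride-2
 * sequence for fixed x and y, starting at parity (x+y+offset) % 2; `offset`
 * selects the color. The exchange runs dimension by dimension (z, y, x)
 * over the Cartesian communicator, the later phases covering the full
 * already-updated extent of the earlier ones, so edge and corner values
 * propagate through intermediate neighbors. A minimal sketch of one packing
 * loop, with hypothetical plain-int bounds for readability:
 *
 *   for (int z = begin_z + (x + y + offset) % 2; z < end_z; z += 2)
 *     buffer.push_back(grid_value(x, y, z));  // every second point only
 */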
void CommMPI::CommToGhostsRB(Grid& grid, const int& offset)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);

  if (comm != MPI_COMM_NULL) {

    Index i;
    Tuple<int> neighbors;
    std::vector<vmg_float> buffer_send_left, buffer_send_right;
    std::vector<vmg_float> buffer_recv_left, buffer_recv_right;
    std::vector<vmg_float>::const_iterator iter;

    const Index halo_size_1 = grid.Local().HaloEnd1() - grid.Local().HaloBegin1();
    const Index halo_size_2 = grid.Local().HaloEnd2() - grid.Local().HaloBegin2();

    /*
     * Exchange along z
     */
    MPI_Cart_shift(comm, 2, 1, &neighbors.Left(), &neighbors.Right());

    if (halo_size_1.Z()) {

      buffer_send_left.reserve(grid.Local().Size().X() *
                               grid.Local().Size().Y() *
                               halo_size_1.Z() / 2);

      for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
        for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
          for (i.Z()=grid.Local().Begin().Z() + (i.X()+i.Y()+offset)%2;
               i.Z()<grid.Local().Begin().Z() + halo_size_1.Z();
               i.Z() += 2)
            buffer_send_left.push_back(grid.GetVal(i));
    }

    if (halo_size_2.Z()) {

      buffer_send_right.reserve(grid.Local().Size().X() *
                                grid.Local().Size().Y() *
                                halo_size_2.Z() / 2);

      for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
        for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
          for (i.Z()=grid.Local().End().Z() - halo_size_2.Z() + (i.X()+i.Y()+offset)%2;
               i.Z()<grid.Local().End().Z();
               i.Z() += 2)
            buffer_send_right.push_back(grid.GetVal(i));

    }

    buffer_recv_left.resize(buffer_send_left.size());
    buffer_recv_right.resize(buffer_send_right.size());

    MPI_Sendrecv(&buffer_send_right.front(), buffer_send_right.size(), MPI_DOUBLE, neighbors.Right(), 7,
                 &buffer_recv_left.front(), buffer_recv_left.size(), MPI_DOUBLE, neighbors.Left(), 7,
                 comm, MPI_STATUS_IGNORE);

    MPI_Sendrecv(&buffer_send_left.front(), buffer_send_left.size(), MPI_DOUBLE, neighbors.Left(), 8,
                 &buffer_recv_right.front(), buffer_recv_right.size(), MPI_DOUBLE, neighbors.Right(), 8,
                 comm, MPI_STATUS_IGNORE);

    if (halo_size_1.Z()) {

      iter = buffer_recv_left.begin();
      for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
        for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
          for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().Begin().Z(); i.Z() += 2)
            grid(i) = *iter++;

    }

    if (halo_size_2.Z()) {

      iter = buffer_recv_right.begin();
      for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
        for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
          for (i.Z()=grid.Local().End().Z() + (i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
            grid(i) = *iter++;

    }

    buffer_send_left.clear();
    buffer_send_right.clear();
    buffer_recv_left.clear();
    buffer_recv_right.clear();

    /*
     * Exchange along y
     */
    MPI_Cart_shift(comm, 1, 1, &neighbors.Left(), &neighbors.Right());

    if (halo_size_1.Y()) {

      buffer_send_left.reserve(grid.Local().Size().X() *
                               halo_size_1.Y() *
                               grid.Local().SizeTotal().Z() / 2);

      for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
        for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().Begin().Y() + halo_size_1.Y(); ++i.Y())
          for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
            buffer_send_left.push_back(grid.GetVal(i));
    }

    if (halo_size_2.Y()) {

      buffer_send_right.reserve(grid.Local().Size().X() *
                                halo_size_2.Y() *
                                grid.Local().SizeTotal().Z() / 2);

      for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
        for (i.Y()=grid.Local().End().Y() - halo_size_2.Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
          for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
            buffer_send_right.push_back(grid.GetVal(i));

    }

    buffer_recv_left.resize(buffer_send_left.size());
    buffer_recv_right.resize(buffer_send_right.size());

    MPI_Sendrecv(&buffer_send_right.front(), buffer_send_right.size(), MPI_DOUBLE, neighbors.Right(), 7,
                 &buffer_recv_left.front(), buffer_recv_left.size(), MPI_DOUBLE, neighbors.Left(), 7,
                 comm, MPI_STATUS_IGNORE);

    MPI_Sendrecv(&buffer_send_left.front(), buffer_send_left.size(), MPI_DOUBLE, neighbors.Left(), 8,
                 &buffer_recv_right.front(), buffer_recv_right.size(), MPI_DOUBLE, neighbors.Right(), 8,
                 comm, MPI_STATUS_IGNORE);

    if (halo_size_1.Y()) {

      iter = buffer_recv_left.begin();
      for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
        for (i.Y()=0; i.Y()<grid.Local().Begin().Y(); ++i.Y())
          for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
            grid(i) = *iter++;

    }

    if (halo_size_2.Y()) {

      iter = buffer_recv_right.begin();
      for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
        for (i.Y()=grid.Local().End().Y(); i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
          for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
            grid(i) = *iter++;

    }

    buffer_send_left.clear();
    buffer_send_right.clear();
    buffer_recv_left.clear();
    buffer_recv_right.clear();

    /*
     * Exchange along x
     */
    MPI_Cart_shift(comm, 0, 1, &neighbors.Left(), &neighbors.Right());

    if (halo_size_1.X()) {

      buffer_send_left.reserve(halo_size_1.X() *
                               grid.Local().SizeTotal().Y() *
                               grid.Local().SizeTotal().Z() / 2);

      for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().Begin().X() + halo_size_1.X(); ++i.X())
        for (i.Y()=0; i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
          for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
            buffer_send_left.push_back(grid.GetVal(i));
    }

    if (halo_size_2.X()) {

      buffer_send_right.reserve(halo_size_2.X() *
                                grid.Local().SizeTotal().Y() *
                                grid.Local().SizeTotal().Z() / 2);

      for (i.X()=grid.Local().End().X() - halo_size_2.X(); i.X()<grid.Local().End().X(); ++i.X())
        for (i.Y()=0; i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
          for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
            buffer_send_right.push_back(grid.GetVal(i));

    }

    buffer_recv_left.resize(buffer_send_left.size());
    buffer_recv_right.resize(buffer_send_right.size());

    MPI_Sendrecv(&buffer_send_right.front(), buffer_send_right.size(), MPI_DOUBLE, neighbors.Right(), 7,
                 &buffer_recv_left.front(), buffer_recv_left.size(), MPI_DOUBLE, neighbors.Left(), 7,
                 comm, MPI_STATUS_IGNORE);

    MPI_Sendrecv(&buffer_send_left.front(), buffer_send_left.size(), MPI_DOUBLE, neighbors.Left(), 8,
                 &buffer_recv_right.front(), buffer_recv_right.size(), MPI_DOUBLE, neighbors.Right(), 8,
                 comm, MPI_STATUS_IGNORE);

    if (halo_size_1.X()) {

      iter = buffer_recv_left.begin();
      for (i.X()=0; i.X()<grid.Local().Begin().X(); ++i.X())
        for (i.Y()=0; i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
          for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
            grid(i) = *iter++;
    }

    if (halo_size_2.X()) {

      iter = buffer_recv_right.begin();
      for (i.X()=grid.Local().End().X(); i.X()<grid.Local().SizeTotal().X(); ++i.X())
        for (i.Y()=0; i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
          for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
            grid(i) = *iter++;

    }

  }
}

void CommMPI::CommToGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);

  if (comm != MPI_COMM_NULL) {
    Timer::Start("CTGAsyncGetDatatypes");
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    Timer::Stop("CTGAsyncGetDatatypes");
    Timer::Start("CTGAsyncIsend");
    IsendAll(grid, types.NB(), comm, 2412);
    Timer::Stop("CTGAsyncIsend");
    Timer::Start("CTGAsyncIrecv");
    IrecvAll(grid, types.Halo(), comm, 2412);
    Timer::Stop("CTGAsyncIrecv");
  }
}

void CommMPI::CommToGhostsAsyncFinish()
{
  ClearRequests();
}

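/*
 * Particle redistribution. Every rank gathers the subdomain extents of all
 * ranks (MPI_Allgather), bins each local particle by the rank whose extent
 * contains its grid index, and exchanges the batch sizes via MPI_Alltoall.
 * Positions and charges are then sent directly out of the big particle
 * arrays using indexed-block datatypes, avoiding an explicit pack step.
 * The sender rank and the particle's index in the sender's arrays travel
 * along (tag 2) so that CommAddPotential can later write the potential
 * back to its owner with one-sided communication.
 */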
void CommMPI::CommParticles(const Grid& grid, std::list<Particle::Particle>& particles)
{
  Factory& factory = MG::GetFactory();

  const vmg_int& num_particles_local = factory.GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
  vmg_float* x = factory.GetObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY");
  vmg_float* q = factory.GetObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY");

  int rank, size;
  MPI_Comm_rank(comm_global, &rank);
  MPI_Comm_size(comm_global, &size);

  Index index;
  std::vector<int> global_extent, send_sizes, recv_sizes;
  std::vector<Index> begin_remote, end_remote;
  std::vector< std::vector<int> > pos_indices, charge_indices;
  std::vector<vmg_float> buffer_x, buffer_q;
  std::vector<vmg_int> buffer_ind;
  MPI_Datatype dt_temp;

  global_extent.resize(6*size);
  send_sizes.resize(size);
  recv_sizes.resize(size);
  begin_remote.resize(size);
  end_remote.resize(size);
  pos_indices.resize(size);
  charge_indices.resize(size);

  std::memcpy(&global_extent[6*rank], grid.Global().BeginLocal().vec(), 3*sizeof(int));
  std::memcpy(&global_extent[6*rank+3], grid.Global().EndLocal().vec(), 3*sizeof(int));

  /* All communication below uses comm_global, which rank and size were taken
     from; the Cartesian communicator may be rank-reordered with respect to
     MPI_COMM_WORLD, so mixing the two communicators would pair wrong ranks. */
  MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &global_extent.front(), 6, MPI_INT, comm_global);

  for (int i=0; i<size; ++i) {
    begin_remote[i] = static_cast<Vector>(&global_extent[6*i]);
    end_remote[i] = static_cast<Vector>(&global_extent[6*i+3]);
  }

  for (int i=0; i<num_particles_local; ++i) {
    index = static_cast<Index>((Vector(&x[3*i]) - grid.Extent().Begin()) / grid.Extent().MeshWidth());
    for (int j=0; j<size; ++j)
      if (index.IsInBounds(begin_remote[j], end_remote[j])) {
        pos_indices[j].push_back(3*i);
        charge_indices[j].push_back(i);
        break;
      }
  }

  /*
   * Communicate which process gets how many particles
   */
  for (int i=0; i<size; ++i)
    send_sizes[i] = charge_indices[i].size();

  MPI_Alltoall(&send_sizes.front(), 1, MPI_INT, &recv_sizes.front(), 1, MPI_INT, comm_global);

  /*
   * Send particles
   */
  for (int i=0; i<size; ++i) {

    if (pos_indices[i].size() > 0) {

      MPI_Type_create_indexed_block(pos_indices[i].size(), 3, &pos_indices[i].front(), MPI_DOUBLE, &dt_temp);
      MPI_Type_commit(&dt_temp);
      MPI_Isend(x, 1, dt_temp, i, 0, comm_global, &Request());
      MPI_Type_free(&dt_temp);

      MPI_Type_create_indexed_block(charge_indices[i].size(), 1, &charge_indices[i].front(), MPI_DOUBLE, &dt_temp);
      MPI_Type_commit(&dt_temp);
      MPI_Isend(q, 1, dt_temp, i, 1, comm_global, &Request());
      MPI_Type_free(&dt_temp);

      MPI_Isend(&charge_indices[i].front(), charge_indices[i].size(), MPI_INT, i, 2, comm_global, &Request());

    }

  }

  /*
   * Receive particles. Clear the list once up front, not per sender,
   * so that the batches from several senders accumulate.
   */
  particles.clear();

  for (int i=0; i<size; ++i) {

    if (recv_sizes[i] > 0) {

      buffer_x.resize(3*recv_sizes[i]);
      buffer_q.resize(recv_sizes[i]);
      buffer_ind.resize(recv_sizes[i]);

      MPI_Recv(&buffer_x.front(), 3*recv_sizes[i], MPI_DOUBLE, i, 0, comm_global, MPI_STATUS_IGNORE);
      MPI_Recv(&buffer_q.front(), recv_sizes[i], MPI_DOUBLE, i, 1, comm_global, MPI_STATUS_IGNORE);
      MPI_Recv(&buffer_ind.front(), recv_sizes[i], MPI_INT, i, 2, comm_global, MPI_STATUS_IGNORE);

      for (int j=0; j<recv_sizes[i]; ++j)
        particles.push_back(Particle::Particle(&buffer_x[3*j], buffer_q[j], 0.0, 0.0, i, buffer_ind[j]));

    }

  }

  ClearRequests();
}

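/*
 * Ghost-cell exchange for the linked-cell list. For each dimension the
 * particles in the near-boundary cells are serialized as four doubles
 * (three position components plus the charge) and shipped to the left and
 * right Cartesian neighbors; at a periodic boundary the positions are
 * shifted by the domain extent so the receiver sees consistent coordinates.
 * The buffer size is sent ahead of the payload so the receiver can size
 * its buffer before posting the matching receive.
 */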
void CommMPI::CommLCListGhosts(const Grid& grid, Particle::LinkedCellList& lc)
{
  std::vector<vmg_float> send_buffer_1, send_buffer_2, recv_buffer;
  vmg_int send_size_1, send_size_2, recv_size;
  std::list<Particle::Particle>::iterator p_iter;
  Grid::iterator g_iter;

  Tuple<int> neighbors;

  for (int i=2; i>=0; --i) {

    /*
     * Get ranks of neighbors
     */
    MPI_Cart_shift(comm_global, i, 1, &neighbors.Left(), &neighbors.Right());

    /*
     * Send to left
     */
    send_buffer_1.clear();

    for (g_iter=lc.Iterators().Boundary1()[i].Begin(); g_iter!=lc.Iterators().Boundary1()[i].End(); ++g_iter)
      lc(*g_iter).clear();

    for (g_iter=lc.Iterators().NearBoundary1()[i].Begin(); g_iter!=lc.Iterators().NearBoundary1()[i].End(); ++g_iter)
      for (p_iter=lc(*g_iter).begin(); p_iter!=lc(*g_iter).end(); ++p_iter) {

        if (grid.Global().BeginLocal()[i] == 0)
          for (int j=0; j<3; ++j)
            send_buffer_1.push_back(p_iter->Pos()[j] + grid.Extent().Size()[j]);
        else
          for (int j=0; j<3; ++j)
            send_buffer_1.push_back(p_iter->Pos()[j]);

        send_buffer_1.push_back(p_iter->Charge());

      }

    send_size_1 = send_buffer_1.size();

    MPI_Isend(&send_size_1, 1, MPI_INT, neighbors.Left(), 0, comm_global, &Request());

    if (send_size_1 > 0)
      MPI_Isend(&send_buffer_1.front(), send_size_1, MPI_DOUBLE, neighbors.Left(), 1, comm_global, &Request());

    /*
     * Send to right
     */
    send_buffer_2.clear();

    for (g_iter=lc.Iterators().Boundary2()[i].Begin(); g_iter!=lc.Iterators().Boundary2()[i].End(); ++g_iter)
      lc(*g_iter).clear();

    for (g_iter=lc.Iterators().NearBoundary2()[i].Begin(); g_iter!=lc.Iterators().NearBoundary2()[i].End(); ++g_iter)
      for (p_iter=lc(*g_iter).begin(); p_iter!=lc(*g_iter).end(); ++p_iter) {

        if (grid.Global().EndLocal()[i] == grid.Global().SizeGlobal()[i])
          for (int j=0; j<3; ++j)
            send_buffer_2.push_back(p_iter->Pos()[j] - grid.Extent().Size()[j]);
        else
          for (int j=0; j<3; ++j)
            send_buffer_2.push_back(p_iter->Pos()[j]);

        send_buffer_2.push_back(p_iter->Charge());

      }

    send_size_2 = send_buffer_2.size();

    MPI_Isend(&send_size_2, 1, MPI_INT, neighbors.Right(), 2, comm_global, &Request());

    if (send_size_2 > 0)
      MPI_Isend(&send_buffer_2.front(), send_size_2, MPI_DOUBLE, neighbors.Right(), 3, comm_global, &Request());

    /*
     * Receive from right
     */
    MPI_Recv(&recv_size, 1, MPI_INT, neighbors.Right(), 0, comm_global, MPI_STATUS_IGNORE);

    if (recv_size > 0) {

      recv_buffer.resize(recv_size);
      MPI_Recv(&recv_buffer.front(), recv_size, MPI_DOUBLE, neighbors.Right(), 1, comm_global, MPI_STATUS_IGNORE);

      /* Each particle occupies four doubles: x, y, z, charge. */
      for (vmg_int n=0; n<recv_size; n+=4)
        lc.AddParticleToComplete(&recv_buffer[n], recv_buffer[n+3]);

    }

    /*
     * Receive from left
     */
    MPI_Recv(&recv_size, 1, MPI_INT, neighbors.Left(), 2, comm_global, MPI_STATUS_IGNORE);

    if (recv_size > 0) {

      recv_buffer.resize(recv_size);
      MPI_Recv(&recv_buffer.front(), recv_size, MPI_DOUBLE, neighbors.Left(), 3, comm_global, MPI_STATUS_IGNORE);

      for (vmg_int n=0; n<recv_size; n+=4)
        lc.AddParticleToComplete(&recv_buffer[n], recv_buffer[n+3]);

    }

    ClearRequests();
  }
}

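/*
 * Potential write-back via fence-synchronized one-sided communication.
 * Each rank exposes its PARTICLE_POTENTIAL_ARRAY in an MPI window (created
 * lazily on first use, with the "no_locks" hint set in InitCommMPI), and
 * MPI_Accumulate with MPI_SUM adds every particle's potential at
 * displacement Index() in the window of rank Rank(). The two fences open
 * and close the access epoch.
 */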
void CommMPI::CommAddPotential(std::list<Particle::Particle>& particles)
{
  std::list<Particle::Particle>::iterator iter;

  if (!win_created) {
    vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
    const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
    MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
    win_created = true;
  }

  MPI_Win_fence(MPI_MODE_NOPRECEDE, win);

  for (iter=particles.begin(); iter!=particles.end(); ++iter)
    MPI_Accumulate(&iter->Pot(), 1, MPI_DOUBLE, iter->Rank(), iter->Index(), 1, MPI_DOUBLE, MPI_SUM, win);

  MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);
}

void CommMPI::CommAddPotential(Particle::LinkedCellList& lc)
{
  Grid::iterator g_iter;
  std::list<Particle::Particle>::iterator p_iter;

  if (!win_created) {
    vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
    const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
    MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
    win_created = true;
  }

  MPI_Win_fence(MPI_MODE_NOPRECEDE, win);

  for (g_iter=lc.Iterators().Local().Begin(); g_iter!=lc.Iterators().Local().End(); ++g_iter)
    for (p_iter=lc(*g_iter).begin(); p_iter!=lc(*g_iter).end(); ++p_iter)
      MPI_Accumulate(&p_iter->Pot(), 1, MPI_DOUBLE, p_iter->Rank(), p_iter->Index(), 1, MPI_DOUBLE, MPI_SUM, win);

  MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);
}

vmg_float CommMPI::GlobalSum(const vmg_float& value)
{
  vmg_float result = value;

  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);

  return result;
}

vmg_float CommMPI::GlobalSumRoot(const vmg_float& value)
{
  vmg_float recv_buffer = 0.0;  /* only meaningful on rank 0 after the reduction */
  vmg_float send_buffer = value;

  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);

  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

void CommMPI::PrintString(const char* format, ...)
{
  va_list args;
  va_start(args, format);
  vsnprintf(print_buffer, sizeof(print_buffer), format, args);  /* bounded, unlike vsprintf */
  printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
  va_end(args);
}

void CommMPI::PrintStringOnce(const char* format, ...)
{
  if (GlobalRank() == 0) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

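/*
 * XML output via MPI-IO. PrintXML writes a complete file from each calling
 * process on MPI_COMM_SELF; PrintXMLAll writes one shared file collectively:
 * rank 0 prepends a small header (the process count, generated with pugixml)
 * through the shared file pointer, then all ranks append their payload in
 * rank order with MPI_File_write_ordered.
 */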
void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;
  MPI_Status status;

  Multigrid* mg = MG::GetFactory().Get("SOL")->Cast<Multigrid>();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg->MinLevel(); i<=mg->MaxLevel(); ++i) {

    MPI_Comm comm = settings.CommunicatorGlobal((*mg)(i));

    if (comm != MPI_COMM_NULL) {

      MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

      int comm_rank;
      MPI_Comm_rank(comm, &comm_rank);

      if (comm_rank == 0) {

        buf.str("");
        buf << "PROCESS INDEPENDENT INFORMATION:" << std::endl
            << "GRID LEVEL " << i << std::endl
            << "BEGINFINEST: " << (*mg)(i).Global().BeginFinest() << std::endl
            << "ENDFINEST: " << (*mg)(i).Global().EndFinest() << std::endl
            << "SIZEFINEST: " << (*mg)(i).Global().SizeFinest() << std::endl
            << "SIZEGLOBAL: " << (*mg)(i).Global().SizeGlobal() << std::endl
            << "BOUNDARYTYPE: " << (*mg)(i).Global().BoundaryType() << std::endl
            << "EXTENT" << std::endl
            << "SIZE: " << (*mg)(i).Extent().Size() << std::endl
            << "SIZEFACTOR: " << (*mg)(i).Extent().SizeFactor() << std::endl
            << "BEGIN: " << (*mg)(i).Extent().Begin() << std::endl
            << "END: " << (*mg)(i).Extent().End() << std::endl
            << "MESHWIDTH: " << (*mg)(i).Extent().MeshWidth() << std::endl
            << std::endl
            << "PROCESS DEPENDENT INFORMATION:" << std::endl;

        char* char_buf = Helper::GetCharArray(buf.str());

        MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, &status);

        delete [] char_buf;

      }

      buf.str("");
      buf << "PROCESS " << comm_rank << ":" << std::endl
          << "GLOBAL" << std::endl
          << "BEGINLOCAL: " << (*mg)(i).Global().BeginLocal() << std::endl
          << "ENDLOCAL: " << (*mg)(i).Global().EndLocal() << std::endl
          << "SIZELOCAL: " << (*mg)(i).Global().SizeLocal() << std::endl
          << "LOCAL" << std::endl
          << "SIZE: " << (*mg)(i).Local().Size() << std::endl
          << "SIZETOTAL: " << (*mg)(i).Local().SizeTotal() << std::endl
          << "BEGIN: " << (*mg)(i).Local().Begin() << std::endl
          << "END: " << (*mg)(i).Local().End() << std::endl
          << "HALOBEGIN1: " << (*mg)(i).Local().HaloBegin1() << std::endl
          << "HALOEND1: " << (*mg)(i).Local().HaloEnd1() << std::endl
          << "HALOBEGIN2: " << (*mg)(i).Local().HaloBegin2() << std::endl
          << "HALOEND2: " << (*mg)(i).Local().HaloEnd2() << std::endl
          << "BOUNDARYBEGIN1: " << (*mg)(i).Local().BoundaryBegin1() << std::endl
          << "BOUNDARYEND1: " << (*mg)(i).Local().BoundaryEnd1() << std::endl
          << "BOUNDARYBEGIN2: " << (*mg)(i).Local().BoundaryBegin2() << std::endl
          << "BOUNDARYEND2: " << (*mg)(i).Local().BoundaryEnd2() << std::endl
          << "ALIGNMENTBEGIN: " << (*mg)(i).Local().AlignmentBegin() << std::endl
          << "ALIGNMENTEND: " << (*mg)(i).Local().AlignmentEnd() << std::endl
          << std::endl;

      char* char_buf = Helper::GetCharArray(buf.str());

      MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, &status);

      delete [] char_buf;

      MPI_File_close(&file);

    }
  }

  delete [] filename;
}

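/*
 * Grid output. The Print*Grid family serializes grid values to a string
 * buffer and hands it to CreateOutputFiles(), which produces one VTK
 * ImageData piece (.vti) per process plus a parallel index file (.pvti).
 * PrintDefect prints the residual obtained via TempGrid::ImportFromResidual;
 * the *Corrected* variants subtract the global correction term
 * Grid::Correction() from every value.
 */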
void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);

  PrintInnerGrid(*temp, information);
}

void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  Index i;
  std::stringstream buf;

  Index begin = 0;
  Index end = grid.Local().End();
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int j=0; j<3; ++j)
    if (grid.Global().EndLocal()[j] == grid.Global().SizeGlobal()[j])
      end[j] = grid.Local().SizeTotal()[j];

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  begin_local = grid.Global().BeginLocal();
  end_local = grid.Global().EndLocal();
  begin_global = 0;

  for (int i=0; i<3; ++i) {

    if (BoundaryConditions()[i] == Dirichlet ||
        BoundaryConditions()[i] == Open) {

      if (begin_local[i] != 0)
        begin_local[i] -= grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i];

      end_global[i] = grid.Global().SizeGlobal()[i];

    }else if (BoundaryConditions()[i] == Periodic) {

      if (end_local[i] == grid.Global().SizeGlobal()[i])
        end_local[i] += grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i] + grid.Local().HaloEnd2()[i] - grid.Local().HaloBegin2()[i];
      else
        end_local[i] += grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i];

      end_global[i] = grid.Global().SizeGlobal()[i] +
                      grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i] +
                      grid.Local().HaloEnd2()[i] - grid.Local().HaloBegin2()[i];

    }

  }

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local);
}

void CommMPI::PrintCorrectedGrid(Grid& grid, const char* information)
{
  Index i;

  std::stringstream buf;

  Index begin = 0;
  Index end = grid.Local().End();
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int j=0; j<3; ++j)
    if (grid.Global().EndLocal()[j] == grid.Global().SizeGlobal()[j])
      end[j] = grid.Local().SizeTotal()[j];

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) - Grid::Correction() << " ";

  begin_local = grid.Global().BeginLocal();
  end_local = grid.Global().EndLocal();
  begin_global = 0;

  for (int i=0; i<3; ++i) {

    if (BoundaryConditions()[i] == Dirichlet ||
        BoundaryConditions()[i] == Open) {

      if (begin_local[i] != 0)
        --begin_local[i];

      end_global[i] = grid.Global().SizeGlobal()[i];

    }else if (BoundaryConditions()[i] == Periodic) {

      if (begin_local[i] != 0)
        --begin_local[i];

      if (end_local[i] == grid.Global().SizeGlobal()[i])
        end_local[i] += 2;

      end_global[i] = grid.Global().SizeGlobal()[i] + 2;

    }

  }

  /* Pass the per-dimension extents computed above; piece extents must be
     given in global index space for the .pvti file to assemble correctly. */
  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local);
}

void CommMPI::PrintInnerGrid(Grid& grid, const char* information)
{
  Index i;
  Index begin_local, end_local, begin_global, end_global;
  std::stringstream buf;

  CommToGhosts(grid);

  Index begin = 0;
  Index end = grid.Local().End();

  /* VTK parallel ImageData files seem to require some overlap. */
  for (int j=0; j<3; ++j)
    if (grid.Global().BeginLocal()[j] == 0)
      ++begin[j];

  /* Write grid values into buffer */
  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  for (int j=0; j<3; ++j) {

    if (BoundaryConditions()[j] == Dirichlet ||
        BoundaryConditions()[j] == Open) {

      begin_global[j] = 1;
      end_global[j] = grid.Global().SizeGlobal()[j] - 1;

      if (grid.Global().BeginLocal()[j] == 0)
        begin_local[j] = 1;
      else
        begin_local[j] = grid.Global().BeginLocal()[j] - 1;

      if (grid.Global().EndLocal()[j] == grid.Global().SizeGlobal()[j])
        end_local[j] = grid.Global().EndLocal()[j] - 1;
      else
        end_local[j] = grid.Global().EndLocal()[j];

    }else if (BoundaryConditions()[j] == Periodic) {

      begin_global[j] = 0;
      end_global[j] = grid.Global().SizeGlobal()[j];

      if (grid.Global().BeginLocal()[j] == 0)
        begin_local[j] = grid.Global().BeginLocal()[j];
      else
        begin_local[j] = grid.Global().BeginLocal()[j] - 1;

      end_local[j] = grid.Global().EndLocal()[j];

    }else {

      assert(0 && "Boundary condition not handled properly.");

    }

  }

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local);
}

void CommMPI::PrintInnerCorrectedGrid(Grid& grid, const char* information)
{
  Index i;
  const Index begin = grid.Local().Begin();
  const Index end = grid.Local().End();
  std::stringstream buf;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) - Grid::Correction() << " ";

  CreateOutputFiles(grid, buf, information,
                    0, grid.Global().SizeGlobal(),
                    grid.Global().BeginLocal(), grid.Global().EndLocal());
}

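/*
 * VTK file creation. CreateParallelOutputFile writes the .pvti index that
 * stitches the per-process pieces together: rank 0 contributes header and
 * footer through the shared file pointer, and every rank writes its own
 * <Piece> line in rank order. CreateSerialOutputFile opens the per-process
 * .vti file, writes its header, and returns the open handle so the caller
 * can append the point data before FinalizeSerialOutputFile closes the XML
 * elements and the file. VTK extents are inclusive, hence the `end - 1`
 * written into the Extent attributes.
 */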
void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local)
{
  int output_count = OutputCount();

  MPI_Comm comm = settings.CommunicatorGlobal(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  snprintf(parallel_filename, sizeof(parallel_filename), "%s%04d.pvti", OutputPath().c_str(), output_count);
  snprintf(serial_filename, sizeof(serial_filename), "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << "  <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] - 1 << " ";

    buf << "\"" << std::endl
        << "              GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << "    <PPointData Scalars=\"" << information << "\">" << std::endl
        << "      <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << "    </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");
  buf << "    <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] - 1 << " ";

  buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << "  </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  snprintf(serial_filename, sizeof(serial_filename), "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << "  <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] - 1 << " ";

  buf << "\"" << std::endl
      << "             Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << "    <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] - 1 << " ";

  buf << "\">" << std::endl
      << "      <PointData Scalars=\"" << information << "\">" << std::endl
      << "        <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << "          ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << "        </DataArray>" << std::endl
      << "      </PointData>" << std::endl
      << "    </Piece>" << std::endl
      << "  </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

int CommMPI::GlobalRank() const
{
  int rank;

  MPI_Comm_rank(comm_global, &rank);

  return rank;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;

  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());

  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;

  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());

  return dims;
}

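/*
 * Communicator setup. If the supplied communicator already carries a
 * Cartesian topology it is adopted; otherwise a 3D Cartesian communicator
 * is created with MPI_Dims_create/MPI_Cart_create, with per-dimension
 * periodicity derived from the boundary conditions and rank reordering
 * allowed. The "no_locks" info key is a hint for the RMA windows created
 * later in CommAddPotential, which use fences and never locks.
 */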
void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    /* Duplicate rather than alias the caller's communicator, so that the
       destructor may free comm_global without touching the original. */
    MPI_Comm_dup(comm, &comm_global);

  }else {

    MPI_Dims_create(size, 3, dims);

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);
}

CommMPI::~CommMPI()
{
  MPI_Comm_free(&comm_global);

  if (win_created)
    MPI_Win_free(&win);

  MPI_Info_free(&info);
}

Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  return settings.CoarserGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  return settings.FinerGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetGlobalCoarseGrid(Multigrid& multigrid)
{
  Grid& global_coarse_grid = settings.GlobalCoarseGrid();
  CommSubgrid(multigrid(multigrid.MinLevel()), global_coarse_grid);
  return global_coarse_grid;
}

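/*
 * Output directory setup (only with Boost.Filesystem): rank 0 builds a
 * timestamped directory name under ./output/, appends a numeric suffix
 * until the name is unique, creates the directory, and broadcasts first
 * the path length and then the path itself, so that all ranks return the
 * same string. Without Boost.Filesystem all output goes to the current
 * directory.
 */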
std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  }else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}

#endif /* HAVE_MPI */