/**
 * @file   comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date   Wed Jun 16 13:21:06 2010
 *
 * @brief  Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#ifdef HAVE_VTK
#include <vtkAbstractArray.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#endif

#include <cassert>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iomanip>
#include <list>
#include <sstream>
#include <string>
#include <vector>

#include "base/helper.hpp"
#include "base/linked_cell_list.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "comm/mpi/datatypes_local.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"
#include "base/timer.hpp"

static char print_buffer[512];

using namespace VMG;

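/*
 * Thin wrappers that post the non-blocking send/receive operations described
 * by a list of VMG::MPI::Datatype objects. The request handles are obtained
 * from Request() and completed later via WaitAll()/TestAll().
 */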
void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Isend(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Irecv(grid, tag_start, comm, Request());
}

void CommMPI::IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IsendBuffered(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IrecvBuffered(tag_start, comm, Request());
}

void CommMPI::ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridReplace(grid);
}

void CommMPI::AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridSum(grid);
}

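/*
 * Transfers data between two distributed grids using the precomputed global
 * datatypes for the given direction. CommSubgrid replaces the target values,
 * CommAddSubgrid accumulates them onto the target grid.
 */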
void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 0411);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 0411);
    WaitAll();
    ReplaceBufferAll(grid_new, datatypes.Receive());
  }
}

void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 1806);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 1806);
    WaitAll();
    AddBufferAll(grid_new, datatypes.Receive());
  }
}

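/*
 * Halo exchange on a single grid level: data from the interior (NB) regions
 * is sent to the neighboring processes and received into the halo regions.
 * The AsyncStart/AsyncFinish variants split the exchange so that computation
 * can overlap with communication.
 */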
void CommMPI::CommToGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2310);
    IsendAllBuffered(grid, types.NB(), comm, 2310);
    WaitAll();
    ReplaceBufferAll(grid, types.Halo());
  }
}

void CommMPI::CommToGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2412);
    IsendAllBuffered(grid, types.NB(), comm, 2412);
    TestAll();
  }
}

void CommMPI::CommToGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  ReplaceBufferAll(grid, settings.DatatypesLocal(grid).Halo());
}

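/*
 * Inverse halo exchange: values accumulated in the halo regions are sent back
 * to the owning neighbor and added onto its interior (NB) points.
 */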
void CommMPI::CommFromGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 1337);
    IsendAllBuffered(grid, types.Halo(), comm, 1337);
    WaitAll();
    AddBufferAll(grid, types.NB());
  }
}

void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
    IsendAllBuffered(grid, types.Halo(), comm, 0xc0ffee);
    TestAll();
  }
}

void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  AddBufferAll(grid, settings.DatatypesLocal(grid).NB());
}

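/*
 * Distributes particles to the processes whose grid region contains their
 * position. Each rank gathers all local grid extents, sorts its particles
 * into per-destination send buffers, exchanges the buffer sizes via
 * MPI_Alltoall and finally transfers positions, charges and original indices
 * with non-blocking point-to-point messages.
 */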
void CommMPI::CommParticles(const Grid& grid, std::list<Particle::Particle>& particles)
{
  Factory& factory = MG::GetFactory();

  const vmg_int& num_particles_local = factory.GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
  vmg_float* x = factory.GetObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY");
  vmg_float* q = factory.GetObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY");

  int rank, size;
  MPI_Comm_rank(comm_global, &rank);
  MPI_Comm_size(comm_global, &size);

#ifndef VMG_ONE_SIDED
  vmg_int* receiver;
  if (!factory.TestObject("PARTICLE_RECEIVER_ARRAY"))
    new ObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY", size);
  receiver = factory.GetObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY");
#endif

  Index index;
  std::vector<int> global_extent(6*size);
  std::vector<int> send_sizes(size);
  std::vector<int> recv_sizes(size);
  std::vector<Index> begin_remote(size);
  std::vector<Index> end_remote(size);
  std::vector< std::vector<vmg_float> > send_buffer_x(size);
  std::vector< std::vector<vmg_float> > send_buffer_q(size);
  std::vector< std::vector<vmg_int> > send_buffer_ind(size);
  std::vector< std::vector<vmg_float> > recv_buffer_x(size);
  std::vector< std::vector<vmg_float> > recv_buffer_q(size);
  std::vector< std::vector<vmg_int> > recv_buffer_ind(size);
  MPI_Datatype dt_temp;

  std::memcpy(&global_extent[6*rank], grid.Global().LocalBegin().vec(), 3*sizeof(int));
  std::memcpy(&global_extent[6*rank+3], grid.Global().LocalEnd().vec(), 3*sizeof(int));

  MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &global_extent.front(), 6, MPI_INT, comm_global);

  for (int i=0; i<size; ++i) {
    begin_remote[i] = static_cast<Index>(&global_extent[6*i]);
    end_remote[i] = static_cast<Index>(&global_extent[6*i+3]);
  }

  for (int i=0; i<num_particles_local; ++i) {
    index = static_cast<Index>((Vector(&x[3*i]) - grid.Extent().Begin()) / grid.Extent().MeshWidth());
    for (int j=0; j<size; ++j)
      if (index.IsInBounds(begin_remote[j], end_remote[j])) {
        send_buffer_x[j].push_back(x[3*i+0]);
        send_buffer_x[j].push_back(x[3*i+1]);
        send_buffer_x[j].push_back(x[3*i+2]);
        send_buffer_q[j].push_back(q[i]);
        send_buffer_ind[j].push_back(i);
        break;
      }
  }

  /*
   * Communicate which process gets how many particles
   */
  for (int i=0; i<size; ++i)
    send_sizes[i] = send_buffer_q[i].size();

  MPI_Alltoall(&send_sizes.front(), 1, MPI_INT, &recv_sizes.front(), 1, MPI_INT, comm_global);

  /*
   * Send particles
   */
  for (int i=0; i<size; ++i) {

    if (!send_buffer_q[i].empty()) {
      MPI_Isend(&send_buffer_x[i].front(), send_buffer_x[i].size(), MPI_DOUBLE, i, 0, comm_global, &Request());
      MPI_Isend(&send_buffer_q[i].front(), send_buffer_q[i].size(), MPI_DOUBLE, i, 1, comm_global, &Request());
      MPI_Isend(&send_buffer_ind[i].front(), send_buffer_ind[i].size(), MPI_INT, i, 2, comm_global, &Request());
    }

#ifndef VMG_ONE_SIDED
    receiver[i] = send_buffer_q[i].size();
#endif
  }

  /*
   * Receive particles
   */
  for (int i=0; i<size; ++i) {

    if (recv_sizes[i] > 0) {

      recv_buffer_x[i].resize(3*recv_sizes[i]);
      recv_buffer_q[i].resize(recv_sizes[i]);
      recv_buffer_ind[i].resize(recv_sizes[i]);

      MPI_Irecv(&recv_buffer_x[i].front(), 3*recv_sizes[i], MPI_DOUBLE, i, 0, comm_global, &Request());
      MPI_Irecv(&recv_buffer_q[i].front(), recv_sizes[i], MPI_DOUBLE, i, 1, comm_global, &Request());
      MPI_Irecv(&recv_buffer_ind[i].front(), recv_sizes[i], MPI_INT, i, 2, comm_global, &Request());

    }

  }

  WaitAll();

  particles.clear();

  for (int i=0; i<size; ++i)
    for (int j=0; j<recv_sizes[i]; ++j)
      particles.push_back(Particle::Particle(&recv_buffer_x[i][3*j], recv_buffer_q[i][j], 0.0, 0.0, i, recv_buffer_ind[i][j]));
}

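/*
 * Returns the computed potentials to the ranks the particles originally came
 * from. The VMG_ONE_SIDED path via MPI_Put into an RMA window is kept here
 * only as commented-out code; otherwise potentials and particle indices are
 * exchanged with non-blocking sends/receives and written back into the
 * PARTICLE_POTENTIAL_ARRAY.
 */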
void CommMPI::CommParticlesBack(std::list<Particle::Particle>& particles)
{
  std::list<Particle::Particle>::iterator iter;

#ifdef VMG_ONE_SIDED
  // if (!win_created) {
  //   vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
  //   const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
  //   MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
  //   win_created = true;
  // }

  // MPI_Win_fence(MPI_MODE_NOPRECEDE, win);

  // for (iter=particles.begin(); iter!=particles.end(); ++iter)
  //   MPI_Put(&iter->Pot(), 1, MPI_DOUBLE, iter->Rank(), iter->Index(), 1, MPI_DOUBLE, win);

  // MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);
#else
  int rank, size;
  MPI_Comm_rank(comm_global, &rank);
  MPI_Comm_size(comm_global, &size);

  std::vector< std::vector<vmg_float> > send_buffer_pot(size);
  std::vector< std::vector<vmg_float> > recv_buffer_pot(size);
  std::vector< std::vector<vmg_int> > send_buffer_index(size);
  std::vector< std::vector<vmg_int> > recv_buffer_index(size);

  vmg_int* size_receive = MG::GetFactory().GetObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY");
  vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");

  // Build send buffer
  for (iter=particles.begin(); iter!=particles.end(); ++iter) {
    send_buffer_pot[iter->Rank()].push_back(iter->Pot());
    send_buffer_index[iter->Rank()].push_back(iter->Index());
  }

  // Send potentials
  for (int i=0; i<size; ++i) {
    if (!send_buffer_pot[i].empty()) {
      MPI_Isend(&send_buffer_pot[i].front(), send_buffer_pot[i].size(), MPI_DOUBLE, i, 699+rank, comm_global, &Request());
      MPI_Isend(&send_buffer_index[i].front(), send_buffer_index[i].size(), MPI_INT, i, 32111+rank, comm_global, &Request());
    }
  }

  // Receive potentials
  for (int i=0; i<size; ++i) {
    if (size_receive[i] > 0) {
      recv_buffer_pot[i].resize(size_receive[i]);
      recv_buffer_index[i].resize(size_receive[i]);
      MPI_Irecv(&recv_buffer_pot[i].front(), size_receive[i], MPI_DOUBLE, i, 699+i, comm_global, &Request());
      MPI_Irecv(&recv_buffer_index[i].front(), size_receive[i], MPI_INT, i, 32111+i, comm_global, &Request());
    }
  }

  WaitAll();

  // Add potential values
  for (int i=0; i<size; ++i)
    for (unsigned int j=0; j<size_receive[i]; ++j)
      p[recv_buffer_index[i][j]] = recv_buffer_pot[i][j];
#endif

}

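/*
 * Fills the halo cells of the linked-cell list with copies of the particles
 * owned by the neighboring processes. Where the exchange wraps around the
 * domain boundary the positions are shifted by the domain extent. Buffers are
 * packed as four doubles per particle: x, y, z, charge.
 */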
void CommMPI::CommLCListToGhosts(const Grid& grid, Particle::LinkedCellList& lc)
{
  VMG::MPI::DatatypesLocal types(lc, comm_global);
  std::vector< std::vector<vmg_float> > send_buffer(types.NB().size());
  std::vector< std::vector<vmg_float> > recv_buffer(types.Halo().size());
  std::vector<vmg_int> send_size(types.NB().size());
  vmg_int recv_size;
  std::list<Particle::Particle*>::iterator iter;
  Index ind;

  lc.ClearHalo();

  for (int i=0; i<3; ++i) {
    for (Grid::iterator iter=lc.Iterators().Halo1()[i].Begin(); iter!=lc.Iterators().Halo1()[i].End(); ++iter)
      assert(lc(*iter).size() == 0);
    for (Grid::iterator iter=lc.Iterators().Halo2()[i].Begin(); iter!=lc.Iterators().Halo2()[i].End(); ++iter)
      assert(lc(*iter).size() == 0);
  }

  for (unsigned int i=0; i<types.NB().size(); ++i)
    if (types.NB()[i].Feasible()) {
      for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
        for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
          for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
            for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter) {
              if (grid.Global().LocalBegin()[i] == 0)
                for (int j=0; j<3; ++j)
                  send_buffer[i].push_back((*iter)->Pos()[j] + (i==j ? grid.Extent().Size()[j] : 0.0));
              else
                for (int j=0; j<3; ++j)
                  send_buffer[i].push_back((*iter)->Pos()[j]);

              send_buffer[i].push_back((*iter)->Charge());
            }

      send_size[i] = send_buffer[i].size();

      MPI_Isend(&send_size[i], 1, MPI_INT, types.NB()[i].Rank(), 2048+types.NB()[i].TagSend(), comm_global, &Request());

      if (send_size[i] > 0)
        MPI_Isend(&send_buffer[i].front(), send_size[i], MPI_DOUBLE,
                  types.NB()[i].Rank(), 4096+types.NB()[i].TagSend(),
                  comm_global, &Request());
    }

  for (unsigned int i=0; i<types.Halo().size(); ++i)
    if (types.Halo()[i].Feasible()) {
      MPI_Recv(&recv_size, 1, MPI_INT, types.Halo()[i].Rank(), 2048+types.Halo()[i].TagReceive(), comm_global, MPI_STATUS_IGNORE);
      if (recv_size > 0) {
        recv_buffer[i].resize(recv_size);
        MPI_Irecv(&recv_buffer[i].front(), recv_size, MPI_DOUBLE,
                  types.Halo()[i].Rank(), 4096+types.Halo()[i].TagReceive(),
                  comm_global, &Request());
      }
    }

  WaitAll();

  for (unsigned int i=0; i<recv_buffer.size(); ++i)
    for (unsigned int j=0; j<recv_buffer[i].size(); j+=4)
      lc.AddParticleToHalo(&recv_buffer[i][j], recv_buffer[i][j+3]);
}

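/*
 * Accumulates the potential contributions computed on halo copies back onto
 * the owning particles: each halo cell's potentials are sent to the neighbor
 * that owns the corresponding interior cell and added there.
 */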
void CommMPI::CommLCListFromGhosts(const Grid& grid, Particle::LinkedCellList& lc)
{
  VMG::MPI::DatatypesLocal types(lc, comm_global);
  std::vector< std::vector<vmg_float> > send_buffer(types.NB().size());
  std::vector< std::vector<vmg_float> > recv_buffer(types.Halo().size());
  std::vector<vmg_int> send_size(types.NB().size());
  vmg_int recv_size;
  std::list<Particle::Particle*>::iterator iter;
  Index ind;

  for (unsigned int i=0; i<types.Halo().size(); ++i) {
    assert(types.Halo()[i].Subsizes().X() == types.NB()[i].Subsizes().X());
    assert(types.Halo()[i].Subsizes().Y() == types.NB()[i].Subsizes().Y());
    assert(types.Halo()[i].Subsizes().Z() == types.NB()[i].Subsizes().Z());

    for (ind.X() = 0; ind.X() < types.Halo()[i].Subsizes().X(); ++ind.X())
      for (ind.Y() = 0; ind.Y() < types.Halo()[i].Subsizes().Y(); ++ind.Y())
        for (ind.Z() = 0; ind.Z() < types.Halo()[i].Subsizes().Z(); ++ind.Z())
          assert(lc(ind+types.Halo()[i].Starts()).size() == lc(ind+types.NB()[i].Starts()).size());
  }

  for (unsigned int i=0; i<types.Halo().size(); ++i)
    if (types.Halo()[i].Feasible()) {

      for (ind.X() = types.Halo()[i].Starts().X(); ind.X() < types.Halo()[i].Starts().X()+types.Halo()[i].Subsizes().X(); ++ind.X())
        for (ind.Y() = types.Halo()[i].Starts().Y(); ind.Y() < types.Halo()[i].Starts().Y()+types.Halo()[i].Subsizes().Y(); ++ind.Y())
          for (ind.Z() = types.Halo()[i].Starts().Z(); ind.Z() < types.Halo()[i].Starts().Z()+types.Halo()[i].Subsizes().Z(); ++ind.Z())
            for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter)
              send_buffer[i].push_back((*iter)->Pot());

      MPI_Isend(&send_buffer[i].front(), send_buffer[i].size(), MPI_DOUBLE, types.Halo()[i].Rank(), 342523+types.Halo()[i].TagSend(), comm_global, &Request());
    }

  for (unsigned int i=0; i<types.NB().size(); ++i)
    if (types.NB()[i].Feasible()) {

      recv_size = 0;
      for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
        for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
          for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
            recv_size += lc(ind).size();

      if (recv_size > 0) {
        recv_buffer[i].resize(recv_size);
        MPI_Irecv(&recv_buffer[i].front(), recv_size, MPI_DOUBLE, types.NB()[i].Rank(), 342523+types.NB()[i].TagReceive(), comm_global, &Request());
      }
    }

  WaitAll();

  for (unsigned int i=0; i<types.NB().size(); ++i) {
    unsigned int c=0;
    if (types.NB()[i].Feasible()) {
      for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
        for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
          for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
            for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter)
              (*iter)->Pot() += recv_buffer[i][c++];
    }
  }
}

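/*
 * Scalar and array reductions. The Global* variants reduce over the global
 * Cartesian communicator, the Level* variants over the communicator of the
 * processes holding a part of the given grid level. The *Root variants only
 * deliver the result on rank 0.
 */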
vmg_float CommMPI::GlobalSum(const vmg_float& value)
{
  vmg_float result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
  return result;
}

vmg_float CommMPI::GlobalSumRoot(const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

vmg_int CommMPI::GlobalSum(const vmg_int& value)
{
  vmg_int result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm_global);
  return result;
}

vmg_int CommMPI::GlobalSumRoot(const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm_global);
}

vmg_float CommMPI::LevelSum(const Grid& grid, const vmg_float& value)
{
  vmg_float result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
  return result;
}

vmg_float CommMPI::LevelSumRoot(const Grid& grid, const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm);
}

vmg_int CommMPI::LevelSum(const Grid& grid, const vmg_int& value)
{
  vmg_int result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm);
  return result;
}

vmg_int CommMPI::LevelSumRoot(const Grid& grid, const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm);
}

void CommMPI::PrintString(const char* format, ...)
{
  va_list args;
  va_start(args, format);
  vsnprintf(print_buffer, sizeof(print_buffer), format, args);
  printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
  va_end(args);
}

void CommMPI::PrintStringOnce(const char* format, ...)
{
  if (GlobalRank() == 0) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}

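/*
 * Appends a human-readable dump of the global and per-rank grid layout
 * (extents, halo and boundary regions) to the given file, using ordered
 * collective writes so the output appears rank by rank.
 */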
void CommMPI::PrintGridInformation(const Grid& grid, char* filename, const std::string& name)
{
  std::stringstream buf;
  MPI_File file;
  int rank, size;
  int size_local, size_local_max;

  MPI_Comm comm = settings.CommunicatorGlobal(grid);
  MPI_Comm comm_local = settings.CommunicatorLocal(grid);

  if (comm_local != MPI_COMM_NULL)
    MPI_Comm_size(comm_local, &size_local);
  else
    size_local = 0;

  if (comm != MPI_COMM_NULL) {

    MPI_Reduce(&size_local, &size_local_max, 1, MPI_INT, MPI_MAX, 0, comm);

    MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (rank == 0) {

      buf << "###########################################################" << std::endl
          << "GLOBAL INFORMATION:" << std::endl
          << "  NAME: " << name << std::endl
          << "  LEVEL: " << grid.Level() << std::endl
          << "  COMM_SIZE_GLOBAL: " << size << std::endl
          << "  COMM_SIZE_LOCAL: " << size_local_max << std::endl
          << "  GLOBAL:" << std::endl
          << "    GLOBAL_FINER_BEGIN: " << grid.Global().GlobalFinerBegin() << std::endl
          << "    GLOBAL_FINER_END: " << grid.Global().GlobalFinerEnd() << std::endl
          << "    GLOBAL_FINER_SIZE: " << grid.Global().GlobalFinerSize() << std::endl
          << "    FINEST_ABS_BEGIN: " << grid.Global().FinestAbsBegin() << std::endl
          << "    FINEST_ABS_END: " << grid.Global().FinestAbsEnd() << std::endl
          << "    FINEST_ABS_SIZE: " << grid.Global().FinestAbsSize() << std::endl
          << "    GLOBAL_SIZE: " << grid.Global().GlobalSize() << std::endl
          << "  EXTENT:" << std::endl
          << "    BEGIN: " << grid.Extent().Begin() << std::endl
          << "    END: " << grid.Extent().End() << std::endl
          << "    SIZE: " << grid.Extent().Size() << std::endl
          << "    MESH_WIDTH: " << grid.Extent().MeshWidth() << std::endl
          << std::endl
          << "LOCAL INFORMATION:" << std::endl;
    }

    buf << "RANK " << rank << ":" << std::endl
        << "  GLOBAL:" << std::endl
        << "    LOCAL_BEGIN: " << grid.Global().LocalBegin() << std::endl
        << "    LOCAL_END: " << grid.Global().LocalEnd() << std::endl
        << "    LOCAL_SIZE: " << grid.Global().LocalSize() << std::endl
        << "    LOCAL_FINER_BEGIN: " << grid.Global().LocalFinerBegin() << std::endl
        << "    LOCAL_FINER_END: " << grid.Global().LocalFinerEnd() << std::endl
        << "    LOCAL_FINER_SIZE: " << grid.Global().LocalFinerSize() << std::endl
        << "    BOUNDARY_TYPE: " << grid.Global().BoundaryType() << std::endl
        << "  LOCAL:" << std::endl
        << "    BEGIN: " << grid.Local().Begin() << std::endl
        << "    END: " << grid.Local().End() << std::endl
        << "    SIZE: " << grid.Local().Size() << std::endl
        << "    SIZE_TOTAL: " << grid.Local().SizeTotal() << std::endl
        << "    HALO_BEGIN_1: " << grid.Local().HaloBegin1() << std::endl
        << "    HALO_END_1: " << grid.Local().HaloEnd1() << std::endl
        << "    HALO_SIZE_1: " << grid.Local().HaloSize1() << std::endl
        << "    HALO_BEGIN_2: " << grid.Local().HaloBegin2() << std::endl
        << "    HALO_END_2: " << grid.Local().HaloEnd2() << std::endl
        << "    HALO_SIZE_2: " << grid.Local().HaloSize2() << std::endl
        << "    BOUNDARY_BEGIN_1: " << grid.Local().BoundaryBegin1() << std::endl
        << "    BOUNDARY_END_1: " << grid.Local().BoundaryEnd1() << std::endl
        << "    BOUNDARY_SIZE_1: " << grid.Local().BoundarySize1() << std::endl
        << "    BOUNDARY_BEGIN_2: " << grid.Local().BoundaryBegin2() << std::endl
        << "    BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
        << "    BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl
        << "    FINER_BEGIN: " << grid.Local().FinerBeginFoo() << std::endl
        << "    FINER_END: " << grid.Local().FinerEndFoo() << std::endl
        << "    FINER_SIZE: " << grid.Local().FinerSizeFoo() << std::endl;

    if (rank == size-1)
      buf << "###########################################################" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;

  const Multigrid& mg = *MG::GetSol();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg.MinLevel(); i<=mg.MaxLevel(); ++i)
    PrintGridInformation(mg(i), filename, "MULTIGRID");

  for (int i=mg.MinLevel()+1; i<=mg.MaxLevel(); ++i)
    PrintGridInformation(settings.CoarserGrid(mg(i)), filename, "COARSER_GRID");

  for (int i=mg.MinLevel(); i<mg.MaxLevel(); ++i)
    PrintGridInformation(settings.FinerGrid(mg(i)), filename, "FINER_GRID");

  delete [] filename;

}

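/*
 * Writes the grid values of one level to disk for visualization. With VTK
 * support each rank produces a .vti image file; otherwise the values are
 * dumped as ASCII through CreateOutputFiles(), which also writes the
 * accompanying parallel .pvti descriptor.
 */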
void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  int output_count = OutputCount();

#ifdef HAVE_VTK

  if (settings.CommunicatorLocal(grid) != MPI_COMM_NULL) {

    Index end, end_global;

    for (int i=0; i<3; ++i) {
      end[i] = grid.Local().End()[i];
      end_global[i] = grid.Global().LocalEnd()[i];
    }

    vtkSmartPointer<vtkImageData> image = vtkSmartPointer<vtkImageData>::New();
    image->SetExtent(grid.Global().LocalBegin().X(), end_global.X()-1,
                     grid.Global().LocalBegin().Y(), end_global.Y()-1,
                     grid.Global().LocalBegin().Z(), end_global.Z()-1);
    image->SetSpacing(grid.Extent().MeshWidth().vec());
    image->SetOrigin(grid.Extent().Begin().vec());
    image->SetScalarTypeToDouble();
    image->SetNumberOfScalarComponents(1);
    image->AllocateScalars();
    image->GetPointData()->GetScalars()->SetName(information);

    Index i;
    for (i.X()=grid.Local().Begin().X(); i.X()<end.X(); ++i.X())
      for (i.Y()=grid.Local().Begin().Y(); i.Y()<end.Y(); ++i.Y())
        for (i.Z()=grid.Local().Begin().Z(); i.Z()<end.Z(); ++i.Z())
          image->SetScalarComponentFromDouble(i.X() - grid.Local().Begin().X() + grid.Global().LocalBegin().X(),
                                              i.Y() - grid.Local().Begin().Y() + grid.Global().LocalBegin().Y(),
                                              i.Z() - grid.Local().Begin().Z() + grid.Global().LocalBegin().Z(),
                                              0, grid.GetVal(i));

    image->Update();

    int rank, size;
    MPI_Comm_rank(comm_global, &rank);
    MPI_Comm_size(comm_global, &size);

    std::stringstream filename;
    filename << OutputPath() << std::setw(4) << std::setfill('0') << output_count << "_" << rank << ".vti";

    vtkSmartPointer<vtkXMLImageDataWriter> writer = vtkSmartPointer<vtkXMLImageDataWriter>::New();
    writer->SetFileName(filename.str().c_str());
    writer->SetCompressorTypeToNone();
    writer->SetDataModeToAscii();
    writer->SetInput(image);
    writer->Update();
    writer->Write();

  }

#else /* HAVE_VTK */
  Index i;
  std::stringstream buf;

  Index begin, end;
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int i=0; i<3; ++i) {
    end[i] = grid.Local().End()[i] + (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 0 : grid.Local().HaloSize1()[i]);
    end_local[i] = grid.Global().LocalEnd()[i] - (grid.Global().LocalEnd()[i] == grid.Global().GlobalSize()[i] ? 1 : 0);
  }

  begin = grid.Local().Begin();
  begin_local = grid.Global().LocalBegin();
  begin_global = 0;
  end_global = grid.Global().GlobalSize()-1;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local,
                    output_count);
#endif /* HAVE_VTK */
}

void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);
  PrintGrid(*temp, information);
}

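/*
 * Helpers for the ASCII fallback output: CreateParallelOutputFile writes the
 * .pvti descriptor that stitches the per-rank pieces together, while
 * CreateSerialOutputFile/FinalizeSerialOutputFile wrap the per-rank .vti file
 * around the raw data written in between.
 */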
void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local,
                                const int& output_count)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  sprintf(parallel_filename, "%s%04d.pvti", OutputPath().c_str(), output_count);
  sprintf(serial_filename, "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << "  <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] << " ";

    buf << "\"" << std::endl
        << "              GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << "    <PPointData Scalars=\"" << information << "\">" << std::endl
        << "      <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << "    </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");

  if ((end_local-begin_local).Product() > 0) {
    buf << "    <Piece Extent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_local[i] << " " << end_local[i] << " ";

    buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
  }

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << "  </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  sprintf(serial_filename, "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << "  <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] << " ";

  buf << "\"" << std::endl
      << "             Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << "    <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] << " ";

  buf << "\">" << std::endl
      << "      <PointData Scalars=\"" << information << "\">" << std::endl
      << "        <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << "          ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << "        </DataArray>" << std::endl
      << "      </PointData>" << std::endl
      << "    </Piece>" << std::endl
      << "  </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

int CommMPI::GlobalRank() const
{
  int rank;
  MPI_Comm_rank(comm_global, &rank);
  return rank;
}

int CommMPI::GlobalSize() const
{
  int size;
  MPI_Comm_size(comm_global, &size);
  return size;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

int CommMPI::Rank(const Grid& grid) const
{
  int rank;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_rank(comm, &rank);
  return rank;
}

int CommMPI::Size(const Grid& grid) const
{
  int size;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_size(comm, &size);
  return size;
}

Index CommMPI::Pos(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::Procs(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

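/*
 * Sets up the global communicator: an existing Cartesian communicator is
 * adopted as-is, otherwise a 3D Cartesian topology is created with periods
 * derived from the boundary conditions. Also creates the MPI_Info object
 * carrying the "no_locks" hint.
 */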
void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    comm_global = comm;

  } else {

    MPI_Dims_create(size, 3, dims);

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);

}

CommMPI::~CommMPI()
{
  MPI_Comm_free(&comm_global);
#ifdef VMG_ONE_SIDED
  if (win_created)
    MPI_Win_free(&win);
#endif
  MPI_Info_free(&info);
}

Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  return settings.CoarserGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  return settings.FinerGrid(multigrid(multigrid.Level()));
}

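/*
 * Creates a time-stamped output directory (rank 0 only) and broadcasts the
 * resulting path to all ranks. Falls back to the current directory when
 * Boost.Filesystem is not available.
 */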
std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  } else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}


#endif /* HAVE_MPI */