/**
 * @file comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Wed Jun 16 13:21:06 2010
 *
 * @brief Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#ifdef HAVE_VTK
#include <vtkAbstractArray.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#endif

// Standard headers used directly in this translation unit
// (assert, va_list/vsnprintf, printf/sprintf, time/strftime, setw/setfill).
#include <cassert>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iomanip>
#include <sstream>

#include "base/helper.hpp"
#include "base/linked_cell_list.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "comm/mpi/datatypes_local.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"
#include "base/timer.hpp"

static char print_buffer[512];

using namespace VMG;

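/*
 * Note on the communication helpers below: non-blocking sends/receives
 * register their MPI_Request via Request() and are completed later by
 * WaitAll() (or progressed by TestAll()). Global operations use comm_global,
 * which InitCommMPI() ensures carries a three-dimensional Cartesian topology.
 */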
void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Isend(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->Irecv(grid, tag_start, comm, Request());
}

void CommMPI::IsendAllBuffered(const Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IsendBuffered(grid, tag_start, comm, Request());
}

void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
{
  for (std::vector<VMG::MPI::Datatype>::iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->IrecvBuffered(tag_start, comm, Request());
}

void CommMPI::ReplaceBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridReplace(grid);
}

void CommMPI::AddBufferAll(Grid& grid, const std::vector<VMG::MPI::Datatype>& types)
{
  for (std::vector<VMG::MPI::Datatype>::const_iterator iter=types.begin(); iter!=types.end(); ++iter)
    iter->GridSum(grid);
}

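/*
 * CommSubgrid and CommAddSubgrid exchange grid values between two grids of
 * the same level via the precomputed global datatypes, presumably when data
 * has to move between two different process distributions. Receives are
 * posted first, then the sends; after WaitAll() the received buffers either
 * replace (CommSubgrid) or are added to (CommAddSubgrid) the target values.
 */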
void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 0411);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 0411);
    WaitAll();
    ReplaceBufferAll(grid_new, datatypes.Receive());
  }
}

void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new, direction);
    IrecvAllBuffered(datatypes.Receive(), comm, 1806);
    IsendAllBuffered(grid_old, datatypes.Send(), comm, 1806);
    WaitAll();
    AddBufferAll(grid_new, datatypes.Receive());
  }
}

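/*
 * Intra-level halo exchange: ghost (halo) regions are received from the
 * neighboring ranks while the matching near-boundary regions are sent out.
 * The AsyncStart/AsyncFinish variants split this into posting the transfers
 * and completing/unpacking them, so computation can overlap with
 * communication.
 */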
void CommMPI::CommToGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2310);
    IsendAllBuffered(grid, types.NB(), comm, 2310);
    WaitAll();
    ReplaceBufferAll(grid, types.Halo());
  }
}

void CommMPI::CommToGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.Halo(), comm, 2412);
    IsendAllBuffered(grid, types.NB(), comm, 2412);
    TestAll();
  }
}

void CommMPI::CommToGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  ReplaceBufferAll(grid, settings.DatatypesLocal(grid).Halo());
}

void CommMPI::CommFromGhosts(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 1337);
    IsendAllBuffered(grid, types.Halo(), comm, 1337);
    WaitAll();
    AddBufferAll(grid, types.NB());
  }
}

void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  if (comm != MPI_COMM_NULL) {
    VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
    IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
    IsendAllBuffered(grid, types.Halo(), comm, 0xc0ffee);
    TestAll();
  }
}

void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
{
  WaitAll();
  AddBufferAll(grid, settings.DatatypesLocal(grid).NB());
}

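/*
 * Particle redistribution: each rank publishes its local grid extent via
 * MPI_Allgather, bins every local particle to the rank whose extent contains
 * its grid index, exchanges the per-rank counts with MPI_Alltoall, and then
 * ships positions, charges and original array indices point-to-point. The
 * received data becomes Particle objects that remember the sending rank and
 * the index in that rank's arrays, so CommParticlesBack() can route the
 * results back.
 */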
void CommMPI::CommParticles(const Grid& grid, std::list<Particle::Particle>& particles)
{
  Factory& factory = MG::GetFactory();

  const vmg_int& num_particles_local = factory.GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
  vmg_float* x = factory.GetObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY");
  vmg_float* q = factory.GetObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY");

  int rank, size;
  MPI_Comm_rank(comm_global, &rank);
  MPI_Comm_size(comm_global, &size);

#ifndef VMG_ONE_SIDED
  vmg_int* receiver;
  if (!factory.TestObject("PARTICLE_RECEIVER_ARRAY"))
    new ObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY", size);
  receiver = factory.GetObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY");
#endif

  Index index;
  std::vector<int> global_extent(6*size);
  std::vector<int> send_sizes(size);
  std::vector<int> recv_sizes(size);
  std::vector<Index> begin_remote(size);
  std::vector<Index> end_remote(size);
  std::vector< std::vector<vmg_float> > send_buffer_x(size);
  std::vector< std::vector<vmg_float> > send_buffer_q(size);
  std::vector< std::vector<vmg_int> > send_buffer_ind(size);
  std::vector< std::vector<vmg_float> > recv_buffer_x(size);
  std::vector< std::vector<vmg_float> > recv_buffer_q(size);
  std::vector< std::vector<vmg_int> > recv_buffer_ind(size);
  MPI_Datatype dt_temp;

  std::memcpy(&global_extent[6*rank], grid.Global().LocalBegin().vec(), 3*sizeof(int));
  std::memcpy(&global_extent[6*rank+3], grid.Global().LocalEnd().vec(), 3*sizeof(int));

  MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &global_extent.front(), 6, MPI_INT, comm_global);

  for (int i=0; i<size; ++i) {
    begin_remote[i] = static_cast<Index>(&global_extent[6*i]);
    end_remote[i] = static_cast<Index>(&global_extent[6*i+3]);
  }

  for (int i=0; i<num_particles_local; ++i) {
    index = static_cast<Index>((Vector(&x[3*i]) - grid.Extent().Begin()) / grid.Extent().MeshWidth());
    for (int j=0; j<size; ++j)
      if (index.IsInBounds(begin_remote[j], end_remote[j])) {
        send_buffer_x[j].push_back(x[3*i+0]);
        send_buffer_x[j].push_back(x[3*i+1]);
        send_buffer_x[j].push_back(x[3*i+2]);
        send_buffer_q[j].push_back(q[i]);
        send_buffer_ind[j].push_back(i);
        break;
      }
  }

  /*
   * Communicate which process gets how many particles
   */
  for (int i=0; i<size; ++i)
    send_sizes[i] = send_buffer_q[i].size();

  MPI_Alltoall(&send_sizes.front(), 1, MPI_INT, &recv_sizes.front(), 1, MPI_INT, comm_global);

  assert(RequestsPending() == 0);

  /*
   * Send particles
   */
  for (int i=0; i<size; ++i) {

    if (!send_buffer_q[i].empty()) {
      MPI_Isend(&send_buffer_x[i].front(), send_buffer_x[i].size(), MPI_DOUBLE, i, 0, comm_global, &Request());
      MPI_Isend(&send_buffer_q[i].front(), send_buffer_q[i].size(), MPI_DOUBLE, i, 1, comm_global, &Request());
      MPI_Isend(&send_buffer_ind[i].front(), send_buffer_ind[i].size(), MPI_INT, i, 2, comm_global, &Request());
    }

#ifndef VMG_ONE_SIDED
    receiver[i] = send_buffer_q[i].size();
#endif
  }

  /*
   * Receive particles
   */
  for (int i=0; i<size; ++i) {

    if (recv_sizes[i] > 0) {

      recv_buffer_x[i].resize(3*recv_sizes[i]);
      recv_buffer_q[i].resize(recv_sizes[i]);
      recv_buffer_ind[i].resize(recv_sizes[i]);

      MPI_Irecv(&recv_buffer_x[i].front(), 3*recv_sizes[i], MPI_DOUBLE, i, 0, comm_global, &Request());
      MPI_Irecv(&recv_buffer_q[i].front(), recv_sizes[i], MPI_DOUBLE, i, 1, comm_global, &Request());
      MPI_Irecv(&recv_buffer_ind[i].front(), recv_sizes[i], MPI_INT, i, 2, comm_global, &Request());

    }

  }

  WaitAll();

  particles.clear();

  for (int i=0; i<size; ++i)
    for (int j=0; j<recv_sizes[i]; ++j)
      particles.push_back(Particle::Particle(&recv_buffer_x[i][3*j], recv_buffer_q[i][j], 0.0, 0.0, i, recv_buffer_ind[i][j]));
}

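/*
 * Return the computed potentials (and, in the two-sided variant, the field
 * values) to the ranks that own the particles. With VMG_ONE_SIDED the values
 * are written directly into the owners' potential arrays via MPI_Put inside
 * a fence epoch; otherwise Isend/Irecv pairs are used, with receive counts
 * taken from the PARTICLE_RECEIVER_ARRAY recorded in CommParticles().
 */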
void CommMPI::CommParticlesBack(std::list<Particle::Particle>& particles)
{
  std::list<Particle::Particle>::iterator iter;

#ifdef VMG_ONE_SIDED
  if (!win_created) {
    vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
    const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
    MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
    win_created = true;
  }

  MPI_Win_fence(MPI_MODE_NOPRECEDE, win);

  for (iter=particles.begin(); iter!=particles.end(); ++iter)
    MPI_Put(&iter->Pot(), 1, MPI_DOUBLE, iter->Rank(), iter->Index(), 1, MPI_DOUBLE, win);

  MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);
#else
  int rank, size;
  MPI_Comm_rank(comm_global, &rank);
  MPI_Comm_size(comm_global, &size);

  std::vector< std::vector<vmg_float> > send_buffer_float(size);
  std::vector< std::vector<vmg_float> > recv_buffer_float(size);
  std::vector< std::vector<vmg_int> > send_buffer_index(size);
  std::vector< std::vector<vmg_int> > recv_buffer_index(size);

  vmg_int* size_receive = MG::GetFactory().GetObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY");
  vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
  vmg_float* f = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_FIELD_ARRAY");

  // Build send buffer
  for (iter=particles.begin(); iter!=particles.end(); ++iter) {
    send_buffer_float[iter->Rank()].push_back(iter->Pot());
    send_buffer_float[iter->Rank()].push_back(iter->Field()[0]);
    send_buffer_float[iter->Rank()].push_back(iter->Field()[1]);
    send_buffer_float[iter->Rank()].push_back(iter->Field()[2]);
    send_buffer_index[iter->Rank()].push_back(iter->Index());
  }

  // Send potentials
  for (int i=0; i<size; ++i) {
    if (!send_buffer_float[i].empty()) {
      MPI_Isend(&send_buffer_float[i].front(), send_buffer_float[i].size(), MPI_DOUBLE, i, 699+rank, comm_global, &Request());
      MPI_Isend(&send_buffer_index[i].front(), send_buffer_index[i].size(), MPI_INT, i, 32111+rank, comm_global, &Request());
    }
  }

  // Receive potentials
  for (int i=0; i<size; ++i) {
    if (size_receive[i] > 0) {
      recv_buffer_float[i].resize(4*size_receive[i]);
      recv_buffer_index[i].resize(size_receive[i]);
      MPI_Irecv(&recv_buffer_float[i].front(), 4*size_receive[i], MPI_DOUBLE, i, 699+i, comm_global, &Request());
      MPI_Irecv(&recv_buffer_index[i].front(), size_receive[i], MPI_INT, i, 32111+i, comm_global, &Request());
    }
  }

  WaitAll();

  // Add potential values
  for (int i=0; i<size; ++i)
    for (unsigned int j=0; j<size_receive[i]; ++j) {
      p[recv_buffer_index[i][j]] = recv_buffer_float[i][4*j];
      std::memcpy(&f[recv_buffer_index[i][j]], &recv_buffer_float[i][4*j+1], 3*sizeof(vmg_float));
    }
#endif

}

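/*
 * Linked-cell halo exchange: particles in the near-boundary cells are packed
 * as (x, y, z, charge) quadruples and sent to the neighboring ranks. For
 * neighbors reached across a periodic boundary the positions are shifted by
 * one domain extent so they land in the correct halo cells on the receiving
 * side.
 */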
void CommMPI::CommLCListToGhosts(Particle::LinkedCellList& lc)
{
  VMG::MPI::DatatypesLocal types(lc, comm_global);
  std::vector<int> send_size(types.NB().size());
  vmg_int recv_size;
  std::list<Particle::Particle*>::iterator iter;
  Index ind;
  Vector offset;

  const Vector halo_length = lc.Local().HaloSize1() * lc.Extent().MeshWidth();

  lc.ClearHalo();

  for (unsigned int i=0; i<types.NB().size(); ++i)
    if (types.NB()[i].Feasible()) {

      for (int j=0; j<3; ++j)
        if ((types.Offset()[i][j] < 0 && lc.Global().LocalBegin()[j] == 0) ||
            (types.Offset()[i][j] > 0 && lc.Global().LocalEnd()[j] == lc.Global().GlobalSize()[j]))
          offset[j] = -1 * types.Offset()[i][j] * lc.Extent().Size()[j];
        else
          offset[j] = 0;

      for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
        for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
          for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
            for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter) {
              for (int j=0; j<3; ++j)
                types.NB()[i].Buffer().push_back((*iter)->Pos()[j] + offset[j]);
              types.NB()[i].Buffer().push_back((*iter)->Charge());

              assert(lc.Extent().Begin().IsComponentwiseLessOrEqual((*iter)->Pos()));
              assert(lc.Extent().End().IsComponentwiseGreaterOrEqual((*iter)->Pos()));
              assert(lc.Extent().Begin().IsComponentwiseLessOrEqual((*iter)->Pos() + offset + halo_length));
              assert(lc.Extent().End().IsComponentwiseGreaterOrEqual((*iter)->Pos() + offset - halo_length));
            }

      send_size[i] = types.NB()[i].Buffer().size();
      MPI_Isend(&send_size[i], 1, MPI_INT, types.NB()[i].Rank(), 2048+types.NB()[i].TagSend(), comm_global, &Request());

      if (send_size[i] > 0)
        MPI_Isend(&types.NB()[i].Buffer().front(), send_size[i], MPI_DOUBLE,
                  types.NB()[i].Rank(), 4096+types.NB()[i].TagSend(),
                  comm_global, &Request());
    }

  for (unsigned int i=0; i<types.Halo().size(); ++i)
    if (types.Halo()[i].Feasible()) {
      MPI_Recv(&recv_size, 1, MPI_INT, types.Halo()[i].Rank(), 2048+types.Halo()[i].TagReceive(), comm_global, MPI_STATUS_IGNORE);
      if (recv_size > 0) {
        types.Halo()[i].Buffer().resize(recv_size);
        MPI_Irecv(&types.Halo()[i].Buffer().front(), recv_size, MPI_DOUBLE,
                  types.Halo()[i].Rank(), 4096+types.Halo()[i].TagReceive(),
                  comm_global, &Request());
      }
    }

  WaitAll();

  for (unsigned int i=0; i<types.Halo().size(); ++i)
    for (unsigned int j=0; j<types.Halo()[i].Buffer().size(); j+=4)
      lc.AddParticleToHalo(&types.Halo()[i].Buffer()[j], types.Halo()[i].Buffer()[j+3]);
}

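/*
 * Reverse direction of the linked-cell exchange: potentials accumulated in
 * the halo cells are sent back to the neighbor owning the corresponding
 * interior cells and added onto its particles. The receive size is not
 * transmitted; it is recomputed from the local cell occupancy, which the
 * asserts below check stays consistent between halo and near-boundary cells.
 */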
void CommMPI::CommLCListFromGhosts(const Grid& grid, Particle::LinkedCellList& lc)
{
  VMG::MPI::DatatypesLocal types(lc, comm_global);
  std::vector< std::vector<vmg_float> > send_buffer(types.NB().size());
  std::vector< std::vector<vmg_float> > recv_buffer(types.Halo().size());
  std::vector<vmg_int> send_size(types.NB().size());
  vmg_int recv_size;
  std::list<Particle::Particle*>::iterator iter;
  Index ind;

  for (unsigned int i=0; i<types.Halo().size(); ++i) {
    assert(types.Halo()[i].Subsizes().X() == types.NB()[i].Subsizes().X());
    assert(types.Halo()[i].Subsizes().Y() == types.NB()[i].Subsizes().Y());
    assert(types.Halo()[i].Subsizes().Z() == types.NB()[i].Subsizes().Z());

    for (ind.X() = 0; ind.X() < types.Halo()[i].Subsizes().X(); ++ind.X())
      for (ind.Y() = 0; ind.Y() < types.Halo()[i].Subsizes().Y(); ++ind.Y())
        for (ind.Z() = 0; ind.Z() < types.Halo()[i].Subsizes().Z(); ++ind.Z())
          assert(lc(ind+types.Halo()[i].Starts()).size() == lc(ind+types.NB()[i].Starts()).size());
  }

  for (unsigned int i=0; i<types.Halo().size(); ++i)
    if (types.Halo()[i].Feasible()) {

      for (ind.X() = types.Halo()[i].Starts().X(); ind.X() < types.Halo()[i].Starts().X()+types.Halo()[i].Subsizes().X(); ++ind.X())
        for (ind.Y() = types.Halo()[i].Starts().Y(); ind.Y() < types.Halo()[i].Starts().Y()+types.Halo()[i].Subsizes().Y(); ++ind.Y())
          for (ind.Z() = types.Halo()[i].Starts().Z(); ind.Z() < types.Halo()[i].Starts().Z()+types.Halo()[i].Subsizes().Z(); ++ind.Z())
            for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter)
              send_buffer[i].push_back((*iter)->Pot());

      MPI_Isend(&send_buffer[i].front(), send_buffer[i].size(), MPI_DOUBLE, types.Halo()[i].Rank(), 342523+types.Halo()[i].TagSend(), comm_global, &Request());
    }

  for (unsigned int i=0; i<types.NB().size(); ++i)
    if (types.NB()[i].Feasible()) {

      recv_size = 0;
      for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
        for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
          for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
            recv_size += lc(ind).size();

      if (recv_size > 0) {
        recv_buffer[i].resize(recv_size);
        MPI_Irecv(&recv_buffer[i].front(), recv_size, MPI_DOUBLE, types.NB()[i].Rank(), 342523+types.NB()[i].TagReceive(), comm_global, &Request());
      }
    }

  WaitAll();

  for (unsigned int i=0; i<types.NB().size(); ++i) {
    unsigned int c = 0;
    if (types.NB()[i].Feasible()) {
      for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
        for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
          for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
            for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter)
              (*iter)->Pot() += recv_buffer[i][c++];
    }
  }
}

vmg_float CommMPI::GlobalSum(const vmg_float& value)
{
  vmg_float result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
  return result;
}

vmg_float CommMPI::GlobalSumRoot(const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

vmg_int CommMPI::GlobalSum(const vmg_int& value)
{
  vmg_int result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm_global);
  return result;
}

vmg_int CommMPI::GlobalSumRoot(const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm_global);
}

vmg_float CommMPI::GlobalMax(const vmg_float& value)
{
  vmg_float result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_MAX, comm_global);
  return result;
}

vmg_float CommMPI::GlobalMaxRoot(const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_MAX, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalMaxArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_MAX, comm_global);
}

vmg_int CommMPI::GlobalMax(const vmg_int& value)
{
  vmg_int result = value;
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_MAX, comm_global);
  return result;
}

vmg_int CommMPI::GlobalMaxRoot(const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_MAX, 0, comm_global);
  return recv_buffer;
}

void CommMPI::GlobalMaxArray(vmg_int* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_MAX, comm_global);
}

vmg_float CommMPI::LevelSum(const Grid& grid, const vmg_float& value)
{
  vmg_float result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
  return result;
}

vmg_float CommMPI::LevelSumRoot(const Grid& grid, const vmg_float& value)
{
  vmg_float recv_buffer = value;
  vmg_float send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_float* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm);
}

vmg_int CommMPI::LevelSum(const Grid& grid, const vmg_int& value)
{
  vmg_int result = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, comm);
  return result;
}

vmg_int CommMPI::LevelSumRoot(const Grid& grid, const vmg_int& value)
{
  vmg_int recv_buffer = value;
  vmg_int send_buffer = value;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_INT, MPI_SUM, 0, comm);
  return recv_buffer;
}

void CommMPI::LevelSumArray(const Grid& grid, vmg_int* array, const vmg_int& size)
{
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_SUM, comm);
}

void CommMPI::PrintString(const char* format, ...)
{
  va_list args;
  va_start(args, format);
  vsnprintf(print_buffer, sizeof(print_buffer), format, args);
  printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
  va_end(args);
}

void CommMPI::PrintStringOnce(const char* format, ...)
{
  if (GlobalRank() == 0) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}

void CommMPI::PrintGridInformation(const Grid& grid, char* filename, const std::string& name)
{
  std::stringstream buf;
  MPI_File file;
  int rank, size;
  int size_local, size_local_max;

  MPI_Comm comm = settings.CommunicatorGlobal(grid);
  MPI_Comm comm_local = settings.CommunicatorLocal(grid);

  if (comm_local != MPI_COMM_NULL)
    MPI_Comm_size(comm_local, &size_local);
  else
    size_local = 0;

  if (comm != MPI_COMM_NULL) {

    MPI_Reduce(&size_local, &size_local_max, 1, MPI_INT, MPI_MAX, 0, comm);

    MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (rank == 0) {

      buf << "###########################################################" << std::endl
          << "GLOBAL INFORMATION:" << std::endl
          << "  NAME: " << name << std::endl
          << "  LEVEL: " << grid.Level() << std::endl
          << "  COMM_SIZE_GLOBAL: " << size << std::endl
          << "  COMM_SIZE_LOCAL: " << size_local_max << std::endl
          << "  GLOBAL:" << std::endl
          << "    GLOBAL_FINER_BEGIN: " << grid.Global().GlobalFinerBegin() << std::endl
          << "    GLOBAL_FINER_END: " << grid.Global().GlobalFinerEnd() << std::endl
          << "    GLOBAL_FINER_SIZE: " << grid.Global().GlobalFinerSize() << std::endl
          << "    FINEST_ABS_BEGIN: " << grid.Global().FinestAbsBegin() << std::endl
          << "    FINEST_ABS_END: " << grid.Global().FinestAbsEnd() << std::endl
          << "    FINEST_ABS_SIZE: " << grid.Global().FinestAbsSize() << std::endl
          << "    GLOBAL_SIZE: " << grid.Global().GlobalSize() << std::endl
          << "  EXTENT:" << std::endl
          << "    BEGIN: " << grid.Extent().Begin() << std::endl
          << "    END: " << grid.Extent().End() << std::endl
          << "    SIZE: " << grid.Extent().Size() << std::endl
          << "    MESH_WIDTH: " << grid.Extent().MeshWidth() << std::endl
          << std::endl
          << "LOCAL INFORMATION:" << std::endl;
    }

    buf << "RANK " << rank << ":" << std::endl
        << "  GLOBAL:" << std::endl
        << "    LOCAL_BEGIN: " << grid.Global().LocalBegin() << std::endl
        << "    LOCAL_END: " << grid.Global().LocalEnd() << std::endl
        << "    LOCAL_SIZE: " << grid.Global().LocalSize() << std::endl
        << "    LOCAL_FINER_BEGIN: " << grid.Global().LocalFinerBegin() << std::endl
        << "    LOCAL_FINER_END: " << grid.Global().LocalFinerEnd() << std::endl
        << "    LOCAL_FINER_SIZE: " << grid.Global().LocalFinerSize() << std::endl
        << "    BOUNDARY_TYPE: " << grid.Global().BoundaryType() << std::endl
        << "  LOCAL:" << std::endl
        << "    BEGIN: " << grid.Local().Begin() << std::endl
        << "    END: " << grid.Local().End() << std::endl
        << "    SIZE: " << grid.Local().Size() << std::endl
        << "    SIZE_TOTAL: " << grid.Local().SizeTotal() << std::endl
        << "    HALO_BEGIN_1: " << grid.Local().HaloBegin1() << std::endl
        << "    HALO_END_1: " << grid.Local().HaloEnd1() << std::endl
        << "    HALO_SIZE_1: " << grid.Local().HaloSize1() << std::endl
        << "    HALO_BEGIN_2: " << grid.Local().HaloBegin2() << std::endl
        << "    HALO_END_2: " << grid.Local().HaloEnd2() << std::endl
        << "    HALO_SIZE_2: " << grid.Local().HaloSize2() << std::endl
        << "    BOUNDARY_BEGIN_1: " << grid.Local().BoundaryBegin1() << std::endl
        << "    BOUNDARY_END_1: " << grid.Local().BoundaryEnd1() << std::endl
        << "    BOUNDARY_SIZE_1: " << grid.Local().BoundarySize1() << std::endl
        << "    BOUNDARY_BEGIN_2: " << grid.Local().BoundaryBegin2() << std::endl
        << "    BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
        << "    BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl
        << "    FINER_BEGIN: " << grid.Local().FinerBegin() << std::endl
        << "    FINER_END: " << grid.Local().FinerEnd() << std::endl
        << "    FINER_SIZE: " << grid.Local().FinerSize() << std::endl;

    if (rank == size-1)
      buf << "###########################################################" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());
    MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    MPI_File_close(&file);

  }
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;

  const Multigrid& mg = *MG::GetSol();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg.MinLevel(); i<=mg.MaxLevel(); ++i)
    PrintGridInformation(mg(i), filename, "MULTIGRID");

  for (int i=mg.MinLevel()+1; i<=mg.MaxLevel(); ++i)
    PrintGridInformation(settings.CoarserGrid(mg(i)), filename, "COARSER_GRID");

  for (int i=mg.MinLevel(); i<mg.MaxLevel(); ++i)
    PrintGridInformation(settings.FinerGrid(mg(i)), filename, "FINER_GRID");

  delete [] filename;

}

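/*
 * Grid output for debugging and visualization. With VTK support each rank
 * writes its local block to a .vti image file; without VTK the values are
 * streamed into a text buffer and passed to CreateOutputFiles(), which
 * assembles a parallel .pvti master file referencing one serial .vti piece
 * per rank.
 */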
void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  int output_count = OutputCount();

#ifdef HAVE_VTK

  if (settings.CommunicatorLocal(grid) != MPI_COMM_NULL) {

    Index end, end_global;

    for (int i=0; i<3; ++i) {
      end[i] = grid.Local().End()[i];
      end_global[i] = grid.Global().LocalEnd()[i];
    }

    vtkSmartPointer<vtkImageData> image = vtkSmartPointer<vtkImageData>::New();
    image->SetExtent(grid.Global().LocalBegin().X(), end_global.X()-1,
                     grid.Global().LocalBegin().Y(), end_global.Y()-1,
                     grid.Global().LocalBegin().Z(), end_global.Z()-1);
    image->SetSpacing(grid.Extent().MeshWidth().vec());
    image->SetOrigin(grid.Extent().Begin().vec());
    image->SetScalarTypeToDouble();
    image->SetNumberOfScalarComponents(1);
    image->AllocateScalars();
    image->GetPointData()->GetScalars()->SetName(information);

    Index i;
    for (i.X()=grid.Local().Begin().X(); i.X()<end.X(); ++i.X())
      for (i.Y()=grid.Local().Begin().Y(); i.Y()<end.Y(); ++i.Y())
        for (i.Z()=grid.Local().Begin().Z(); i.Z()<end.Z(); ++i.Z())
          image->SetScalarComponentFromDouble(i.X() - grid.Local().Begin().X() + grid.Global().LocalBegin().X(),
                                              i.Y() - grid.Local().Begin().Y() + grid.Global().LocalBegin().Y(),
                                              i.Z() - grid.Local().Begin().Z() + grid.Global().LocalBegin().Z(),
                                              0, grid.GetVal(i));

    image->Update();

    int rank, size;
    MPI_Comm_rank(comm_global, &rank);
    MPI_Comm_size(comm_global, &size);

    std::stringstream filename;
    filename << OutputPath() << std::setw(4) << std::setfill('0') << output_count << "_" << rank << ".vti";

    vtkSmartPointer<vtkXMLImageDataWriter> writer = vtkSmartPointer<vtkXMLImageDataWriter>::New();
    writer->SetFileName(filename.str().c_str());
    writer->SetCompressorTypeToNone();
    writer->SetDataModeToAscii();
    writer->SetInput(image);
    writer->Update();
    writer->Write();

  }

#else /* HAVE_VTK */
  Index i;
  std::stringstream buf;

  Index begin, end;
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int j=0; j<3; ++j) {
    end[j] = grid.Local().End()[j] + (grid.Global().LocalEnd()[j] == grid.Global().GlobalSize()[j] ? 0 : grid.Local().HaloSize1()[j]);
    end_local[j] = grid.Global().LocalEnd()[j] - (grid.Global().LocalEnd()[j] == grid.Global().GlobalSize()[j] ? 1 : 0);
  }

  begin = grid.Local().Begin();
  begin_local = grid.Global().LocalBegin();
  begin_global = 0;
  end_global = grid.Global().GlobalSize()-1;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local,
                    output_count);
#endif /* HAVE_VTK */
}

void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);
  PrintGrid(*temp, information);
}

void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local,
                                const int& output_count)
{
  MPI_Comm comm = settings.CommunicatorGlobal(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

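/*
 * Writes the .pvti master file: rank 0 contributes the XML header and footer
 * via MPI_File_write_shared, and every rank appends its own <Piece> entry
 * with MPI_File_write_ordered so the pieces appear in rank order. Ranks with
 * an empty local block contribute no <Piece> line.
 */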
void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  sprintf(parallel_filename, "%s%04d.pvti", OutputPath().c_str(), output_count);
  sprintf(serial_filename, "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << "  <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] << " ";

    buf << "\"" << std::endl
        << "              GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << "    <PPointData Scalars=\"" << information << "\">" << std::endl
        << "      <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << "    </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");

  if ((end_local-begin_local).Product() > 0) {
    buf << "    <Piece Extent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_local[i] << " " << end_local[i] << " ";

    buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
  }

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << "  </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  sprintf(serial_filename, "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << "  <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] << " ";

  buf << "\"" << std::endl
      << "             Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << "    <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] << " ";

  buf << "\">" << std::endl
      << "      <PointData Scalars=\"" << information << "\">" << std::endl
      << "        <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << "          ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << "        </DataArray>" << std::endl
      << "      </PointData>" << std::endl
      << "    </Piece>" << std::endl
      << "  </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

int CommMPI::GlobalRank() const
{
  int rank;
  MPI_Comm_rank(comm_global, &rank);
  return rank;
}

int CommMPI::GlobalSize() const
{
  int size;
  MPI_Comm_size(comm_global, &size);
  return size;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;
  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

int CommMPI::Rank(const Grid& grid) const
{
  int rank;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_rank(comm, &rank);
  return rank;
}

int CommMPI::Size(const Grid& grid) const
{
  int size;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Comm_size(comm, &size);
  return size;
}

Index CommMPI::Pos(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return coords;
}

Index CommMPI::Procs(const Grid& grid) const
{
  Index dims, periods, coords;
  MPI_Comm comm = settings.CommunicatorLocal(grid);
  assert(comm != MPI_COMM_NULL);
  MPI_Cart_get(comm, 3, dims.vec(), periods.vec(), coords.vec());
  return dims;
}

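/*
 * InitCommMPI adopts the given communicator if it already carries a
 * Cartesian topology; otherwise it builds a 3D process grid. For
 * power-of-two sizes the exponent log2(size) is split as evenly as possible
 * over the three axes, e.g. size = 32 (log2 = 5) gives
 * dims = {2^2, 2^2, 2^1} = {4, 4, 2}; other sizes are left to
 * MPI_Dims_create.
 */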
void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    comm_global = comm;

  } else {

    const int log2 = Helper::log_2(size);

    if (Helper::intpow(2, log2) == size) {
      for (int i=0; i<3; ++i)
        dims[i] = Helper::intpow(2, log2 / 3 + (log2%3 > i ? 1 : 0));
    } else {
      MPI_Dims_create(size, 3, dims);
    }

    std::printf("DIMS: %d %d %d\n", dims[0], dims[1], dims[2]);

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);

}

CommMPI::~CommMPI()
{
  MPI_Comm_free(&comm_global);
#ifdef VMG_ONE_SIDED
  if (win_created)
    MPI_Win_free(&win);
#endif
  MPI_Info_free(&info);
}

Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  return settings.CoarserGrid(multigrid(multigrid.Level()));
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  return settings.FinerGrid(multigrid(multigrid.Level()));
}

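/*
 * Rank 0 creates a timestamped directory below ./output (appending a numeric
 * suffix until the name is unique) and broadcasts first the path length and
 * then the path itself, so every rank returns the same output directory.
 * Without Boost.Filesystem the current working directory is used instead.
 */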
std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  } else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}


#endif /* HAVE_MPI */