/**
 * @file comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Wed Jun 16 13:21:06 2010
 *
 * @brief Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#include <cassert>  // assert in PrintInnerGrid
#include <cstdarg>  // va_list in PrintString/PrintStringOnce
#include <cstdio>   // printf, vsnprintf, snprintf
#include <cstring>
#include <ctime>    // time/localtime/strftime in CreateOutputDirectory
#include <sstream>

#include "base/helper.hpp"
#include "base/linked_cell_list.hpp"
#include "base/timer.hpp"
#include "comm/comm_mpi.hpp"
#include "comm/mpi/datatype.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"

static char print_buffer[512];

using namespace VMG;

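/*
 * Copies the part of grid_old that each rank of grid_new owns into
 * grid_new. Every rank gathers the local extents of all other ranks,
 * clamps them against its own extent to find the overlap, and transfers
 * each non-empty intersection as an MPI subarray datatype via
 * non-blocking send/receive.
 */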
void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new)
{
  Timer::Start("CommSubgrid");

  MPI_Comm comm = comm_info.GetUnionCommunicator(grid_old, grid_new);

  if (comm != MPI_COMM_NULL) {

    int comm_rank, comm_size;
    MPI_Comm_rank(comm, &comm_rank);
    MPI_Comm_size(comm, &comm_size);

    Index begin_local, end_local;
    Index begin_remote, end_remote;
    Index sizes, subsizes, starts;
    Index offset_new, offset_old;
    int* buffer = new int[6*comm_size];

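    // Each rank publishes its local extent on grid_new as six ints
    // (begin[0..2], end[0..2]) at offset 6*comm_rank; offset_old/offset_new
    // translate global indices into local array positions when halos exist.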
    for (int i=0; i<3; ++i) {

      offset_old[i] = (grid_old.Local().HaloEnd1()[i] > 0 ? grid_old.Local().Begin()[i] : 0);
      offset_new[i] = (grid_new.Local().HaloEnd1()[i] > 0 ? grid_new.Local().Begin()[i] : 0);

      buffer[6*comm_rank + i  ] = grid_new.Global().BeginLocal()[i];
      buffer[6*comm_rank + i+3] = grid_new.Global().EndLocal()[i];

    }

    //TODO: Create LUT to avoid global communication here
    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, buffer, 6, MPI_INT, comm);

    for (int i=0; i<comm_size; ++i) {

      for (int j=0; j<3; ++j) {

        begin_remote[j] = buffer[6*i + j];
        end_remote[j] = buffer[6*i + j+3];

      }

      begin_remote = begin_remote.Clamp(grid_old.Global().BeginLocal(), grid_old.Global().EndLocal());
      end_remote = end_remote.Clamp(grid_old.Global().BeginLocal(), grid_old.Global().EndLocal());

      if ((end_remote - begin_remote).Product() > 0) {

        MPI_Datatype dt_temp;

        sizes = grid_old.Local().SizeTotal();
        subsizes = end_remote - begin_remote;
        starts = begin_remote - grid_old.Global().BeginLocal() + offset_old;

        MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt_temp);

        MPI_Type_commit(&dt_temp);

        MPI_Isend(&grid_old(0), 1, dt_temp, i, 0, comm, &Request());

        MPI_Type_free(&dt_temp);

      }

    }

    for (int i=0; i<3; ++i) {

      buffer[6*comm_rank + i  ] = grid_old.Global().BeginLocal()[i];
      buffer[6*comm_rank + i+3] = grid_old.Global().EndLocal()[i];

    }

    //TODO: Create LUT to avoid global communication here
    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, buffer, 6, MPI_INT, comm);

    for (int i=0; i<comm_size; ++i) {

      for (int j=0; j<3; ++j) {

        begin_remote[j] = buffer[6*i + j];
        end_remote[j] = buffer[6*i + j+3];

      }

      begin_remote = begin_remote.Clamp(grid_new.Global().BeginLocal(), grid_new.Global().EndLocal());
      end_remote = end_remote.Clamp(grid_new.Global().BeginLocal(), grid_new.Global().EndLocal());

      if ((end_remote - begin_remote).Product() > 0) {

        MPI_Datatype dt_temp;

        sizes = grid_new.Local().SizeTotal();
        subsizes = end_remote - begin_remote;
        starts = begin_remote - grid_new.Global().BeginLocal() + offset_new;

        MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt_temp);

        MPI_Type_commit(&dt_temp);

        MPI_Irecv(&grid_new(0), 1, dt_temp, i, 0, comm, &Request());

        MPI_Type_free(&dt_temp);

      }

    }

    ClearRequests();

    delete [] buffer;

  }

  CommToGhosts(grid_new);

  Timer::Stop("CommSubgrid");
}

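/*
 * Same communication pattern as CommSubgrid, but incoming subarrays are
 * accumulated onto grid_new via ReceiveAndAddSubgrid instead of
 * overwriting it.
 */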
void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new)
{
  Timer::Start("CommAddSubgrid");

  MPI_Comm comm = comm_info.GetUnionCommunicator(grid_old, grid_new);

  if (comm != MPI_COMM_NULL) {

    int comm_rank, comm_size;
    MPI_Comm_rank(comm, &comm_rank);
    MPI_Comm_size(comm, &comm_size);

    Index begin_local, end_local;
    Index begin_remote, end_remote;
    Index sizes, subsizes, starts;
    int* buffer = new int[6*comm_size];

    for (int i=0; i<3; ++i) {

      buffer[6*comm_rank + i  ] = grid_new.Global().BeginLocal()[i];
      buffer[6*comm_rank + i+3] = grid_new.Global().EndLocal()[i];

    }

    //TODO: Create LUT to avoid global communication here
    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, buffer, 6, MPI_INT, comm);

    for (int i=0; i<comm_size; ++i) {

      for (int j=0; j<3; ++j) {

        begin_remote[j] = buffer[6*i + j];
        end_remote[j] = buffer[6*i + j+3];

      }

      begin_remote = begin_remote.Clamp(grid_old.Global().BeginLocal(), grid_old.Global().EndLocal());
      end_remote = end_remote.Clamp(grid_old.Global().BeginLocal(), grid_old.Global().EndLocal());

      if ((end_remote - begin_remote).Product() > 0) {

        MPI_Datatype dt_temp;

        sizes = grid_old.Local().SizeTotal();
        subsizes = end_remote - begin_remote;
        starts = begin_remote - grid_old.Global().BeginLocal();

        MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt_temp);

        MPI_Type_commit(&dt_temp);

        MPI_Isend(&grid_old(0), 1, dt_temp, i, 0, comm, &Request());

        MPI_Type_free(&dt_temp);

      }

    }

    for (int i=0; i<3; ++i) {

      buffer[6*comm_rank + i  ] = grid_old.Global().BeginLocal()[i];
      buffer[6*comm_rank + i+3] = grid_old.Global().EndLocal()[i];

    }

    //TODO: Create LUT to avoid global communication here
    MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, buffer, 6, MPI_INT, comm);

    for (int i=0; i<comm_size; ++i) {

      for (int j=0; j<3; ++j) {

        begin_remote[j] = buffer[6*i + j];
        end_remote[j] = buffer[6*i + j+3];

      }

      begin_remote = begin_remote.Clamp(grid_new.Global().BeginLocal(), grid_new.Global().EndLocal());
      end_remote = end_remote.Clamp(grid_new.Global().BeginLocal(), grid_new.Global().EndLocal());

      if ((end_remote - begin_remote).Product() > 0) {

        MPI_Datatype dt_temp;

        sizes = grid_new.Local().SizeTotal();
        subsizes = end_remote - begin_remote;
        starts = begin_remote - grid_new.Global().BeginLocal(); // fixed: starts must be relative to the receiving grid (grid_new), matching sizes above; the original used grid_old here

        MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &dt_temp);

        MPI_Type_commit(&dt_temp);

        ReceiveAndAddSubgrid(grid_new, comm, dt_temp, i, 0);

        MPI_Type_free(&dt_temp);

      }

    }

    ClearRequests();

    delete [] buffer;

  }

  CommToGhosts(grid_new);

  Timer::Stop("CommAddSubgrid");
}

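/*
 * Sends each rank's halo values to the owning neighbor, where they are
 * added onto the corresponding near-boundary points (the adjoint of
 * CommToGhosts).
 */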
void CommMPI::CommFromGhosts(Grid& grid)
{
  Timer::Start("CommFromGhosts");

  MPI_Comm comm = comm_info.GetCommunicator(grid);

  if (comm != MPI_COMM_NULL) {

    const Index has_halo_1 = grid.Local().HasHalo1();
    const Index has_halo_2 = grid.Local().HasHalo2();
    Tuple<int> neighbors;

    for (int i=0; i<3; ++i) {

      MPI_Cart_shift(comm, i, 1, &neighbors.Left(), &neighbors.Right());

      if (has_halo_1[i]) {

        MPI_Datatype dts1 = GetDatatype(grid, grid.Iterators().Halo1()[i]);
        MPI_Isend(&grid(0), 1, dts1, neighbors.Left(), 1, comm, &Request());
        MPI_Type_free(&dts1);
      }

      if (has_halo_2[i]) {

        MPI_Datatype dts2 = GetDatatype(grid, grid.Iterators().Halo2()[i]);
        MPI_Isend(&grid(0), 1, dts2, neighbors.Right(), 2, comm, &Request());
        MPI_Type_free(&dts2);

        MPI_Datatype dtr1 = GetDatatype(grid, grid.Iterators().NearBoundary2()[i]);
        ReceiveAndAddSubgrid(grid, comm, dtr1, neighbors.Right(), 1);
        MPI_Type_free(&dtr1);
      }

      if (has_halo_1[i]) {

        MPI_Datatype dtr2 = GetDatatype(grid, grid.Iterators().NearBoundary1()[i]);
        ReceiveAndAddSubgrid(grid, comm, dtr2, neighbors.Left(), 2);
        MPI_Type_free(&dtr2);

      }

    }

    ClearRequests();

  }

  Timer::Stop("CommFromGhosts");
}

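/*
 * Standard halo exchange: near-boundary values are sent into the
 * neighbor's ghost layer in each dimension. Where no halo exists, a
 * zero-count dummy type (MPI_INT) keeps the MPI_Sendrecv calls
 * symmetric across all ranks.
 */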
void CommMPI::CommToGhosts(Grid& grid)
{
  Timer::Start("CommToGhosts");

  MPI_Comm comm = comm_info.GetCommunicator(grid);

  if (comm != MPI_COMM_NULL) {

    const Index has_halo_1 = grid.Local().HasHalo1();
    const Index has_halo_2 = grid.Local().HasHalo2();
    Tuple<int> neighbors;

    for (int i=2; i>=0; --i) {

      MPI_Cart_shift(comm, i, 1, &neighbors.Left(), &neighbors.Right());

      MPI_Datatype dts_left, dts_right;
      MPI_Datatype dtr_left, dtr_right;
      int num_left, num_right;

      if (has_halo_1[i]) {
        dts_left = GetDatatype(grid, grid.Iterators().NearBoundary1()[i]);
        dtr_left = GetDatatype(grid, grid.Iterators().Halo1()[i]);
        num_left = 1;
      } else {
        dts_left = MPI_INT;
        dtr_left = MPI_INT;
        num_left = 0;
      }

      if (has_halo_2[i]) {
        dts_right = GetDatatype(grid, grid.Iterators().NearBoundary2()[i]);
        dtr_right = GetDatatype(grid, grid.Iterators().Halo2()[i]);
        num_right = 1;
      } else {
        dts_right = MPI_INT;
        dtr_right = MPI_INT;
        num_right = 0;
      }

      Timer::Start("MPI_Sendrecv");
      MPI_Sendrecv(&(grid(0)), num_right, dts_right, neighbors.Right(), 3,
                   &(grid(0)), num_left, dtr_left, neighbors.Left(), 3,
                   comm, MPI_STATUS_IGNORE);
      MPI_Sendrecv(&(grid(0)), num_left, dts_left, neighbors.Left(), 4,
                   &(grid(0)), num_right, dtr_right, neighbors.Right(), 4,
                   comm, MPI_STATUS_IGNORE);
      Timer::Stop("MPI_Sendrecv");

      if (has_halo_1[i]) {
        MPI_Type_free(&dts_left);
        MPI_Type_free(&dtr_left);
      }

      if (has_halo_2[i]) {
        MPI_Type_free(&dts_right);
        MPI_Type_free(&dtr_right);
      }

    }
  }

  Timer::Stop("CommToGhosts");
}

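/*
 * The coarser/finer work grids are created lazily (one per multigrid
 * level) and cached in the coarser_grids/finer_grids maps; they are
 * released in the destructor.
 */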
Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
{
  std::map<const Grid*, TempGrid*>::iterator iter = coarser_grids.find(&multigrid());

  if (iter == coarser_grids.end()) {
    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToCoarser(multigrid(), BoundaryConditions());
    iter = coarser_grids.insert(std::make_pair(&multigrid(), temp_grid)).first;
  }

  TempGrid& grid_coarser = *iter->second;

  CommSubgrid(multigrid(multigrid.Level()-1), grid_coarser);

  return grid_coarser;
}

Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
{
  std::map<const Grid*, TempGrid*>::iterator iter = finer_grids.find(&multigrid());

  if (iter == finer_grids.end()) {
    TempGrid* temp_grid = new TempGrid();
    temp_grid->SetPropertiesToFiner(multigrid(), BoundaryConditions());
    iter = finer_grids.insert(std::make_pair(&multigrid(), temp_grid)).first;
  }

  TempGrid& grid_finer = *iter->second;

  CommSubgrid(multigrid(multigrid.Level()+1), grid_finer);

  return grid_finer;
}

Grid& CommMPI::GetGlobalCoarseGrid(Multigrid& multigrid)
{
  if (global_coarse_grid == NULL) {

    global_coarse_grid = new TempGrid();
    global_coarse_grid->SetPropertiesToGlobalCoarseGrid();

  }

  CommSubgrid(multigrid(multigrid.MinLevel()), *global_coarse_grid);

  return *global_coarse_grid;
}

//
// Decodes a derived datatype created by MPI_Type_create_subarray
// and adds values to grid.
// I chose this approach since MPI neither supports a point-to-point reduce nor
// a non-blocking global reduce.
// For more information see Chapter 4.1 "DERIVED DATATYPES" of the MPI standard
//
void CommMPI::ReceiveAndAddSubgrid(Grid& grid, const MPI_Comm& comm, MPI_Datatype& type,
                                   const int& rank, const int& tag)
{
  int counter = 0;
  int ibuffer[11];
  Index i;
  MPI_Datatype base_type;
  MPI_Aint address;

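  // Integer contents stored by MPI_Type_create_subarray with ndims = 3
  // (see MPI_Type_get_contents): ibuffer[0] = ndims, ibuffer[1..3] = sizes,
  // ibuffer[4..6] = subsizes, ibuffer[7..9] = starts, ibuffer[10] = order.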
  MPI_Type_get_contents(type, 11, 0, 1, ibuffer, &address, &base_type);

  if (static_cast<int>(receive_buffer.size()) < ibuffer[4]*ibuffer[5]*ibuffer[6])
    receive_buffer.resize(ibuffer[4]*ibuffer[5]*ibuffer[6]);

  MPI_Recv(&(receive_buffer.front()), ibuffer[4]*ibuffer[5]*ibuffer[6], MPI_DOUBLE, rank, tag, comm, MPI_STATUS_IGNORE);

  for (i.X()=ibuffer[7]; i.X()<ibuffer[7]+ibuffer[4]; ++i.X())
    for (i.Y()=ibuffer[8]; i.Y()<ibuffer[8]+ibuffer[5]; ++i.Y())
      for (i.Z()=ibuffer[9]; i.Z()<ibuffer[9]+ibuffer[6]; ++i.Z())
        grid(i) += receive_buffer[counter++];
}

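/*
 * Assigns each local particle to the rank whose grid region contains it
 * and scatters positions/charges with indexed-block datatypes. Note that
 * rank and size are taken from comm_global while the transfers use
 * MPI_COMM_WORLD, which assumes both communicators share the same rank
 * order; the variable-length stack arrays below rely on a compiler
 * extension.
 */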
void CommMPI::CommParticles(const Grid& grid, std::list<Particle::Particle>& particles)
{
  Timer::Start("CommParticles");

  Factory& factory = MG::GetFactory();

  const vmg_int& num_particles_local = factory.GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
  vmg_float* x = factory.GetObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY");
  vmg_float* q = factory.GetObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY");

  int rank, size;
  MPI_Comm_rank(comm_global, &rank);
  MPI_Comm_size(comm_global, &size);

  Index index;
  int global_extent[6*size], send_sizes[size], recv_sizes[size];
  Index begin_remote[size], end_remote[size];
  std::vector<int> pos_indices[size], charge_indices[size];
  MPI_Datatype dt_temp;
  std::vector<vmg_float> buffer_x, buffer_q;
  std::vector<vmg_int> buffer_ind;

  std::memcpy(&global_extent[6*rank], grid.Global().BeginLocal().vec(), 3*sizeof(int));   // fixed: the extents are ints (allgathered as MPI_INT below),
  std::memcpy(&global_extent[6*rank+3], grid.Global().EndLocal().vec(), 3*sizeof(int));   // so copy 3*sizeof(int), not 3*sizeof(vmg_float)

  MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, global_extent, 6, MPI_INT, MPI_COMM_WORLD);

  for (int i=0; i<size; ++i) {
    begin_remote[i] = static_cast<Vector>(&global_extent[6*i]);
    end_remote[i] = static_cast<Vector>(&global_extent[6*i+3]);
  }

  for (int i=0; i<num_particles_local; ++i) {
    index = static_cast<Index>((Vector(&x[3*i]) - grid.Extent().Begin()) / grid.Extent().MeshWidth());
    for (int j=0; j<size; ++j)
      if (index.IsInBounds(begin_remote[j], end_remote[j])) {
        pos_indices[j].push_back(3*i);
        charge_indices[j].push_back(i);
        break;
      }
  }

  /*
   * Communicate which process gets how many particles
   */
  for (int i=0; i<size; ++i)
    send_sizes[i] = charge_indices[i].size();

  MPI_Alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_global);

  /*
   * Send particles
   */
  for (int i=0; i<size; ++i) {

    if (pos_indices[i].size() > 0) {

      MPI_Type_create_indexed_block(pos_indices[i].size(), 3, &pos_indices[i].front(), MPI_DOUBLE, &dt_temp);
      MPI_Type_commit(&dt_temp);
      MPI_Isend(x, 1, dt_temp, i, 0, MPI_COMM_WORLD, &Request());
      MPI_Type_free(&dt_temp);

      MPI_Type_create_indexed_block(charge_indices[i].size(), 1, &charge_indices[i].front(), MPI_DOUBLE, &dt_temp);
      MPI_Type_commit(&dt_temp);
      MPI_Isend(q, 1, dt_temp, i, 1, MPI_COMM_WORLD, &Request());
      MPI_Type_free(&dt_temp);

      MPI_Isend(&charge_indices[i].front(), charge_indices[i].size(), MPI_INT, i, 2, MPI_COMM_WORLD, &Request());

    }

  }

  /*
   * Receive particles
   */
  particles.clear(); // moved out of the loop: clearing per sender would discard particles already received from other ranks

  for (int i=0; i<size; ++i) {

    if (recv_sizes[i] > 0) {

      buffer_x.resize(3*recv_sizes[i]);
      buffer_q.resize(recv_sizes[i]);
      buffer_ind.resize(recv_sizes[i]);

      MPI_Recv(&buffer_x.front(), 3*recv_sizes[i], MPI_DOUBLE, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      MPI_Recv(&buffer_q.front(), recv_sizes[i], MPI_DOUBLE, i, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      MPI_Recv(&buffer_ind.front(), recv_sizes[i], MPI_INT, i, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      for (int j=0; j<recv_sizes[i]; ++j)
        particles.push_back(Particle::Particle(&buffer_x[3*j], buffer_q[j], 0.0, 0.0, i, buffer_ind[j]));

    }

  }

  ClearRequests();

  Timer::Stop("CommParticles");
}

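/*
 * Exchanges boundary particles with the neighboring ranks' linked-cell
 * lists. Each particle travels as four doubles (position x, y, z and
 * charge), hence the stride of 4 when unpacking; positions crossing a
 * periodic boundary are shifted by the domain extent.
 */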
void CommMPI::CommLCListGhosts(const Grid& grid, Particle::LinkedCellList& lc)
{
  Timer::Start("CommLCListGhosts");

  std::vector<vmg_float> send_buffer_1, send_buffer_2, recv_buffer;
  vmg_int send_size_1, send_size_2, recv_size;
  std::list<Particle::Particle>::iterator p_iter;
  Grid::iterator g_iter;

  Tuple<int> neighbors;

  for (int i=2; i>=0; --i) {

    /*
     * Get ranks of neighbors
     */
    MPI_Cart_shift(comm_global, i, 1, &neighbors.Left(), &neighbors.Right());

    /*
     * Send to left
     */
    send_buffer_1.clear();

    for (g_iter=lc.Iterators().Boundary1()[i].Begin(); g_iter!=lc.Iterators().Boundary1()[i].End(); ++g_iter)
      lc(*g_iter).clear();

    for (g_iter=lc.Iterators().NearBoundary1()[i].Begin(); g_iter!=lc.Iterators().NearBoundary1()[i].End(); ++g_iter)
      for (p_iter=lc(*g_iter).begin(); p_iter!=lc(*g_iter).end(); ++p_iter) {

        if (grid.Global().BeginLocal()[i] == 0)
          for (int j=0; j<3; ++j)
            send_buffer_1.push_back(p_iter->Pos()[j] + grid.Extent().Size()[j]);
        else
          for (int j=0; j<3; ++j)
            send_buffer_1.push_back(p_iter->Pos()[j]);

        send_buffer_1.push_back(p_iter->Charge());

      }

    send_size_1 = send_buffer_1.size();

    MPI_Isend(&send_size_1, 1, MPI_INT, neighbors.Left(), 0, comm_global, &Request());

    if (send_size_1 > 0)
      MPI_Isend(&send_buffer_1.front(), send_size_1, MPI_DOUBLE, neighbors.Left(), 1, comm_global, &Request());

    /*
     * Send to right
     */
    send_buffer_2.clear();

    for (g_iter=lc.Iterators().Boundary2()[i].Begin(); g_iter!=lc.Iterators().Boundary2()[i].End(); ++g_iter)
      lc(*g_iter).clear();

    for (g_iter=lc.Iterators().NearBoundary2()[i].Begin(); g_iter!=lc.Iterators().NearBoundary2()[i].End(); ++g_iter)
      for (p_iter=lc(*g_iter).begin(); p_iter!=lc(*g_iter).end(); ++p_iter) {

        if (grid.Global().EndLocal()[i] == grid.Global().SizeGlobal()[i])
          for (int j=0; j<3; ++j)
            send_buffer_2.push_back(p_iter->Pos()[j] - grid.Extent().Size()[j]);
        else
          for (int j=0; j<3; ++j)
            send_buffer_2.push_back(p_iter->Pos()[j]);

        send_buffer_2.push_back(p_iter->Charge());

      }

    send_size_2 = send_buffer_2.size();

    MPI_Isend(&send_size_2, 1, MPI_INT, neighbors.Right(), 2, comm_global, &Request());

    if (send_size_2 > 0)
      MPI_Isend(&send_buffer_2.front(), send_size_2, MPI_DOUBLE, neighbors.Right(), 3, comm_global, &Request());

    /*
     * Receive from right
     */
    MPI_Recv(&recv_size, 1, MPI_INT, neighbors.Right(), 0, comm_global, MPI_STATUS_IGNORE);

    if (recv_size > 0) {

      recv_buffer.resize(recv_size);
      MPI_Recv(&recv_buffer.front(), recv_size, MPI_DOUBLE, neighbors.Right(), 1, comm_global, MPI_STATUS_IGNORE);

      for (vmg_int i=0; i<recv_size; i+=4)
        lc.AddParticleToComplete(&recv_buffer[i], recv_buffer[i+3]);

    }

    /*
     * Receive from left
     */
    MPI_Recv(&recv_size, 1, MPI_INT, neighbors.Left(), 2, comm_global, MPI_STATUS_IGNORE);

    if (recv_size > 0) {

      recv_buffer.resize(recv_size);
      MPI_Recv(&recv_buffer.front(), recv_size, MPI_DOUBLE, neighbors.Left(), 3, comm_global, MPI_STATUS_IGNORE);

      for (vmg_int i=0; i<recv_size; i+=4)
        lc.AddParticleToComplete(&recv_buffer[i], recv_buffer[i+3]);

    }

    ClearRequests();
  }

  Timer::Stop("CommLCListGhosts");
}

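/*
 * Adds each particle's potential into PARTICLE_POTENTIAL_ARRAY on its
 * owning rank via one-sided communication (MPI_Accumulate with MPI_SUM).
 * The RMA window is created lazily on first use and freed in the
 * destructor.
 */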
void CommMPI::CommAddPotential(std::list<Particle::Particle>& particles)
{
  Timer::Start("CommAddPotential");

  std::list<Particle::Particle>::iterator iter;

  if (!win_created) {
    vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
    const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
    MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
    win_created = true;
  }

  MPI_Win_fence(MPI_MODE_NOPRECEDE, win);

  for (iter=particles.begin(); iter!=particles.end(); ++iter)
    MPI_Accumulate(&iter->Pot(), 1, MPI_DOUBLE, iter->Rank(), iter->Index(), 1, MPI_DOUBLE, MPI_SUM, win);

  MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);

  Timer::Stop("CommAddPotential");
}

void CommMPI::CommAddPotential(Particle::LinkedCellList& lc)
{
  Timer::Start("CommAddPotential");

  Grid::iterator g_iter;
  std::list<Particle::Particle>::iterator p_iter;

  if (!win_created) {
    vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
    const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
    MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
    win_created = true;
  }

  MPI_Win_fence(MPI_MODE_NOPRECEDE, win);

  for (g_iter=lc.Iterators().Local().Begin(); g_iter!=lc.Iterators().Local().End(); ++g_iter)
    for (p_iter=lc(*g_iter).begin(); p_iter!=lc(*g_iter).end(); ++p_iter)
      MPI_Accumulate(&p_iter->Pot(), 1, MPI_DOUBLE, p_iter->Rank(), p_iter->Index(), 1, MPI_DOUBLE, MPI_SUM, win);

  MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);

  Timer::Stop("CommAddPotential");
}

vmg_float CommMPI::GlobalSum(const vmg_float& value)
{
  vmg_float result = value;

  MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);

  return result;
}

vmg_float CommMPI::GlobalSumRoot(const vmg_float& value)
{
  vmg_float recv_buffer;
  vmg_float send_buffer = value;

  MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);

  return recv_buffer;
}

void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
{
  MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
}

void CommMPI::PrintString(const char* format, ...)
{
  va_list args;
  va_start(args, format);
  vsnprintf(print_buffer, sizeof(print_buffer), format, args); // vsnprintf instead of vsprintf to avoid overflowing print_buffer
  printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
  va_end(args);
}

void CommMPI::PrintStringOnce(const char* format, ...)
{
  if (GlobalRank() == 0) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}

void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path, xml_header;

  pugi::xml_document doc;
  pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
  node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
  doc.save(xml_header);

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* xml_header_array = Helper::GetCharArray(xml_header.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] xml_header_array;
  delete [] str_array;
}

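/*
 * Collective variant of PrintXML: rank 0 writes the XML header via the
 * shared file pointer, then all ranks append their data in rank order
 * with MPI_File_write_ordered.
 */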
void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
{
  MPI_File file;
  std::stringstream path;

  path << OutputPath() << filename;

  char* filename_array = Helper::GetCharArray(path.str());
  char* str_array = Helper::GetCharArray(xml_data);

  MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (GlobalRank() == 0) {
    std::stringstream xml_header;
    pugi::xml_document doc;
    pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
    node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
    doc.save(xml_header);

    char* xml_header_array = Helper::GetCharArray(xml_header.str());

    MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] xml_header_array;
  }

  MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&file);

  delete [] filename_array;
  delete [] str_array;
}

void CommMPI::PrintAllSettings()
{
  std::stringstream buf;
  MPI_File file;
  MPI_Status status;

  Multigrid* mg = MG::GetFactory().Get("SOL")->Cast<Multigrid>();

  buf << OutputPath() << "settings.txt";
  char *filename = Helper::GetCharArray(buf.str());

  MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);
  MPI_File_close(&file);

  for (int i=mg->MinLevel(); i<=mg->MaxLevel(); ++i) {

    MPI_Comm comm = comm_info.GetCommunicator((*mg)(i));

    if (comm != MPI_COMM_NULL) {

      MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);

      int comm_rank;
      MPI_Comm_rank(comm, &comm_rank);

      if (comm_rank == 0) {

        buf.str("");
        buf << "PROCESS INDEPENDENT INFORMATION:" << std::endl
            << "GRID LEVEL " << i << std::endl
            << "BEGINFINEST: " << (*mg)(i).Global().BeginFinest() << std::endl
            << "ENDFINEST: " << (*mg)(i).Global().EndFinest() << std::endl
            << "SIZEFINEST: " << (*mg)(i).Global().SizeFinest() << std::endl
            << "SIZEGLOBAL: " << (*mg)(i).Global().SizeGlobal() << std::endl
            << "BOUNDARYTYPE: " << (*mg)(i).Global().BoundaryType() << std::endl
            << "EXTENT" << std::endl
            << "SIZE: " << (*mg)(i).Extent().Size() << std::endl
            << "SIZEFACTOR: " << (*mg)(i).Extent().SizeFactor() << std::endl
            << "BEGIN: " << (*mg)(i).Extent().Begin() << std::endl
            << "END: " << (*mg)(i).Extent().End() << std::endl
            << "MESHWIDTH: " << (*mg)(i).Extent().MeshWidth() << std::endl
            << std::endl
            << "PROCESS DEPENDENT INFORMATION:" << std::endl;

        char* char_buf = Helper::GetCharArray(buf.str());

        MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, &status);

        delete [] char_buf;

      }

      buf.str("");
      buf << "PROCESS " << comm_rank << ":" << std::endl
          << "GLOBAL" << std::endl
          << "BEGINLOCAL: " << (*mg)(i).Global().BeginLocal() << std::endl
          << "ENDLOCAL: " << (*mg)(i).Global().EndLocal() << std::endl
          << "SIZELOCAL: " << (*mg)(i).Global().SizeLocal() << std::endl
          << "LOCAL" << std::endl
          << "SIZE: " << (*mg)(i).Local().Size() << std::endl
          << "SIZETOTAL: " << (*mg)(i).Local().SizeTotal() << std::endl
          << "BEGIN: " << (*mg)(i).Local().Begin() << std::endl
          << "END: " << (*mg)(i).Local().End() << std::endl
          << "HALOBEGIN1: " << (*mg)(i).Local().HaloBegin1() << std::endl
          << "HALOEND1: " << (*mg)(i).Local().HaloEnd1() << std::endl
          << "HALOBEGIN2: " << (*mg)(i).Local().HaloBegin2() << std::endl
          << "HALOEND2: " << (*mg)(i).Local().HaloEnd2() << std::endl
          << "BOUNDARYBEGIN1: " << (*mg)(i).Local().BoundaryBegin1() << std::endl
          << "BOUNDARYEND1: " << (*mg)(i).Local().BoundaryEnd1() << std::endl
          << "BOUNDARYBEGIN2: " << (*mg)(i).Local().BoundaryBegin2() << std::endl
          << "BOUNDARYEND2: " << (*mg)(i).Local().BoundaryEnd2() << std::endl
          << "ALIGNMENTBEGIN: " << (*mg)(i).Local().AlignmentBegin() << std::endl
          << "ALIGNMENTEND: " << (*mg)(i).Local().AlignmentEnd() << std::endl
          << std::endl;

      char* char_buf = Helper::GetCharArray(buf.str());

      MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, &status);

      delete [] char_buf;

      MPI_File_close(&file);

    }
  }

  delete [] filename;

}

void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
{
  TempGrid *temp = MG::GetTempGrid();
  temp->SetProperties(sol);
  temp->ImportFromResidual(sol, rhs);

  PrintInnerGrid(*temp, information);
}

void CommMPI::PrintGrid(Grid& grid, const char* information)
{
  Index i;
  std::stringstream buf;

  Index begin = 0;
  Index end = grid.Local().End();
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int j=0; j<3; ++j)
    if (grid.Global().EndLocal()[j] == grid.Global().SizeGlobal()[j])
      end[j] = grid.Local().SizeTotal()[j];

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  begin_local = grid.Global().BeginLocal();
  end_local = grid.Global().EndLocal();
  begin_global = 0;

  for (int i=0; i<3; ++i) {

    if (BoundaryConditions()[i] == Dirichlet ||
        BoundaryConditions()[i] == Open) {

      if (begin_local[i] != 0)
        begin_local[i] -= grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i];

      end_global[i] = grid.Global().SizeGlobal()[i];

    } else if (BoundaryConditions()[i] == Periodic) {

      if (end_local[i] == grid.Global().SizeGlobal()[i])
        end_local[i] += grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i] + grid.Local().HaloEnd2()[i] - grid.Local().HaloBegin2()[i];
      else
        end_local[i] += grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i];

      end_global[i] = grid.Global().SizeGlobal()[i] +
                      grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i] +
                      grid.Local().HaloEnd2()[i] - grid.Local().HaloBegin2()[i];

    }

  }

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local);
}

void CommMPI::PrintCorrectedGrid(Grid& grid, const char* information)
{
  Index i;

  std::stringstream buf;

  Index begin = 0;
  Index end = grid.Local().End();
  Index begin_local, end_local, begin_global, end_global;

  CommToGhosts(grid);

  for (int j=0; j<3; ++j)
    if (grid.Global().EndLocal()[j] == grid.Global().SizeGlobal()[j])
      end[j] = grid.Local().SizeTotal()[j];

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) - Grid::Correction() << " ";

  begin_local = grid.Global().BeginLocal();
  end_local = grid.Global().EndLocal();
  begin_global = 0;

  for (int i=0; i<3; ++i) {

    if (BoundaryConditions()[i] == Dirichlet ||
        BoundaryConditions()[i] == Open) {

      if (begin_local[i] != 0)
        --begin_local[i];

      end_global[i] = grid.Global().SizeGlobal()[i];

    } else if (BoundaryConditions()[i] == Periodic) {

      if (begin_local[i] != 0)
        --begin_local[i];

      if (end_local[i] == grid.Global().SizeGlobal()[i])
        end_local[i] += 2;

      end_global[i] = grid.Global().SizeGlobal()[i] + 2;

    }

  }

  CreateOutputFiles(grid, buf, information,
                    0, grid.Global().SizeGlobal()+2,
                    begin, end);
}

void CommMPI::PrintInnerGrid(Grid& grid, const char* information)
{
  Index i;
  Index begin_local, end_local, begin_global, end_global;
  std::stringstream buf;

  CommToGhosts(grid);

  Index begin = 0;
  Index end = grid.Local().End();

  /* VTK parallel ImageData files seem to require some overlap. */
  for (int j=0; j<3; ++j)
    if (grid.Global().BeginLocal()[j] == 0)
      ++begin[j];

  /* Write grid values into buffer */
  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) << " ";

  for (int j=0; j<3; ++j) {

    if (BoundaryConditions()[j] == Dirichlet ||
        BoundaryConditions()[j] == Open) {

      begin_global[j] = 1;
      end_global[j] = grid.Global().SizeGlobal()[j] - 1;

      if (grid.Global().BeginLocal()[j] == 0)
        begin_local[j] = 1;
      else
        begin_local[j] = grid.Global().BeginLocal()[j] - 1;

      if (grid.Global().EndLocal()[j] == grid.Global().SizeGlobal()[j])
        end_local[j] = grid.Global().EndLocal()[j] - 1;
      else
        end_local[j] = grid.Global().EndLocal()[j];

    } else if (BoundaryConditions()[j] == Periodic) {

      begin_global[j] = 0;
      end_global[j] = grid.Global().SizeGlobal()[j];

      if (grid.Global().BeginLocal()[j] == 0)
        begin_local[j] = grid.Global().BeginLocal()[j];
      else
        begin_local[j] = grid.Global().BeginLocal()[j] - 1;

      end_local[j] = grid.Global().EndLocal()[j];

    } else {

      assert(0 && "Boundary condition not handled properly.");

    }

  }

  CreateOutputFiles(grid, buf, information,
                    begin_global, end_global,
                    begin_local, end_local);
}

void CommMPI::PrintInnerCorrectedGrid(Grid& grid, const char* information)
{
  Index i;
  const Index begin = grid.Local().Begin();
  const Index end = grid.Local().End();
  std::stringstream buf;

  for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
    for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
      for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
        buf << std::scientific << grid.GetVal(i) - Grid::Correction() << " ";

  CreateOutputFiles(grid, buf, information,
                    0, grid.Global().SizeGlobal(),
                    grid.Global().BeginLocal(), grid.Global().EndLocal());
}

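/*
 * Builds a committed subarray datatype selecting the [start, end) box
 * out of the local grid storage. The caller is responsible for freeing
 * the returned type.
 */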
MPI_Datatype CommMPI::GetDatatype(const Index& start, const Index& end, const Index& grid_size)
{
  MPI_Datatype type;
  Index sizes = grid_size;
  Index subsizes = end - start;
  Index starts = start;

  MPI_Type_create_subarray(3, sizes.vec(), subsizes.vec(), starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &type);

  MPI_Type_commit(&type);

  return type;
}

MPI_Datatype CommMPI::GetDatatype(const Grid& grid, const GridIteratorSet& bounds)
{
  return GetDatatype(bounds.Begin().GetBegin(), bounds.Begin().GetEnd(), grid.Local().SizeTotal());
}

void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
                                const Index& begin_global, const Index& end_global,
                                const Index& begin_local, const Index& end_local)
{
  int output_count = OutputCount();

  MPI_Comm comm = comm_info.GetCommunicator(grid);

  if (comm != MPI_COMM_NULL) {

    MPI_File file;
    std::string conv_information = Helper::ReplaceWhitespaces(information, "_");

    CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
                             begin_global, end_global, begin_local, end_local);

    file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
                                  begin_global, end_global, begin_local, end_local);

    char *char_buf = Helper::GetCharArray(serial_data.str());
    MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
    delete [] char_buf;

    FinalizeSerialOutputFile(file);

  }
}

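/*
 * Writes the VTK master file (<count>.pvti): rank 0 contributes the
 * header and footer through the shared file pointer, and every rank
 * appends one <Piece> element referencing its serial .vti file.
 */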
void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
                                       const int& output_count, const char* information,
                                       const Index& begin_global, const Index& end_global,
                                       const Index& begin_local, const Index& end_local)
{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  snprintf(parallel_filename, sizeof(parallel_filename), "%s%04d.pvti", OutputPath().c_str(), output_count); // snprintf to guard against overly long output paths
  snprintf(serial_filename, sizeof(serial_filename), "%04d_%d.vti", output_count, rank);

  MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
  MPI_File_set_size(file, 0);

  if (rank == 0) {

    buf << "<?xml version=\"1.0\"?>" << std::endl
        << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
        << "  <PImageData WholeExtent=\"";

    for (int i=0; i<3; ++i)
      buf << begin_global[i] << " " << end_global[i] - 1 << " ";

    buf << "\"" << std::endl
        << "              GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";

    for (int i=0; i<3; ++i)
      buf << grid.Extent().MeshWidth()[i] << " ";

    buf << "\">" << std::endl
        << "    <PPointData Scalars=\"" << information << "\">" << std::endl
        << "      <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
        << "    </PPointData>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  buf.str("");
  buf << "    <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] - 1 << " ";

  buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());

  MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

  delete [] char_buf;

  if (rank == 0) {

    buf.str("");

    buf << "  </PImageData>" << std::endl
        << "</VTKFile>" << std::endl;

    char* char_buf = Helper::GetCharArray(buf.str());

    MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);

    delete [] char_buf;
  }

  MPI_File_close(&file);
}

MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
                                         const int& output_count, const char* information,
                                         const Index& begin_global, const Index& end_global,
                                         const Index& begin_local, const Index& end_local)
{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  snprintf(serial_filename, sizeof(serial_filename), "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank); // snprintf to guard against overly long output paths

  MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);

  buf << "<?xml version=\"1.0\"?>" << std::endl
      << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
      << "  <ImageData WholeExtent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_global[i] << " " << end_global[i] - 1 << " ";

  buf << "\"" << std::endl
      << "             Origin=\"0 0 0\" Spacing=\"";

  for (int i=0; i<3; ++i)
    buf << grid.Extent().MeshWidth()[i] << " ";

  buf << "\">" << std::endl
      << "    <Piece Extent=\"";

  for (int i=0; i<3; ++i)
    buf << begin_local[i] << " " << end_local[i] - 1 << " ";

  buf << "\">" << std::endl
      << "      <PointData Scalars=\"" << information << "\">" << std::endl
      << "        <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
      << "          ";

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  return file;
}

void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
{
  std::stringstream buf;

  buf << std::endl
      << "        </DataArray>" << std::endl
      << "      </PointData>" << std::endl
      << "    </Piece>" << std::endl
      << "  </ImageData>" << std::endl
      << "</VTKFile>" << std::endl;

  char* char_buf = Helper::GetCharArray(buf.str());
  MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
  delete [] char_buf;

  MPI_File_close(&file);
}

int CommMPI::GlobalRank() const
{
  int rank;

  MPI_Comm_rank(comm_global, &rank);

  return rank;
}

Index CommMPI::GlobalPos() const
{
  Index dims, periods, coords;

  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());

  return coords;
}

Index CommMPI::GlobalProcs() const
{
  Index dims, periods, coords;

  MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());

  return dims;
}

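/*
 * Sets up the global communicator: if the supplied communicator already
 * carries a Cartesian topology it is used directly, otherwise a 3D
 * Cartesian communicator is created (periodic where the boundary
 * conditions are), allowing MPI to reorder ranks.
 */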
void CommMPI::InitCommMPI(const MPI_Comm& comm)
{
  int status, size, rank;
  int dims[3] = {0, 0, 0};
  int periods[3];

  global_coarse_grid = NULL;

  for (int i=0; i<3; ++i)
    periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  MPI_Topo_test(comm, &status);

  if (status == MPI_CART) {

    comm_global = comm;

  } else {

    MPI_Dims_create(size, 3, dims);

    MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);

  }

  comm_info.SetCommGlobal(comm_global);

  MPI_Info_create(&info);
  char key[] = "no_locks";
  char val[] = "true";
  MPI_Info_set(info, key, val);

}

CommMPI::~CommMPI()
{
  std::map<const Grid*, TempGrid*>::iterator iter;

  for (iter=finer_grids.begin(); iter!=finer_grids.end(); ++iter)
    delete iter->second;

  for (iter=coarser_grids.begin(); iter!=coarser_grids.end(); ++iter)
    delete iter->second;

  MPI_Comm_free(&comm_global);

  if (win_created)
    MPI_Win_free(&win);

  MPI_Info_free(&info);

  delete global_coarse_grid;
}

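/*
 * Rank 0 creates a unique, timestamped directory below ./output/ and
 * broadcasts the path to all ranks. Falls back to the current directory
 * when Boost.Filesystem is unavailable.
 */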
std::string CommMPI::CreateOutputDirectory()
{
#ifdef HAVE_BOOST_FILESYSTEM
  std::string path, unique_path;
  std::stringstream unique_suffix;
  int suffix_counter = 0;
  char buffer[129];
  time_t rawtime;
  struct tm *timeinfo;
  int path_size;
  char* path_array;

  if (GlobalRank() == 0) {

    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
    path = buffer;

    if (!fs::exists("output"))
      fs::create_directory("output");

    unique_path = path;

    while (fs::exists(unique_path.c_str())) {

      unique_suffix.str("");
      unique_suffix << "_" << suffix_counter++ << "/";

      unique_path = path;
      unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());

    }

    fs::create_directory(unique_path.c_str());

    path_size = unique_path.size() + 1;
    path_array = Helper::GetCharArray(unique_path);

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);

  } else {

    MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
    path_array = new char[path_size];

  }

  MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);

  unique_path = path_array;

  delete [] path_array;

  return unique_path;

#else

  return "./";

#endif
}

#endif /* HAVE_MPI */