source: src/comm/comm_mpi.cpp@32ff22

Last change on this file since 32ff22 was 894a5f, checked in by Julian Iseringhausen <isering@…>, 14 years ago

Parallel performance update.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1314 5161e1c8-67bf-11de-9fd5-51895aff932f

/**
 * @file comm_mpi.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Wed Jun 16 13:21:06 2010
 *
 * @brief Class for MPI-based communication.
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI

#include <mpi.h>

#ifdef HAVE_BOOST_FILESYSTEM
#include <boost/filesystem.hpp>
namespace fs = boost::filesystem;
#endif

#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <sstream>

#include "base/helper.hpp"
#include "base/linked_cell_list.hpp"
#include "base/timer.hpp"
#include "base/tuple.hpp"
#include "comm/comm_mpi.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "mg.hpp"

/* Scratch buffer shared by PrintString and PrintStringOnce. */
static char print_buffer[512];

using namespace VMG;

39void CommMPI::Isend(Grid& grid, VMG::MPI::Datatype& type, const MPI_Comm& comm, const int& tag)
40{
41 if (type.Feasible())
42 MPI_Isend(&grid(0), 1, type.Type(), type.Rank(), tag, comm, &Request());
43}
44
45void CommMPI::IsendAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
46{
47 for (unsigned int i=0; i<types.size(); ++i)
48 if (types[i].Feasible())
49 MPI_Isend(&grid(0), 1, types[i].Type(), types[i].Rank(), tag_start+types[i].TagSend(), comm, &Request());
50}
51
52void CommMPI::Irecv(Grid& grid, VMG::MPI::Datatype& type, const MPI_Comm& comm, const int& tag)
53{
54 if (type.Feasible())
55 MPI_Irecv(&grid(0), 1, type.Type(), type.Rank(), tag, comm, &Request());
56}
57
58void CommMPI::IrecvAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
59{
60 for (unsigned int i=0; i<types.size(); ++i)
61 if (types[i].Feasible())
62 MPI_Irecv(&grid(0), 1, types[i].Type(), types[i].Rank(), tag_start+types[i].TagReceive(), comm, &Request());
63}
64
65void CommMPI::IrecvAllBuffered(std::vector<VMG::MPI::Datatype>& types, const MPI_Comm& comm, const int& tag_start)
66{
67 receive_buffer.resize(types.size());
68 for (unsigned int i=0; i<types.size(); ++i)
69 if (types[i].Feasible()) {
70 receive_buffer[i].resize(types[i].Size());
71 MPI_Irecv(&receive_buffer[i].front(), types[i].Size(), MPI_DOUBLE,
72 types[i].Rank(), tag_start+types[i].TagReceive(), comm, &Request());
73 }
74}
75
76void CommMPI::AddBufferAll(Grid& grid, std::vector<VMG::MPI::Datatype>& types)
77{
78 Index ind;
79
80 for (unsigned int i=0; i<types.size(); ++i) {
81 int c = 0;
82 for (ind.X()=types[i].Starts().X(); ind.X()<types[i].Starts().X()+types[i].Subsizes().X(); ++ind.X())
83 for (ind.Y()=types[i].Starts().Y(); ind.Y()<types[i].Starts().Y()+types[i].Subsizes().Y(); ++ind.Y())
84 for (ind.Z()=types[i].Starts().Z(); ind.Z()<types[i].Starts().Z()+types[i].Subsizes().Z(); ++ind.Z())
85 grid(ind) += receive_buffer[i][c++];
86 }
87
88 receive_buffer.clear();
89}
90
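/* Copy grid data from the distribution of grid_old to the distribution of
 * grid_new, using the precomputed global derived datatypes for this grid pair. */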
91void CommMPI::CommSubgrid(Grid& grid_old, Grid& grid_new)
92{
93 MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
94 if (comm != MPI_COMM_NULL) {
95 VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new);
96 IsendAll(grid_old, datatypes.Send(), comm, 0411);
97 IrecvAll(grid_new, datatypes.Receive(), comm, 0411);
98 ClearRequests();
99 }
100}
101
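/* Like CommSubgrid, but the received values are added onto grid_new via an
 * intermediate receive buffer instead of overwriting it. */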
102void CommMPI::CommAddSubgrid(Grid& grid_old, Grid& grid_new)
103{
104 MPI_Comm comm = settings.CommunicatorGlobal(grid_old);
105 if (comm != MPI_COMM_NULL) {
106 VMG::MPI::DatatypesGlobal& datatypes = settings.DatatypesGlobal(grid_old, grid_new);
107 IsendAll(grid_old, datatypes.Send(), comm, 1806);
108 IrecvAllBuffered(datatypes.Receive(), comm, 1806);
109 ClearRequests();
110 AddBufferAll(grid_new, datatypes.Receive());
111 }
112}
113
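/* Send the local halo (ghost) values to the neighboring processes and add the
 * values received in return onto the corresponding near-boundary points. */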
114void CommMPI::CommFromGhosts(Grid& grid)
115{
116 MPI_Comm comm = settings.CommunicatorLocal(grid);
117
118 if (comm != MPI_COMM_NULL) {
119 VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
120 IsendAll(grid, types.Halo(), comm, 1337);
121 IrecvAllBuffered(types.NB(), comm, 1337);
122 ClearRequests();
123 AddBufferAll(grid, types.NB());
124 }
125}
126
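/* Start/finish pair that splits CommFromGhosts so computation can overlap the
 * halo exchange: Start posts the nonblocking sends and buffered receives,
 * Finish completes them (ClearRequests) and accumulates the buffers. */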
127void CommMPI::CommFromGhostsAsyncStart(Grid& grid)
128{
129 MPI_Comm comm = settings.CommunicatorLocal(grid);
130
131 if (comm != MPI_COMM_NULL) {
132 VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
133 IsendAll(grid, types.Halo(), comm, 0xc0ffee);
134 IrecvAllBuffered(types.NB(), comm, 0xc0ffee);
135 }
136}
137
138void CommMPI::CommFromGhostsAsyncFinish(Grid& grid)
139{
140 VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
141 ClearRequests();
142 AddBufferAll(grid, types.NB());
143}
144
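/* Fill the local halo (ghost) layers with the near-boundary values of the
 * neighboring processes. */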
145void CommMPI::CommToGhosts(Grid& grid)
146{
147 MPI_Comm comm = settings.CommunicatorLocal(grid);
148
149 if (comm != MPI_COMM_NULL) {
150 VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
151 IsendAll(grid, types.NB(), comm, 2310);
152 IrecvAll(grid, types.Halo(), comm, 2310);
153 ClearRequests();
154 }
155}
156
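/* Red-black variant of CommToGhosts: for each dimension (z, y, x) only the grid
 * points of one checkerboard color, selected by offset, are packed into buffers,
 * exchanged with the two Cartesian neighbors via MPI_Sendrecv and written into
 * the halo layers. */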
157void CommMPI::CommToGhostsRB(Grid& grid, const int& offset)
158{
159 MPI_Comm comm = settings.CommunicatorLocal(grid);
160
161 if (comm != MPI_COMM_NULL) {
162
163 Index i;
164 Tuple<int> neighbors;
165 std::vector<vmg_float> buffer_send_left, buffer_send_right;
166 std::vector<vmg_float> buffer_recv_left, buffer_recv_right;
167 std::vector<vmg_float>::const_iterator iter;
168
169 const Index halo_size_1 = grid.Local().HaloEnd1() - grid.Local().HaloBegin1();
170 const Index halo_size_2 = grid.Local().HaloEnd2() - grid.Local().HaloBegin2();
171
172 buffer_send_left.clear();
173 buffer_send_right.clear();
174 buffer_recv_left.clear();
175 buffer_recv_right.clear();
176
177 MPI_Cart_shift(comm, 2, 1, &neighbors.Left(), &neighbors.Right());
178
179 if (halo_size_1.Z()) {
180
181 buffer_send_left.reserve(grid.Local().Size().X() *
182 grid.Local().Size().Y() *
183 halo_size_1.Z() / 2);
184
185 for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
186 for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
187 for (i.Z()=grid.Local().Begin().Z() + (i.X()+i.Y()+offset)%2;
188 i.Z()<grid.Local().Begin().Z() + halo_size_1.Z();
189 i.Z() += 2)
190 buffer_send_left.push_back(grid.GetVal(i));
191 }
192
193 if (halo_size_2.Z()) {
194
195 buffer_send_right.reserve(grid.Local().Size().X() *
196 grid.Local().Size().Y() *
197 halo_size_2.Z() / 2);
198
199 for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
200 for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
201 for (i.Z()=grid.Local().End().Z() - halo_size_2.Z() + (i.X()+i.Y()+offset)%2;
202 i.Z()<grid.Local().End().Z();
203 i.Z() += 2)
204 buffer_send_right.push_back(grid.GetVal(i));
205
206 }
207
208 buffer_recv_left.resize(buffer_send_left.size());
209 buffer_recv_right.resize(buffer_send_right.size());
210
211 MPI_Sendrecv(&buffer_send_right.front(), buffer_send_right.size(), MPI_DOUBLE, neighbors.Right(), 7,
212 &buffer_recv_left.front(), buffer_recv_left.size(), MPI_DOUBLE, neighbors.Left(), 7,
213 comm, MPI_STATUS_IGNORE);
214
215 MPI_Sendrecv(&buffer_send_left.front(), buffer_send_left.size(), MPI_DOUBLE, neighbors.Left(), 8,
216 & buffer_recv_right.front(), buffer_recv_right.size(), MPI_DOUBLE, neighbors.Right(), 8,
217 comm, MPI_STATUS_IGNORE);
218
219 if (halo_size_1.Z()) {
220
221 iter = buffer_recv_left.begin();
222 for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
223 for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
224 for (i.Z() = (i.X()+i.Y()+offset)%2; i.Z() < grid.Local().Begin().Z(); i.Z() += 2)
225 grid(i) = *iter++;
226
227 }
228
229 if (halo_size_2.Z()) {
230
231 iter = buffer_recv_right.begin();
232 for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
233 for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
234 for (i.Z()=grid.Local().End().Z() + (i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
235 grid(i) = *iter++;
236
237 }
238
239 buffer_send_left.clear();
240 buffer_send_right.clear();
241 buffer_recv_left.clear();
242 buffer_recv_right.clear();
243
244 MPI_Cart_shift(comm, 1, 1, &neighbors.Left(), &neighbors.Right());
245
246 if (halo_size_1.Y()) {
247
248 buffer_send_left.reserve(grid.Local().Size().X() *
249 halo_size_1.Y() *
250 grid.Local().SizeTotal().Z() / 2);
251
252 for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
253 for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().Begin().Y() + halo_size_1.Y(); ++i.Y())
254 for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
255 buffer_send_left.push_back(grid.GetVal(i));
256 }
257
258 if (halo_size_2.Y()) {
259
260 buffer_send_right.reserve(grid.Local().Size().X() *
261 halo_size_2.Y() *
262 grid.Local().SizeTotal().Z() / 2);
263
264 for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
265 for (i.Y()=grid.Local().End().Y() - halo_size_2.Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
266 for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
267 buffer_send_right.push_back(grid.GetVal(i));
268
269 }
270
271 buffer_recv_left.resize(buffer_send_left.size());
272 buffer_recv_right.resize(buffer_send_right.size());
273
274 MPI_Sendrecv(&buffer_send_right.front(), buffer_send_right.size(), MPI_DOUBLE, neighbors.Right(), 7,
275 &buffer_recv_left.front(), buffer_recv_left.size(), MPI_DOUBLE, neighbors.Left(), 7,
276 comm, MPI_STATUS_IGNORE);
277
278 MPI_Sendrecv(&buffer_send_left.front(), buffer_send_left.size(), MPI_DOUBLE, neighbors.Left(), 8,
279 & buffer_recv_right.front(), buffer_recv_right.size(), MPI_DOUBLE, neighbors.Right(), 8,
280 comm, MPI_STATUS_IGNORE);
281
282 if (halo_size_1.Y()) {
283
284 iter = buffer_recv_left.begin();
285 for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
286 for (i.Y()=0; i.Y()<grid.Local().Begin().Y(); ++i.Y())
287 for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
288 grid(i) = *iter++;
289
290 }
291
292 if (halo_size_2.Y()) {
293
294 iter = buffer_recv_right.begin();
295 for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
296 for (i.Y()=grid.Local().End().Y(); i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
297 for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
298 grid(i) = *iter++;
299
300 }
301
302 buffer_send_left.clear();
303 buffer_send_right.clear();
304 buffer_recv_left.clear();
305 buffer_recv_right.clear();
306
307 MPI_Cart_shift(comm, 0, 1, &neighbors.Left(), &neighbors.Right());
308
309 if (halo_size_1.X()) {
310
311 buffer_send_left.reserve(halo_size_1.X() *
312 grid.Local().SizeTotal().Y() *
313 grid.Local().SizeTotal().Z() / 2);
314
315 for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().Begin().X() + halo_size_1.X(); ++i.X())
316 for (i.Y()=0; i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
317 for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
318 buffer_send_left.push_back(grid.GetVal(i));
319 }
320
321 if (halo_size_2.X()) {
322
323 buffer_send_right.reserve(halo_size_2.X() *
324 grid.Local().SizeTotal().Y() *
325 grid.Local().SizeTotal().Z() / 2);
326
327 for (i.X()=grid.Local().End().X() - halo_size_2.X(); i.X()<grid.Local().End().X(); ++i.X())
328 for (i.Y()=0; i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
329 for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
330 buffer_send_right.push_back(grid.GetVal(i));
331
332 }
333
334 buffer_recv_left.resize(buffer_send_left.size());
335 buffer_recv_right.resize(buffer_send_right.size());
336
337 MPI_Sendrecv(&buffer_send_right.front(), buffer_send_right.size(), MPI_DOUBLE, neighbors.Right(), 7,
338 &buffer_recv_left.front(), buffer_recv_left.size(), MPI_DOUBLE, neighbors.Left(), 7,
339 comm, MPI_STATUS_IGNORE);
340
341 MPI_Sendrecv(&buffer_send_left.front(), buffer_send_left.size(), MPI_DOUBLE, neighbors.Left(), 8,
342 & buffer_recv_right.front(), buffer_recv_right.size(), MPI_DOUBLE, neighbors.Right(), 8,
343 comm, MPI_STATUS_IGNORE);
344
345 if (halo_size_1.X()) {
346
347 iter = buffer_recv_left.begin();
348 for (i.X()=0; i.X()<grid.Local().Begin().X(); ++i.X())
349 for (i.Y()=0; i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
350 for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
351 grid(i) = *iter++;
352 }
353
354 if (halo_size_2.X()) {
355
356 iter = buffer_recv_right.begin();
357 for (i.X()=grid.Local().End().X(); i.X()<grid.Local().SizeTotal().X(); ++i.X())
358 for (i.Y()=0; i.Y()<grid.Local().SizeTotal().Y(); ++i.Y())
359 for (i.Z()=(i.X()+i.Y()+offset)%2; i.Z()<grid.Local().SizeTotal().Z(); i.Z() += 2)
360 grid(i) = *iter++;
361
362 }
363
364 }
365}
366
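/* Asynchronous counterpart of CommToGhosts; the Timer calls record how much
 * time is spent looking up the datatypes and posting the sends and receives. */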
367void CommMPI::CommToGhostsAsyncStart(Grid& grid)
368{
369 MPI_Comm comm = settings.CommunicatorLocal(grid);
370
371 if (comm != MPI_COMM_NULL) {
372 Timer::Start("CTGAsyncGetDatatypes");
373 VMG::MPI::DatatypesLocal& types = settings.DatatypesLocal(grid);
374 Timer::Stop("CTGAsyncGetDatatypes");
375 Timer::Start("CTGAsyncIsend");
376 IsendAll(grid, types.NB(), comm, 2412);
377 Timer::Stop("CTGAsyncIsend");
378 Timer::Start("CTGAsyncIrecv");
379 IrecvAll(grid, types.Halo(), comm, 2412);
380 Timer::Stop("CTGAsyncIrecv");
381 }
382}
383
384void CommMPI::CommToGhostsAsyncFinish()
385{
386 ClearRequests();
387}
388
void CommMPI::CommParticles(const Grid& grid, std::list<Particle::Particle>& particles)
{
  Factory& factory = MG::GetFactory();

  const vmg_int& num_particles_local = factory.GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
  vmg_float* x = factory.GetObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY");
  vmg_float* q = factory.GetObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY");

  int rank, size;
  MPI_Comm_rank(comm_global, &rank);
  MPI_Comm_size(comm_global, &size);

  Index index;
  std::vector<int> global_extent(6*size), send_sizes(size), recv_sizes(size);
  std::vector<Index> begin_remote(size), end_remote(size);
  std::vector< std::vector<int> > pos_indices(size), charge_indices(size);
  MPI_Datatype dt_temp;
  std::vector<vmg_float> buffer_x, buffer_q;
  std::vector<vmg_int> buffer_ind;

  /*
   * Gather the local grid extents of all processes. All ranks in this routine
   * refer to comm_global.
   */
  std::memcpy(&global_extent[6*rank], grid.Global().BeginLocal().vec(), 3*sizeof(int));
  std::memcpy(&global_extent[6*rank+3], grid.Global().EndLocal().vec(), 3*sizeof(int));

  MPI_Allgather(MPI_IN_PLACE, 6, MPI_INT, &global_extent.front(), 6, MPI_INT, comm_global);

  for (int i=0; i<size; ++i) {
    begin_remote[i] = static_cast<Vector>(&global_extent[6*i]);
    end_remote[i] = static_cast<Vector>(&global_extent[6*i+3]);
  }

  /*
   * Assign each local particle to the process whose subdomain contains it
   */
  for (int i=0; i<num_particles_local; ++i) {
    index = static_cast<Index>((Vector(&x[3*i]) - grid.Extent().Begin()) / grid.Extent().MeshWidth());
    for (int j=0; j<size; ++j)
      if (index.IsInBounds(begin_remote[j], end_remote[j])) {
        pos_indices[j].push_back(3*i);
        charge_indices[j].push_back(i);
        break;
      }
  }

  /*
   * Communicate which process gets how many particles
   */
  for (int i=0; i<size; ++i)
    send_sizes[i] = charge_indices[i].size();

  MPI_Alltoall(&send_sizes.front(), 1, MPI_INT, &recv_sizes.front(), 1, MPI_INT, comm_global);

  /*
   * Send particles
   */
  for (int i=0; i<size; ++i) {

    if (pos_indices[i].size() > 0) {

      MPI_Type_create_indexed_block(pos_indices[i].size(), 3, &pos_indices[i].front(), MPI_DOUBLE, &dt_temp);
      MPI_Type_commit(&dt_temp);
      MPI_Isend(x, 1, dt_temp, i, 0, comm_global, &Request());
      MPI_Type_free(&dt_temp);

      MPI_Type_create_indexed_block(charge_indices[i].size(), 1, &charge_indices[i].front(), MPI_DOUBLE, &dt_temp);
      MPI_Type_commit(&dt_temp);
      MPI_Isend(q, 1, dt_temp, i, 1, comm_global, &Request());
      MPI_Type_free(&dt_temp);

      MPI_Isend(&charge_indices[i].front(), charge_indices[i].size(), MPI_INT, i, 2, comm_global, &Request());

    }

  }

  /*
   * Receive particles. The list is cleared once up front so that particles
   * arriving from several senders accumulate.
   */
  particles.clear();

  for (int i=0; i<size; ++i) {

    if (recv_sizes[i] > 0) {

      buffer_x.resize(3*recv_sizes[i]);
      buffer_q.resize(recv_sizes[i]);
      buffer_ind.resize(recv_sizes[i]);

      MPI_Recv(&buffer_x.front(), 3*recv_sizes[i], MPI_DOUBLE, i, 0, comm_global, MPI_STATUS_IGNORE);
      MPI_Recv(&buffer_q.front(), recv_sizes[i], MPI_DOUBLE, i, 1, comm_global, MPI_STATUS_IGNORE);
      MPI_Recv(&buffer_ind.front(), recv_sizes[i], MPI_INT, i, 2, comm_global, MPI_STATUS_IGNORE);

      for (int j=0; j<recv_sizes[i]; ++j)
        particles.push_back(Particle::Particle(&buffer_x[3*j], buffer_q[j], 0.0, 0.0, i, buffer_ind[j]));

    }

  }

  ClearRequests();
}
486
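/* Exchange the particles stored in the near-boundary cells of the linked-cell
 * list with the neighboring processes in each dimension. Positions are shifted
 * by the domain extent when a particle crosses a periodic boundary, and the
 * received particles are added to the list. */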
487void CommMPI::CommLCListGhosts(const Grid& grid, Particle::LinkedCellList& lc)
488{
489 std::vector<vmg_float> send_buffer_1, send_buffer_2, recv_buffer;
490 vmg_int send_size_1, send_size_2, recv_size;
491 std::list<Particle::Particle>::iterator p_iter;
492 Grid::iterator g_iter;
493
494 Tuple<int> neighbors;
495
496 for (int i=2; i>=0; --i) {
497
498 /*
499 * Get ranks of neighbors
500 */
501 MPI_Cart_shift(comm_global, i, 1, &neighbors.Left(), &neighbors.Right());
502
503 /*
504 * Send to left
505 */
506 send_buffer_1.clear();
507
508 for (g_iter=lc.Iterators().Boundary1()[i].Begin(); g_iter!=lc.Iterators().Boundary1()[i].End(); ++g_iter)
509 lc(*g_iter).clear();
510
511 for (g_iter=lc.Iterators().NearBoundary1()[i].Begin(); g_iter!=lc.Iterators().NearBoundary1()[i].End(); ++g_iter)
512 for (p_iter=lc(*g_iter).begin(); p_iter!=lc(*g_iter).end(); ++p_iter) {
513
514 if (grid.Global().BeginLocal()[i] == 0)
515 for (int j=0; j<3; ++j)
516 send_buffer_1.push_back(p_iter->Pos()[j] + grid.Extent().Size()[j]);
517 else
518 for (int j=0; j<3; ++j)
519 send_buffer_1.push_back(p_iter->Pos()[j]);
520
521 send_buffer_1.push_back(p_iter->Charge());
522
523 }
524
525 send_size_1 = send_buffer_1.size();
526
527 MPI_Isend(&send_size_1, 1, MPI_INT, neighbors.Left(), 0, comm_global, &Request());
528
529 if (send_size_1 > 0)
530 MPI_Isend(&send_buffer_1.front(), send_size_1, MPI_DOUBLE, neighbors.Left(), 1, comm_global, &Request());
531
532 /*
533 * Send to right
534 */
535 send_buffer_2.clear();
536
537 for (g_iter=lc.Iterators().Boundary2()[i].Begin(); g_iter!=lc.Iterators().Boundary2()[i].End(); ++g_iter)
538 lc(*g_iter).clear();
539
540 for (g_iter=lc.Iterators().NearBoundary2()[i].Begin(); g_iter!=lc.Iterators().NearBoundary2()[i].End(); ++g_iter)
541 for (p_iter=lc(*g_iter).begin(); p_iter!=lc(*g_iter).end(); ++p_iter) {
542
543 if (grid.Global().EndLocal()[i] == grid.Global().SizeGlobal()[i])
544 for (int j=0; j<3; ++j)
545 send_buffer_2.push_back(p_iter->Pos()[j] - grid.Extent().Size()[j]);
546 else
547 for (int j=0; j<3; ++j)
548 send_buffer_2.push_back(p_iter->Pos()[j]);
549
550 send_buffer_2.push_back(p_iter->Charge());
551
552 }
553
554 send_size_2 = send_buffer_2.size();
555
556 MPI_Isend(&send_size_2, 1, MPI_INT, neighbors.Right(), 2, comm_global, &Request());
557
558 if (send_size_2 > 0)
559 MPI_Isend(&send_buffer_2.front(), send_size_2, MPI_DOUBLE, neighbors.Right(), 3, comm_global, &Request());
560
561 /*
562 * Receive from right
563 */
564 MPI_Recv(&recv_size, 1, MPI_INT, neighbors.Right(), 0, comm_global, MPI_STATUS_IGNORE);
565
566 if (recv_size > 0) {
567
568 recv_buffer.resize(recv_size);
569 MPI_Recv(&recv_buffer.front(), recv_size, MPI_DOUBLE, neighbors.Right(), 1, comm_global, MPI_STATUS_IGNORE);
570
571 for (vmg_int i=0; i<recv_size; i+=4)
572 lc.AddParticleToComplete(&recv_buffer[i], recv_buffer[i+3]);
573
574 }
575
576 /*
577 * Receive from left
578 */
579 MPI_Recv(&recv_size, 1, MPI_INT, neighbors.Left(), 2, comm_global, MPI_STATUS_IGNORE);
580
581 if (recv_size > 0) {
582
583 recv_buffer.resize(recv_size);
584 MPI_Recv(&recv_buffer.front(), recv_size, MPI_DOUBLE, neighbors.Left(), 3, comm_global, MPI_STATUS_IGNORE);
585
586 for (vmg_int i=0; i<recv_size; i+=4)
587 lc.AddParticleToComplete(&recv_buffer[i], recv_buffer[i+3]);
588
589 }
590
591
592 ClearRequests();
593 }
594}
595
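/* Add the potentials computed for the received particles back into the
 * PARTICLE_POTENTIAL_ARRAY on each particle's home rank, using one-sided MPI
 * (MPI_Accumulate on a window that is created once and then reused). The
 * overload below does the same for all particles stored in a linked-cell list. */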
596void CommMPI::CommAddPotential(std::list<Particle::Particle>& particles)
597{
598 std::list<Particle::Particle>::iterator iter;
599
600 if (!win_created) {
601 vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
602 const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
603 MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
604 win_created = true;
605 }
606
607 MPI_Win_fence(MPI_MODE_NOPRECEDE, win);
608
609 for (iter=particles.begin(); iter!=particles.end(); ++iter)
610 MPI_Accumulate(&iter->Pot(), 1, MPI_DOUBLE, iter->Rank(), iter->Index(), 1, MPI_DOUBLE, MPI_SUM, win);
611
612 MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);
613}
614
615void CommMPI::CommAddPotential(Particle::LinkedCellList& lc)
616{
617 Grid::iterator g_iter;
618 std::list<Particle::Particle>::iterator p_iter;
619
620 if (!win_created) {
621 vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
622 const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
623 MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
624 win_created = true;
625 }
626
627 MPI_Win_fence(MPI_MODE_NOPRECEDE, win);
628
629 for (g_iter=lc.Iterators().Local().Begin(); g_iter!=lc.Iterators().Local().End(); ++g_iter)
630 for (p_iter=lc(*g_iter).begin(); p_iter!=lc(*g_iter).end(); ++p_iter)
631 MPI_Accumulate(&p_iter->Pot(), 1, MPI_DOUBLE, p_iter->Rank(), p_iter->Index(), 1, MPI_DOUBLE, MPI_SUM, win);
632
633 MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);
634}
635
636vmg_float CommMPI::GlobalSum(const vmg_float& value)
637{
638 vmg_float result = value;
639
640 MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, comm_global);
641
642 return result;
643}
644
645vmg_float CommMPI::GlobalSumRoot(const vmg_float& value)
646{
647 vmg_float recv_buffer;
648 vmg_float send_buffer = value;
649
650 MPI_Reduce(&send_buffer, &recv_buffer, 1, MPI_DOUBLE, MPI_SUM, 0, comm_global);
651
652 return recv_buffer;
653}
654
655void CommMPI::GlobalSumArray(vmg_float* array, const vmg_int& size)
656{
657 MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_DOUBLE, MPI_SUM, comm_global);
658}
659
void CommMPI::PrintString(const char* format, ...)
{
  va_list args;
  va_start(args, format);
  /* vsnprintf cannot overrun the fixed-size print_buffer. */
  vsnprintf(print_buffer, sizeof(print_buffer), format, args);
  printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
  va_end(args);
}

void CommMPI::PrintStringOnce(const char* format, ...)
{
  if (GlobalRank() == 0) {
    va_list args;
    va_start(args, format);
    vsnprintf(print_buffer, sizeof(print_buffer), format, args);
    printf("VMG: Rank %d: %s\n", GlobalRank(), print_buffer);
    va_end(args);
  }
}
679
680void CommMPI::PrintXML(const std::string& filename, const std::string& xml_data)
681{
682 MPI_File file;
683 std::stringstream path, xml_header;
684
685 pugi::xml_document doc;
686 pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
687 node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
688 doc.save(xml_header);
689
690 path << OutputPath() << filename;
691
692 char* filename_array = Helper::GetCharArray(path.str());
693 char* xml_header_array = Helper::GetCharArray(xml_header.str());
694 char* str_array = Helper::GetCharArray(xml_data);
695
696 MPI_File_open(MPI_COMM_SELF, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
697 MPI_File_set_size(file, 0);
698 MPI_File_write(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
699 MPI_File_write(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
700 MPI_File_close(&file);
701
702 delete [] filename_array;
703 delete [] xml_header_array;
704 delete [] str_array;
705}
706
707void CommMPI::PrintXMLAll(const std::string& filename, const std::string& xml_data)
708{
709 MPI_File file;
710 std::stringstream path;
711
712 path << OutputPath() << filename;
713
714 char* filename_array = Helper::GetCharArray(path.str());
715 char* str_array = Helper::GetCharArray(xml_data);
716
717 MPI_File_open(comm_global, filename_array, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
718 MPI_File_set_size(file, 0);
719
720 if (GlobalRank() == 0) {
721 std::stringstream xml_header;
722 pugi::xml_document doc;
723 pugi::xml_node node_data = doc.append_child("Global").append_child("NumProcesses").append_child(pugi::node_pcdata);
724 node_data.set_value(Helper::ToString(GlobalProcs().Product()).c_str());
725 doc.save(xml_header);
726
727 char* xml_header_array = Helper::GetCharArray(xml_header.str());
728
729 MPI_File_write_shared(file, xml_header_array, xml_header.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
730
731 delete [] xml_header_array;
732 }
733
734 MPI_File_write_ordered(file, str_array, xml_data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
735 MPI_File_close(&file);
736
737 delete [] filename_array;
738 delete [] str_array;
739}
740
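/* Write the global and per-process decomposition parameters of every multigrid
 * level to "settings.txt" in the output directory, using shared and ordered
 * MPI-IO writes on the per-level global communicator. */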
741void CommMPI::PrintAllSettings()
742{
743 std::stringstream buf;
744 MPI_File file;
745 MPI_Status status;
746
747 Multigrid* mg = MG::GetFactory().Get("SOL")->Cast<Multigrid>();
748
749 buf << OutputPath() << "settings.txt";
750 char *filename = Helper::GetCharArray(buf.str());
751
752 MPI_File_open(comm_global, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
753 MPI_File_set_size(file, 0);
754 MPI_File_close(&file);
755
756 for (int i=mg->MinLevel(); i<=mg->MaxLevel(); ++i) {
757
758 MPI_Comm comm = settings.CommunicatorGlobal((*mg)(i));
759
760 if (comm != MPI_COMM_NULL) {
761
762 MPI_File_open(comm, filename, MPI_MODE_WRONLY|MPI_MODE_CREATE|MPI_MODE_APPEND, MPI_INFO_NULL, &file);
763
764 int comm_rank;
765 MPI_Comm_rank(comm, &comm_rank);
766
767 if (comm_rank == 0) {
768
769 buf.str("");
770 buf << "PROCESS INDEPENDENT INFORMATION:" << std::endl
771 << "GRID LEVEL " << i << std::endl
772 << "BEGINFINEST: " << (*mg)(i).Global().BeginFinest() << std::endl
773 << "ENDFINEST: " << (*mg)(i).Global().EndFinest() << std::endl
774 << "SIZEFINEST: " << (*mg)(i).Global().SizeFinest() << std::endl
775 << "SIZEGLOBAL: " << (*mg)(i).Global().SizeGlobal() << std::endl
776 << "BOUNDARYTYPE: " << (*mg)(i).Global().BoundaryType() << std::endl
777 << "EXTENT" << std::endl
778 << "SIZE: " << (*mg)(i).Extent().Size() << std::endl
779 << "SIZEFACTOR: " << (*mg)(i).Extent().SizeFactor() << std::endl
780 << "BEGIN: " << (*mg)(i).Extent().Begin() << std::endl
781 << "END: " << (*mg)(i).Extent().End() << std::endl
782 << "MESHWIDTH: " << (*mg)(i).Extent().MeshWidth() << std::endl
783 << std::endl
784 << "PROCESS DEPENDENT INFORMATION:" << std::endl;
785
786 char* char_buf = Helper::GetCharArray(buf.str());
787
788 MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, &status);
789
790 delete [] char_buf;
791
792 }
793
794 buf.str("");
795 buf << "PROCESS " << comm_rank << ":" << std::endl
796 << "GLOBAL" << std::endl
797 << "BEGINLOCAL: " << (*mg)(i).Global().BeginLocal() << std::endl
798 << "ENDLOCAL: " << (*mg)(i).Global().EndLocal() << std::endl
799 << "SIZELOCAL: " << (*mg)(i).Global().SizeLocal() << std::endl
800 << "LOCAL" << std::endl
801 << "SIZE: " << (*mg)(i).Local().Size() << std::endl
802 << "SIZETOTAL: " << (*mg)(i).Local().SizeTotal() << std::endl
803 << "BEGIN: " << (*mg)(i).Local().Begin() << std::endl
804 << "END: " << (*mg)(i).Local().End() << std::endl
805 << "HALOBEGIN1: " << (*mg)(i).Local().HaloBegin1() << std::endl
806 << "HALOEND1: " << (*mg)(i).Local().HaloEnd1() << std::endl
807 << "HALOBEGIN2: " << (*mg)(i).Local().HaloBegin2() << std::endl
808 << "HALOEND2: " << (*mg)(i).Local().HaloEnd2() << std::endl
809 << "BOUNDARYBEGIN1: " << (*mg)(i).Local().BoundaryBegin1() << std::endl
810 << "BOUNDARYEND1: " << (*mg)(i).Local().BoundaryEnd1() << std::endl
811 << "BOUNDARYBEGIN2: " << (*mg)(i).Local().BoundaryBegin2() << std::endl
812 << "BOUNDARYEND2: " << (*mg)(i).Local().BoundaryEnd2() << std::endl
813 << "ALIGNMENTBEGIN: " << (*mg)(i).Local().AlignmentBegin() << std::endl
814 << "ALIGNMENTEND: " << (*mg)(i).Local().AlignmentEnd() << std::endl
815 << std::endl;
816
817 char* char_buf = Helper::GetCharArray(buf.str());
818
819 MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, &status);
820
821 delete [] char_buf;
822
823 MPI_File_close(&file);
824
825 }
826 }
827
828 delete [] filename;
829
830}
831
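/* PrintDefect writes the current defect (residual), computed into a temporary
 * grid; the Print*Grid routines below dump grid values, optionally shifted by
 * the correction constant or restricted to inner points, as VTK ImageData
 * files via CreateOutputFiles(). */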
832void CommMPI::PrintDefect(Grid& sol, Grid& rhs, const char* information)
833{
834 TempGrid *temp = MG::GetTempGrid();
835 temp->SetProperties(sol);
836 temp->ImportFromResidual(sol, rhs);
837
838 PrintInnerGrid(*temp, information);
839}
840
841void CommMPI::PrintGrid(Grid& grid, const char* information)
842{
843 Index i;
844 std::stringstream buf;
845
846 Index begin = 0;
847 Index end = grid.Local().End();
848 Index begin_local, end_local, begin_global, end_global;
849
850 CommToGhosts(grid);
851
852 for (int j=0; j<3; ++j)
853 if (grid.Global().EndLocal()[j] == grid.Global().SizeGlobal()[j])
854 end[j] = grid.Local().SizeTotal()[j];
855
856 for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
857 for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
858 for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
859 buf << std::scientific << grid.GetVal(i) << " ";
860
861 begin_local = grid.Global().BeginLocal();
862 end_local = grid.Global().EndLocal();
863 begin_global = 0;
864
865 for (int i=0; i<3; ++i) {
866
867 if (BoundaryConditions()[i] == Dirichlet ||
868 BoundaryConditions()[i] == Open) {
869
870 if (begin_local[i] != 0)
871 begin_local[i] -= grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i];
872
873 end_global[i] = grid.Global().SizeGlobal()[i];
874
875 }else if (BoundaryConditions()[i] == Periodic) {
876
877 if (end_local[i] == grid.Global().SizeGlobal()[i])
878 end_local[i] += grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i] + grid.Local().HaloEnd2()[i] - grid.Local().HaloBegin2()[i];
879 else
880 end_local[i] += grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i];
881
882 end_global[i] = grid.Global().SizeGlobal()[i] +
883 grid.Local().HaloEnd1()[i] - grid.Local().HaloBegin1()[i] +
884 grid.Local().HaloEnd2()[i] - grid.Local().HaloBegin2()[i];
885
886 }
887
888 }
889
890 CreateOutputFiles(grid, buf, information,
891 begin_global, end_global,
892 begin_local, end_local);
893}
894
895void CommMPI::PrintCorrectedGrid(Grid& grid, const char* information)
896{
897 Index i;
898
899 std::stringstream buf;
900
901 Index begin = 0;
902 Index end = grid.Local().End();
903 Index begin_local, end_local, begin_global, end_global;
904
905 CommToGhosts(grid);
906
907 for (int j=0; j<3; ++j)
908 if (grid.Global().EndLocal()[j] == grid.Global().SizeGlobal()[j])
909 end[j] = grid.Local().SizeTotal()[j];
910
911 for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
912 for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
913 for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
914 buf << std::scientific << grid.GetVal(i) - Grid::Correction() << " ";
915
916 begin_local = grid.Global().BeginLocal();
917 end_local = grid.Global().EndLocal();
918 begin_global = 0;
919
920 for (int i=0; i<3; ++i) {
921
922 if (BoundaryConditions()[i] == Dirichlet ||
923 BoundaryConditions()[i] == Open) {
924
925 if (begin_local[i] != 0)
926 --begin_local[i];
927
928 end_global[i] = grid.Global().SizeGlobal()[i];
929
930 }else if (BoundaryConditions()[i] == Periodic) {
931
932 if (begin_local[i] != 0)
933 --begin_local[i];
934
935 if (end_local[i] == grid.Global().SizeGlobal()[i])
936 end_local[i] += 2;
937
938 end_global[i] = grid.Global().SizeGlobal()[i] + 2;
939
940 }
941
942 }
943
944 CreateOutputFiles(grid, buf, information,
945 0, grid.Global().SizeGlobal()+2,
946 begin, end);
947}
948
949void CommMPI::PrintInnerGrid(Grid& grid, const char* information)
950{
951 Index i;
952 Index begin_local, end_local, begin_global, end_global;
953 std::stringstream buf;
954
955 CommToGhosts(grid);
956
957 Index begin = 0;
958 Index end = grid.Local().End();
959
960 /* VTK parallel ImageData files seem to require some overlap. */
961 for (int j=0; j<3; ++j)
962 if (grid.Global().BeginLocal()[j] == 0)
963 ++begin[j];
964
965 /* Write grid values into buffer */
966 for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
967 for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
968 for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
969 buf << std::scientific << grid.GetVal(i) << " ";
970
971 for (int j=0; j<3; ++j) {
972
973 if (BoundaryConditions()[j] == Dirichlet ||
974 BoundaryConditions()[j] == Open) {
975
976 begin_global[j] = 1;
977 end_global[j] = grid.Global().SizeGlobal()[j] - 1;
978
979 if (grid.Global().BeginLocal()[j] == 0)
980 begin_local[j] = 1;
981 else
982 begin_local[j] = grid.Global().BeginLocal()[j] - 1;
983
984 if (grid.Global().EndLocal()[j] == grid.Global().SizeGlobal()[j])
985 end_local[j] = grid.Global().EndLocal()[j] - 1;
986 else
987 end_local[j] = grid.Global().EndLocal()[j];
988
989 }else if (BoundaryConditions()[j] == Periodic) {
990
991 begin_global[j] = 0;
992 end_global[j] = grid.Global().SizeGlobal()[j];
993
994 if (grid.Global().BeginLocal()[j] == 0)
995 begin_local[j] = grid.Global().BeginLocal()[j];
996 else
997 begin_local[j] = grid.Global().BeginLocal()[j] - 1;
998
999 end_local[j] = grid.Global().EndLocal()[j];
1000
1001 }else {
1002
1003 assert(0 == "Boundary condition not handled properly.");
1004
1005 }
1006
1007 }
1008
1009 CreateOutputFiles(grid, buf, information,
1010 begin_global, end_global,
1011 begin_local, end_local);
1012}
1013
1014void CommMPI::PrintInnerCorrectedGrid(Grid& grid, const char* information)
1015{
1016 Index i;
1017 const Index begin = grid.Local().Begin();
1018 const Index end = grid.Local().End();
1019 std::stringstream buf;
1020
1021 for (i.Z()=begin.Z(); i.Z()<end.Z(); ++i.Z())
1022 for (i.Y()=begin.Y(); i.Y()<end.Y(); ++i.Y())
1023 for (i.X()=begin.X(); i.X()<end.X(); ++i.X())
1024 buf << std::scientific << grid.GetVal(i) - Grid::Correction() << " ";
1025
1026 CreateOutputFiles(grid, buf, information,
1027 0, grid.Global().SizeGlobal(),
1028 grid.Global().BeginLocal(), grid.Global().EndLocal());
1029}
1030
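/* Write one serial VTK ImageData piece (.vti) per rank containing serial_data,
 * plus a parallel .pvti master file that references all pieces of this grid. */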
1031void CommMPI::CreateOutputFiles(const Grid& grid, const std::stringstream& serial_data, const char* information,
1032 const Index& begin_global, const Index& end_global,
1033 const Index& begin_local, const Index& end_local)
1034{
1035 int output_count = OutputCount();
1036
1037 MPI_Comm comm = settings.CommunicatorGlobal(grid);
1038
1039 if (comm != MPI_COMM_NULL) {
1040
1041 MPI_File file;
1042 std::string conv_information = Helper::ReplaceWhitespaces(information, "_");
1043
1044 CreateParallelOutputFile(grid, comm, output_count, conv_information.c_str(),
1045 begin_global, end_global, begin_local, end_local);
1046
1047 file = CreateSerialOutputFile(grid, comm, output_count, conv_information.c_str(),
1048 begin_global, end_global, begin_local, end_local);
1049
1050 char *char_buf = Helper::GetCharArray(serial_data.str());
1051 MPI_File_write(file, char_buf, serial_data.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
1052 delete [] char_buf;
1053
1054 FinalizeSerialOutputFile(file);
1055
1056 }
1057}
1058
1059void CommMPI::CreateParallelOutputFile(const Grid& grid, MPI_Comm& comm,
1060 const int& output_count, const char* information,
1061 const Index& begin_global, const Index& end_global,
1062 const Index& begin_local, const Index& end_local)
1063{
  int rank;
  MPI_File file;
  char parallel_filename[513], serial_filename[513];
  std::stringstream buf;

  MPI_Comm_rank(comm, &rank);

  /* snprintf keeps long output paths from overflowing the fixed-size filename buffers. */
  snprintf(parallel_filename, sizeof(parallel_filename), "%s%04d.pvti", OutputPath().c_str(), output_count);
  snprintf(serial_filename, sizeof(serial_filename), "%04d_%d.vti", output_count, rank);
1073
1074 MPI_File_open(comm, parallel_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
1075 MPI_File_set_size(file, 0);
1076
1077 if (rank == 0) {
1078
1079 buf << "<?xml version=\"1.0\"?>" << std::endl
1080 << "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
1081 << " <PImageData WholeExtent=\"";
1082
1083 for (int i=0; i<3; ++i)
1084 buf << begin_global[i] << " " << end_global[i] - 1 << " ";
1085
1086 buf << "\"" << std::endl
1087 << " GhostLevel=\"0\" Origin=\"0 0 0\" Spacing=\"";
1088
1089 for (int i=0; i<3; ++i)
1090 buf << grid.Extent().MeshWidth()[i] << " ";
1091
1092 buf << "\">" << std::endl
1093 << " <PPointData Scalars=\"" << information << "\">" << std::endl
1094 << " <PDataArray type=\"Float32\" Name=\"" << information << "\"/>" << std::endl
1095 << " </PPointData>" << std::endl;
1096
1097 char* char_buf = Helper::GetCharArray(buf.str());
1098
1099 MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
1100
1101 delete [] char_buf;
1102 }
1103
1104 buf.str("");
1105 buf << " <Piece Extent=\"";
1106
1107 for (int i=0; i<3; ++i)
1108 buf << begin_local[i] << " " << end_local[i] - 1 << " ";
1109
1110 buf << "\" Source=\"" << serial_filename << "\"/>" << std::endl;
1111
1112 char* char_buf = Helper::GetCharArray(buf.str());
1113
1114 MPI_File_write_ordered(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
1115
1116 delete [] char_buf;
1117
1118 if (rank == 0) {
1119
1120 buf.str("");
1121
1122 buf << " </PImageData>" << std::endl
1123 << "</VTKFile>" << std::endl;
1124
1125 char* char_buf = Helper::GetCharArray(buf.str());
1126
1127 MPI_File_write_shared(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
1128
1129 delete [] char_buf;
1130 }
1131
1132 MPI_File_close(&file);
1133}
1134
1135MPI_File CommMPI::CreateSerialOutputFile(const Grid& grid, MPI_Comm& comm,
1136 const int& output_count, const char* information,
1137 const Index& begin_global, const Index& end_global,
1138 const Index& begin_local, const Index& end_local)
1139{
  char serial_filename[513];
  int rank;
  MPI_File file;
  std::stringstream buf;

  MPI_Comm_rank(comm_global, &rank);

  snprintf(serial_filename, sizeof(serial_filename), "%s%04d_%d.vti", OutputPath().c_str(), output_count, rank);
1148
1149 MPI_File_open(MPI_COMM_SELF, serial_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
1150
1151 buf << "<?xml version=\"1.0\"?>" << std::endl
1152 << "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\">" << std::endl
1153 << " <ImageData WholeExtent=\"";
1154
1155 for (int i=0; i<3; ++i)
1156 buf << begin_global[i] << " " << end_global[i] - 1 << " ";
1157
1158 buf << "\"" << std::endl
1159 << " Origin=\"0 0 0\" Spacing=\"";
1160
1161 for (int i=0; i<3; ++i)
1162 buf << grid.Extent().MeshWidth()[i] << " ";
1163
1164 buf << "\">" << std::endl
1165 << " <Piece Extent=\"";
1166
1167 for (int i=0; i<3; ++i)
1168 buf << begin_local[i] << " " << end_local[i] - 1 << " ";
1169
1170 buf << "\">" << std::endl
1171 << " <PointData Scalars=\"" << information << "\">" << std::endl
1172 << " <DataArray type=\"Float32\" Name=\"" << information << "\" format=\"ascii\">" << std::endl
1173 << " ";
1174
1175 char* char_buf = Helper::GetCharArray(buf.str());
1176 MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
1177 delete [] char_buf;
1178
1179 return file;
1180}
1181
1182void CommMPI::FinalizeSerialOutputFile(MPI_File& file)
1183{
1184 std::stringstream buf;
1185
1186 buf << std::endl
1187 << " </DataArray>" << std::endl
1188 << " </PointData>" << std::endl
1189 << " </Piece>" << std::endl
1190 << " </ImageData>" << std::endl
1191 << "</VTKFile>" << std::endl;
1192
1193 char* char_buf = Helper::GetCharArray(buf.str());
1194 MPI_File_write(file, char_buf, buf.str().size(), MPI_CHAR, MPI_STATUS_IGNORE);
1195 delete [] char_buf;
1196
1197 MPI_File_close(&file);
1198}
1199
1200int CommMPI::GlobalRank() const
1201{
1202 int rank;
1203
1204 MPI_Comm_rank(comm_global, &rank);
1205
1206 return rank;
1207}
1208
1209Index CommMPI::GlobalPos() const
1210{
1211 Index dims, periods, coords;
1212
1213 MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
1214
1215 return coords;
1216}
1217
1218Index CommMPI::GlobalProcs() const
1219{
1220 Index dims, periods, coords;
1221
1222 MPI_Cart_get(comm_global, 3, dims.vec(), periods.vec(), coords.vec());
1223
1224 return dims;
1225}
1226
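/* Set up the global communicator: reuse comm if it already carries a Cartesian
 * topology, otherwise create a 3D Cartesian communicator, periodic in the
 * directions with periodic boundary conditions. Also creates the "no_locks"
 * MPI_Info object used for the one-sided communication window. */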
1227void CommMPI::InitCommMPI(const MPI_Comm& comm)
1228{
1229 int status, size, rank;
1230 int dims[3] = {0, 0, 0};
1231 int periods[3];
1232
1233 for (int i=0; i<3; ++i)
1234 periods[i] = (BoundaryConditions()[i] == Periodic ? 1 : 0);
1235
1236 MPI_Comm_size(comm, &size);
1237 MPI_Comm_rank(comm, &rank);
1238
1239 MPI_Topo_test(comm, &status);
1240
1241 if (status == MPI_CART) {
1242
1243 comm_global = comm;
1244
1245 }else {
1246
1247 MPI_Dims_create(size, 3, dims);
1248
1249 MPI_Cart_create(comm, 3, dims, periods, 1, &comm_global);
1250
1251 }
1252
1253 MPI_Info_create(&info);
1254 char key[] = "no_locks";
1255 char val[] = "true";
1256 MPI_Info_set(info, key, val);
1257
1258}
1259
1260CommMPI::~CommMPI()
1261{
1262 MPI_Comm_free(&comm_global);
1263
1264 if (win_created)
1265 MPI_Win_free(&win);
1266
1267 MPI_Info_free(&info);
1268}
1269
1270Grid& CommMPI::GetCoarserGrid(Multigrid& multigrid)
1271{
1272 return settings.CoarserGrid(multigrid(multigrid.Level()));
1273}
1274
1275Grid& CommMPI::GetFinerGrid(Multigrid& multigrid)
1276{
1277 return settings.FinerGrid(multigrid(multigrid.Level()));
1278}
1279
1280Grid& CommMPI::GetGlobalCoarseGrid(Multigrid& multigrid)
1281{
1282 Grid& global_coarse_grid = settings.GlobalCoarseGrid();
1283 CommSubgrid(multigrid(multigrid.MinLevel()), global_coarse_grid);
1284 return global_coarse_grid;
1285}
1286
1287
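/* Create a time-stamped directory below ./output on rank 0, appending a numeric
 * suffix if it already exists, and broadcast the resulting path to all ranks.
 * Without Boost.Filesystem the current working directory is used instead. */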
1288std::string CommMPI::CreateOutputDirectory()
1289{
1290#ifdef HAVE_BOOST_FILESYSTEM
1291 std::string path, unique_path;
1292 std::stringstream unique_suffix;
1293 int suffix_counter = 0;
1294 char buffer[129];
1295 time_t rawtime;
1296 struct tm *timeinfo;
1297 int path_size;
1298 char* path_array;
1299
1300 if (GlobalRank() == 0) {
1301
1302 time(&rawtime);
1303 timeinfo = localtime(&rawtime);
1304 strftime(buffer, 128, "./output/%Y_%m_%d_%H_%M_%S/", timeinfo);
1305 path = buffer;
1306
1307 if (!fs::exists("output"))
1308 fs::create_directory("output");
1309
1310 unique_path = path;
1311
1312 while (fs::exists(unique_path.c_str())) {
1313
1314 unique_suffix.str("");
1315 unique_suffix << "_" << suffix_counter++ << "/";
1316
1317 unique_path = path;
1318 unique_path.replace(unique_path.size()-1, 1, unique_suffix.str());
1319
1320 }
1321
1322 fs::create_directory(unique_path.c_str());
1323
1324 path_size = unique_path.size() + 1;
1325 path_array = Helper::GetCharArray(unique_path);
1326
1327 MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
1328
1329 }else {
1330
1331 MPI_Bcast(&path_size, 1, MPI_INT, 0, comm_global);
1332 path_array = new char[path_size];
1333
1334 }
1335
1336 MPI_Bcast(path_array, path_size, MPI_CHAR, 0, comm_global);
1337
1338 unique_path = path_array;
1339
1340 delete [] path_array;
1341
1342 return unique_path;
1343
1344#else
1345
1346 return "./";
1347
1348#endif
1349}
1350
1351
1352#endif /* HAVE_MPI */