source: src/comm/mpi/datatype.cpp@ 716da7

Last change on this file since 716da7 was 716da7, checked in by Julian Iseringhausen <isering@…>, 14 years ago

Fix energy calculation.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1729 5161e1c8-67bf-11de-9fd5-51895aff932f

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifndef HAVE_MPI
#error MPI is needed to compile VMG::MPI::Datatype
#endif

#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif

#include <cassert>
#include <cstring>

#include "comm/mpi/datatype.hpp"

using namespace VMG;

// Point-to-point transfers using the derived subarray datatype: the grid block
// described by _type is sent/received in place, without packing.
void VMG::MPI::Datatype::Send(Grid& grid, const int& tag, const MPI_Comm& comm) const
{
  if (Feasible())
    MPI_Send(&grid(0), 1, _type, _rank, _tag_send+tag, comm);
}

void VMG::MPI::Datatype::Isend(Grid& grid, const int& tag, const MPI_Comm& comm, MPI_Request& request) const
{
  if (Feasible())
    MPI_Isend(&grid(0), 1, _type, _rank, _tag_send+tag, comm, &request);
}

void VMG::MPI::Datatype::Recv(Grid& grid, const int& tag, const MPI_Comm& comm) const
{
  if (Feasible())
    MPI_Recv(&grid(0), 1, _type, _rank, _tag_recv+tag, comm, MPI_STATUS_IGNORE);
}

void VMG::MPI::Datatype::Irecv(Grid& grid, const int& tag, const MPI_Comm& comm, MPI_Request& request) const
{
  if (Feasible())
    MPI_Irecv(&grid(0), 1, _type, _rank, _tag_recv+tag, comm, &request);
}

// Buffered variants: the block is packed row by row (contiguous in Z) into
// _buffer and transferred as a plain array of doubles.
void VMG::MPI::Datatype::SendBuffered(const Grid& grid, const int& tag, const MPI_Comm& comm)
{
  if (Feasible()) {

    Index i;
    int c = 0;
    const Index end = _starts + _subsizes;
    const size_t memcpy_size = _subsizes.Z() * sizeof(vmg_float);

    for (i.X()=_starts.X(); i.X()<end.X(); ++i.X())
      for (i.Y()=_starts.Y(); i.Y()<end.Y(); ++i.Y()) {
        std::memcpy(&_buffer[c], &grid.GetVal(i.X(), i.Y(), _starts.Z()), memcpy_size);
        c += _subsizes.Z();
      }

    MPI_Send(&_buffer.front(), _buffer.size(), MPI_DOUBLE, _rank, _tag_send+tag, comm);
  }
}

void VMG::MPI::Datatype::IsendBuffered(const Grid& grid, const int& tag, const MPI_Comm& comm, MPI_Request& request)
{
  if (Feasible()) {

    Index i;
    int c = 0;
    const Index end = _starts + _subsizes;
    const size_t memcpy_size = _subsizes.Z() * sizeof(vmg_float);

    for (i.X()=_starts.X(); i.X()<end.X(); ++i.X())
      for (i.Y()=_starts.Y(); i.Y()<end.Y(); ++i.Y()) {
        std::memcpy(&_buffer[c], &grid.GetVal(i.X(), i.Y(), _starts.Z()), memcpy_size);
        c += _subsizes.Z();
      }

    assert(c == static_cast<int>(_buffer.size()));

    MPI_Isend(&_buffer.front(), _buffer.size(), MPI_DOUBLE, _rank, _tag_send+tag, comm, &request);
  }
}

void VMG::MPI::Datatype::RecvBuffered(const int& tag, const MPI_Comm& comm)
{
  if (Feasible())
    MPI_Recv(&_buffer.front(), _buffer.size(), MPI_DOUBLE, _rank, _tag_recv+tag, comm, MPI_STATUS_IGNORE);
}

void VMG::MPI::Datatype::IrecvBuffered(const int& tag, const MPI_Comm& comm, MPI_Request& request)
{
  if (Feasible())
    MPI_Irecv(&_buffer.front(), _buffer.size(), MPI_DOUBLE, _rank, _tag_recv+tag, comm, &request);
}

// Unpack a received buffer into the grid, overwriting the block values.
void VMG::MPI::Datatype::GridReplace(Grid& grid) const
{
  if (Feasible()) {

    Index i;
    int c = 0;
    const Index end = _starts + _subsizes;
    const size_t memcpy_size = _subsizes.Z() * sizeof(vmg_float);

    for (i.X()=_starts.X(); i.X()<end.X(); ++i.X())
      for (i.Y()=_starts.Y(); i.Y()<end.Y(); ++i.Y()) {
        std::memcpy(&grid(i.X(), i.Y(), _starts.Z()), &_buffer[c], memcpy_size);
        c += _subsizes.Z();
      }

    assert(c == static_cast<int>(_buffer.size()));
  }
}

// Unpack a received buffer into the grid, adding it onto the block values.
void VMG::MPI::Datatype::GridSum(Grid& grid) const
{
  if (Feasible()) {

    Index i;
    const Index end = _starts + _subsizes;
    std::vector<vmg_float>::const_iterator iter = _buffer.begin();

    for (i.X()=_starts.X(); i.X()<end.X(); ++i.X())
      for (i.Y()=_starts.Y(); i.Y()<end.Y(); ++i.Y())
        for (i.Z()=_starts.Z(); i.Z()<end.Z(); ++i.Z())
          grid(i) += *iter++;

    assert(iter == _buffer.end());
  }
}

// Describe the block to communicate (full grid extent, block extent, block offset),
// the partner rank and the send/receive tag offsets, then rebuild the datatype.
void VMG::MPI::Datatype::Set(const GridIteratorSet& bounds, const Grid& grid, const int& rank,
                             const int& tag_send, const int& tag_receive)
{
  _sizes = grid.Local().SizeTotal();
  _subsizes = bounds.Begin().GetEnd() - bounds.Begin().GetBegin();
  _starts = bounds.Begin().GetBegin();
  _rank = rank;
  _tag_send = tag_send;
  _tag_recv = tag_receive;

  if (_type != MPI_DATATYPE_NULL)
    MPI_Type_free(&_type);

  InitDatatype();
}

void VMG::MPI::Datatype::Set(const Index& sizes, const Index& subsizes, const Index& starts, const int& rank,
                             const int& tag_send, const int& tag_receive)
{
  _sizes = sizes;
  _subsizes = subsizes;
  _starts = starts;
  _rank = rank;
  _tag_send = tag_send;
  _tag_recv = tag_receive;

  if (_type != MPI_DATATYPE_NULL)
    MPI_Type_free(&_type);

  InitDatatype();
}

// Create and commit the derived subarray datatype for the block and, if requested,
// allocate the pack buffer used by the *Buffered methods.
void VMG::MPI::Datatype::InitDatatype()
{
  if (Feasible()) {
    MPI_Type_create_subarray(3, _sizes.vec(), _subsizes.vec(), _starts.vec(), MPI_ORDER_C, MPI_DOUBLE, &_type);
    MPI_Type_commit(&_type);
    if (_alloc_buffer)
      _buffer.resize(_subsizes.Product());
  } else {
    _type = MPI_DATATYPE_NULL;
  }
}
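
For orientation, the following is a minimal standalone sketch (not part of the VMG sources; all names in it are hypothetical) of the subarray-datatype technique that InitDatatype and Send rely on: a rectangular block of a 3D grid is described once with MPI_Type_create_subarray and then transferred with count 1, instead of being packed by hand as in the *Buffered methods.

// Standalone sketch: send one 4x4x4 block of an 8x8x8 grid between two ranks
// using a derived subarray datatype. Run with at least two MPI ranks.
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Local 3D grid stored contiguously in C (row-major) order.
  int sizes[3]    = {8, 8, 8};   // full local grid extent
  int subsizes[3] = {4, 4, 4};   // extent of the block to exchange
  int starts[3]   = {2, 2, 2};   // offset of the block inside the grid
  std::vector<double> grid(sizes[0] * sizes[1] * sizes[2], rank == 0 ? 1.0 : 0.0);

  // Describe the block as a derived datatype, analogous to InitDatatype().
  MPI_Datatype block;
  MPI_Type_create_subarray(3, sizes, subsizes, starts, MPI_ORDER_C, MPI_DOUBLE, &block);
  MPI_Type_commit(&block);

  // Transfer the block in place with count 1, analogous to Send()/Recv().
  if (rank == 0)
    MPI_Send(&grid.front(), 1, block, 1, 0, MPI_COMM_WORLD);
  else if (rank == 1)
    MPI_Recv(&grid.front(), 1, block, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

  MPI_Type_free(&block);
  MPI_Finalize();
  return 0;
}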