source: src/interface/interface_particles_cf.cpp@d24c2f

Last change on this file since d24c2f was 76e019, checked in by Julian Iseringhausen <isering@…>, 14 years ago

Check that mpi.h will be included as the first header.

Needed by certain MPI implementations.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1248 5161e1c8-67bf-11de-9fd5-51895aff932f

/**
 * @file interface_particles_cf.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Fri Sep 16 13:24:48 2011
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#endif

#include <cassert>
#include <cstring>
#include <cstdio>
#include <fstream>
#include <sstream>

#include "base/factory.hpp"
#include "base/helper.hpp"
#include "interface/interface_particles.hpp"
#include "interface/interface_particles_cf.hpp"
#include "level/level_operator_cs.hpp"
#include "level/level_operator_fas.hpp"
#include "samples/discretization_poisson_fd.hpp"
#include "samples/stencils.hpp"
#include "samples/techniques.hpp"
#include "solver/solver_regular.hpp"
#include "solver/solver_singular.hpp"
#include "smoother/gsrb.hpp"
#include "thirdparty/pugixml/pugixml.hpp"

#ifdef HAVE_MPI
#include "comm/comm_mpi.hpp"
#include "comm/domain_decomposition_mpi.hpp"
#else
#include "comm/comm_serial.hpp"
#endif

#ifdef HAVE_LAPACK
#include "solver/dgesv.hpp"
#else
#include "solver/givens.hpp"
#endif

using namespace VMG;

InterfaceParticlesCF::InterfaceParticlesCF(const char* filename)
{
  Factory& factory = MG::GetFactory();

  int max_level, max_iterations;
  int pre_smoothing_steps, post_smoothing_steps;
  int gamma, near_field_cells;
  vmg_float precision;
  std::string lop_str, datafile_str;

  pugi::xml_document doc;

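  // In the MPI build only rank 0 reads the configuration file; the raw
  // buffer is then broadcast so that every rank parses identical input.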
#ifdef HAVE_MPI
  int rank;
  long file_size;
  char* file_content = NULL;

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 0) {
    std::ifstream file(filename);

    if (!file.is_open() || !file.good()) {
      MPI_Abort(MPI_COMM_WORLD, MPI_ERR_OTHER);
    }

    assert(file.is_open() && file.good());

    std::filebuf* buf = file.rdbuf();
    file_size = buf->pubseekoff(0, std::ios::end, std::ios::in);
    buf->pubseekpos(0, std::ios::in);
    file_content = static_cast<char*>(pugi::get_memory_allocation_function()(file_size*sizeof(char)));
    buf->sgetn(file_content, file_size);

    file.close();
  }

  // A broadcast cannot be probed for its size, so the file size is
  // broadcast first and the receivers allocate before the content follows.
  MPI_Bcast(&file_size, 1, MPI_LONG, 0, MPI_COMM_WORLD);
  if (rank != 0)
    file_content = static_cast<char*>(pugi::get_memory_allocation_function()(file_size*sizeof(char)));
  MPI_Bcast(file_content, file_size, MPI_CHAR, 0, MPI_COMM_WORLD);

  doc.load_buffer_inplace_own(file_content, file_size);
#else
  doc.load_file(filename);
#endif

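  // The configuration parsed below is expected to look roughly like this.
  // The root element name is arbitrary, since document_element() takes
  // whatever root is present; only the <conf> child and the tag names read
  // below matter. The values shown are the compiled-in defaults, and
  // <vmg> and particles.dat are made-up placeholders:
  //
  //   <vmg>
  //     <conf>
  //       <max_level>7</max_level>
  //       <max_iterations>20</max_iterations>
  //       <pre_smoothing_steps>3</pre_smoothing_steps>
  //       <post_smoothing_steps>3</post_smoothing_steps>
  //       <gamma>1</gamma>
  //       <near_field_cells>3</near_field_cells>
  //       <precision>1.0e-7</precision>
  //       <level_operator>cs</level_operator>
  //       <datafile>particles.dat</datafile>
  //     </conf>
  //   </vmg>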
  pugi::xml_node xml_conf = doc.document_element().child("conf");

  max_level = Helper::ToValWithDefault<int>(xml_conf.child_value("max_level"), 7);
  max_iterations = Helper::ToValWithDefault<int>(xml_conf.child_value("max_iterations"), 20);
  pre_smoothing_steps = Helper::ToValWithDefault<int>(xml_conf.child_value("pre_smoothing_steps"), 3);
  post_smoothing_steps = Helper::ToValWithDefault<int>(xml_conf.child_value("post_smoothing_steps"), 3);
  gamma = Helper::ToValWithDefault<int>(xml_conf.child_value("gamma"), 1);
  near_field_cells = Helper::ToValWithDefault<int>(xml_conf.child_value("near_field_cells"), 3);
  precision = Helper::ToValWithDefault<vmg_float>(xml_conf.child_value("precision"), 1.0e-7);
  lop_str = xml_conf.child_value("level_operator");
  datafile_str = xml_conf.child_value("datafile");

  // TODO: Hardcoded periodic boundary conditions are just for testing
  Boundary boundary(Periodic, Periodic, Periodic);

#ifdef HAVE_MPI
  Comm* comm = new CommMPI(boundary, new DomainDecompositionMPI());
#else
  Comm* comm = new CommSerial(boundary);
#endif
  comm->Register("COMM");

  Interface* interface = new InterfaceParticles(boundary, 2, max_level, 0.0, 1.0, near_field_cells);
  MG::SetInterface(interface, comm);

  Discretization* discretization = new DiscretizationPoissonFD();
  discretization->Register("DISCRETIZATION");

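  // <level_operator> selects the coarse-grid treatment: "cs" configures the
  // correction scheme, "fas" the full approximation scheme, and the
  // "*_debug" variants install the corresponding debug technique setup.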
  LevelOperator* lop;
  if (lop_str == "cs") {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else if (lop_str == "fas") {
    lop = new LevelOperatorFAS(Stencils::RestrictionFullWeight, Stencils::Injection, Stencils::InterpolationTrilinear);
    Techniques::SetFullApproximationSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else if (lop_str == "cs_debug") {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodicDebug(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else if (lop_str == "fas_debug") {
    lop = new LevelOperatorFAS(Stencils::RestrictionFullWeight, Stencils::Injection, Stencils::InterpolationTrilinear);
    Techniques::SetFullApproximationSchemePeriodicDebug(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else {
    // Unknown or empty <level_operator> values fall back to the correction scheme.
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), gamma);
  }
  lop->Register("LEVEL_OPERATOR");

  Smoother* smoother = new GaussSeidelRB();
  smoother->Register("SMOOTHER");

#ifdef HAVE_LAPACK
  Solver* solver = new DGESV<SolverSingular>();
#else
  Solver* solver = new Givens<SolverSingular>();
#endif
  solver->Register("SOLVER");

  factory.RegisterObjectStorage("MAX_ITERATION", max_iterations);
  factory.RegisterObjectStorage("PRESMOOTHSTEPS", pre_smoothing_steps);
  factory.RegisterObjectStorage("POSTSMOOTHSTEPS", post_smoothing_steps);
  factory.RegisterObjectStorage("PRECISION", precision);

  factory.RegisterObjectStorage("PARTICLE_NEAR_FIELD_CELLS", near_field_cells);

#ifdef HAVE_MPI
  // rank is only defined in the MPI build; the serial build always prints.
  if (rank == 0) {
#endif
    std::printf("VMG settings:\n");
    std::printf(" Maximum level: %d\n", max_level);
    std::printf(" Maximum number of iterations: %d\n", max_iterations);
    std::printf(" Gamma: %d\n", gamma);
    std::printf(" Pre smoothing steps: %d\n", pre_smoothing_steps);
    std::printf(" Post smoothing steps: %d\n", post_smoothing_steps);
    std::printf(" Precision: %e\n", precision);
    std::printf(" Near field cells: %d\n", near_field_cells);
#ifdef HAVE_MPI
  }
#endif

  LoadDatafile(filename, datafile_str);
}

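// A minimal usage sketch (hypothetical driver code; the class registers
// everything with the global MG instance as a side effect of construction,
// and MPI_Init is assumed to have been called already in the MPI build):
//
//   InterfaceParticlesCF conf("conf.xml");
//   // ... run the multigrid solver via the MG singleton ...
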
void InterfaceParticlesCF::LoadDatafile(const std::string& conf_filename, const std::string& data_filename)
{
  Factory& factory = MG::GetFactory();

#ifdef HAVE_MPI
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 0) {
#endif

  std::ifstream file(data_filename.c_str());

  // If the datafile is not found as given, retry relative to the working
  // directory and then relative to the directory of the config file.
  if (!file.is_open() || !file.good()) {
    std::stringstream search_file;
    search_file << "./" << data_filename;
    file.clear();
    file.open(search_file.str().c_str());
  }

  if (!file.is_open() || !file.good()) {
    std::stringstream search_file;
    search_file << conf_filename.substr(0, conf_filename.find_last_of('/')+1) << data_filename;
    file.clear();
    file.open(search_file.str().c_str());
  }

  assert(file.is_open() && file.good());

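  // Each record in the datafile is a whitespace-separated quadruple
  // "x y z q" (position and charge). Hypothetical example contents:
  //
  //   0.25 0.50 0.75  1.0
  //   0.75 0.50 0.25 -1.0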
  std::stringstream data;
  vmg_float x, y, z, q;
  data << file.rdbuf();

  // Extraction is used directly as the loop condition so that a final
  // record without a trailing newline is not dropped.
  while (data >> x >> y >> z >> q) {
    data_vec.push_back(x);
    data_vec.push_back(y);
    data_vec.push_back(z);
    data_vec.push_back(q);
  }

#ifdef HAVE_MPI
  }
#endif

  factory.RegisterObjectStorage("PARTICLE_NUM_LOCAL", static_cast<int>(data_vec.size()/4));

  vmg_float* x_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY", 3*(data_vec.size()/4));
  vmg_float* q_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY", data_vec.size()/4);
  vmg_float* p_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY", data_vec.size()/4);
  vmg_float* f_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_FORCE_ARRAY", 3*(data_vec.size()/4));

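  // The flat data_vec (x,y,z,q per particle) is unpacked below into
  // separate position and charge arrays; potential and force arrays are
  // zero-initialized outputs of matching shape (N = data_vec.size()/4):
  //   PARTICLE_POS_ARRAY      x0 y0 z0 x1 y1 z1 ...  (3N values)
  //   PARTICLE_CHARGE_ARRAY   q0 q1 ...              (N values)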
  for (unsigned int i=0; i<data_vec.size()/4; ++i) {
    std::memcpy(&x_arr[3*i], &data_vec[4*i], 3*sizeof(vmg_float));
    q_arr[i] = data_vec[4*i+3];
    p_arr[i] = 0.0;
    f_arr[3*i+0] = 0.0;
    f_arr[3*i+1] = 0.0;
    f_arr[3*i+2] = 0.0;
  }

  data_vec.clear();
}
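
// A draft of the matching particle redistribution step is kept commented
// out below: rank 0 gathers the local grid extents of all ranks, bins each
// (x,y,z,q) quadruple into the send buffer of the rank whose subdomain
// contains it, and scatters the buffers with point-to-point messages.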
// void InterfaceParticlesCF::CommParticles(const Grid& grid)
// {
// #ifdef HAVE_MPI
//   int rank, size;
//   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
//   MPI_Comm_size(MPI_COMM_WORLD, &size);

//   std::vector<double> send_buffer[size];

//   int global_extent[6];

//   for (int i=0; i<3; ++i) {
//     global_extent[i] = grid.Global().BeginLocal()[i];
//     global_extent[i+3] = grid.Global().EndLocal()[i];
//   }

//   if (rank == 0) {

//     int global_extent_all[6*size];
//     Index index;

//     MPI_Gather(global_extent, 6, MPI_INT, global_extent_all, 6, MPI_INT, 0, MPI_COMM_WORLD);

//     for (unsigned int i=0; i<data_vec.size()/4; ++i) {
//       index = (static_cast<Vector>(&(data_vec[4*i])) - grid.Extent().Begin()) / grid.Extent().MeshWidth();

//       for (int j=0; j<size; ++j) {
//         if (index[0] >= global_extent_all[6*j+0] && index[0] < global_extent_all[6*j+3] &&
//             index[1] >= global_extent_all[6*j+1] && index[1] < global_extent_all[6*j+4] &&
//             index[2] >= global_extent_all[6*j+2] && index[2] < global_extent_all[6*j+5]) {
//           for (int k=0; k<4; ++k)
//             send_buffer[j].push_back(data_vec[4*i+k]);
//           break;
//         }
//       }

//     }

//     data_vec.clear();

//     for (int i=0; i<size; ++i)
//       MPI_Isend(&send_buffer[i].front(), send_buffer[i].size(), MPI_DOUBLE, i, i, MPI_COMM_WORLD, &Request());

//   } else {
//     MPI_Gather(global_extent, 6, MPI_INT, NULL, 0, MPI_INT, 0, MPI_COMM_WORLD);
//   }

//   MPI_Status status;
//   int count;
//   MPI_Probe(0, rank, MPI_COMM_WORLD, &status);
//   MPI_Get_count(&status, MPI_DOUBLE, &count);

//   data_vec.resize(count);

//   MPI_Recv(&data_vec.front(), count, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

// #endif /* HAVE_MPI */
// }