source: src/interface/interface_particles_cf.cpp@e3dbbf

Last change on this file since e3dbbf was ac6d04, checked in by Julian Iseringhausen <isering@…>, 14 years ago

Merge recent changes of the vmg library into ScaFaCos.

Includes a fix for the communication problems on Jugene.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1666 5161e1c8-67bf-11de-9fd5-51895aff932f

/**
 * @file interface_particles_cf.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Fri Sep 16 13:24:48 2011
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif
#endif

#include <cassert>
#include <cstring>
#include <cstdio>
#include <fstream>
#include <sstream>

#include "base/factory.hpp"
#include "base/helper.hpp"
#include "interface/interface_particles.hpp"
#include "interface/interface_particles_cf.hpp"
#include "level/level_operator_cs.hpp"
#include "level/level_operator_fas.hpp"
#include "samples/discretization_poisson_fd.hpp"
#include "samples/stencils.hpp"
#include "samples/techniques.hpp"
#include "solver/solver_regular.hpp"
#include "solver/solver_singular.hpp"
#include "smoother/gsrb.hpp"
#include "thirdparty/pugixml/pugixml.hpp"

#ifdef HAVE_MPI
#include "comm/comm_mpi.hpp"
#include "comm/domain_decomposition_mpi.hpp"
#else
#include "comm/comm_serial.hpp"
#endif

#ifdef HAVE_LAPACK
#include "solver/dgesv.hpp"
#else
#include "solver/givens.hpp"
#endif

using namespace VMG;

InterfaceParticlesCF::InterfaceParticlesCF(const char* filename)
{
  Factory& factory = MG::GetFactory();

  int max_level, max_iterations;
  int pre_smoothing_steps, post_smoothing_steps;
  int gamma, near_field_cells;
  vmg_float precision;
  std::string lop_str, datafile_str;

  pugi::xml_document doc;
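  // Only rank 0 touches the filesystem: it reads the whole XML configuration
  // file into a buffer allocated via pugixml's allocation function and then
  // broadcasts that buffer, so every rank can parse the same document locally.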
#ifdef HAVE_MPI
  int rank;
  long file_size;
  char* file_content = NULL;

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 0) {
    std::ifstream file(filename);

    if (!file.is_open() || !file.good()) {
      MPI_Abort(MPI_COMM_WORLD, MPI_ERR_OTHER);
    }

    assert(file.is_open() && file.good());

    std::filebuf* buf = file.rdbuf();
    file_size = buf->pubseekoff(0, std::ios::end, std::ios::in);
    buf->pubseekpos(0, std::ios::in);
    file_content = static_cast<char*>(pugi::get_memory_allocation_function()(file_size*sizeof(char)));
    buf->sgetn(file_content, file_size);

    file.close();
  }

  // MPI_Probe cannot be used with collective communication, so the buffer
  // size is broadcast explicitly before the contents.
  MPI_Bcast(&file_size, 1, MPI_LONG, 0, MPI_COMM_WORLD);
  if (rank != 0)
    file_content = static_cast<char*>(pugi::get_memory_allocation_function()(file_size*sizeof(char)));
  MPI_Bcast(file_content, file_size, MPI_CHAR, 0, MPI_COMM_WORLD);
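  // load_buffer_inplace_own parses the buffer in place and takes ownership of
  // it; pugixml frees file_content itself once the document no longer needs it.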
  doc.load_buffer_inplace_own(file_content, file_size);
#else
  doc.load_file(filename);
#endif

  pugi::xml_node xml_conf = doc.document_element().child("conf");

  max_level = Helper::ToValWithDefault<int>(xml_conf.child_value("max_level"), 7);
  max_iterations = Helper::ToValWithDefault<int>(xml_conf.child_value("max_iterations"), 20);
  pre_smoothing_steps = Helper::ToValWithDefault<int>(xml_conf.child_value("pre_smoothing_steps"), 3);
  post_smoothing_steps = Helper::ToValWithDefault<int>(xml_conf.child_value("post_smoothing_steps"), 3);
  gamma = Helper::ToValWithDefault<int>(xml_conf.child_value("gamma"), 1);
  near_field_cells = Helper::ToValWithDefault<int>(xml_conf.child_value("near_field_cells"), 3);
  precision = Helper::ToValWithDefault<vmg_float>(xml_conf.child_value("precision"), 1.0e-7);
  lop_str = xml_conf.child_value("level_operator");
  datafile_str = xml_conf.child_value("datafile");
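  /* A configuration file read by this constructor might look as follows
   * (hypothetical example; the root element name is arbitrary, since only
   * its <conf> child is queried, and each value falls back to the default
   * shown above when missing):
   *
   * <vmg>
   *   <conf>
   *     <max_level>7</max_level>
   *     <max_iterations>20</max_iterations>
   *     <pre_smoothing_steps>3</pre_smoothing_steps>
   *     <post_smoothing_steps>3</post_smoothing_steps>
   *     <gamma>1</gamma>
   *     <near_field_cells>3</near_field_cells>
   *     <precision>1.0e-7</precision>
   *     <level_operator>cs</level_operator>
   *     <datafile>particles.dat</datafile>
   *   </conf>
   * </vmg>
   */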

  // TODO: Hardcoded periodic boundary conditions are just for testing
  Boundary boundary(Periodic, Periodic, Periodic);

#ifdef HAVE_MPI
  Comm* comm = new CommMPI(boundary, new DomainDecompositionMPI());
#else
  Comm* comm = new CommSerial(boundary);
#endif
  comm->Register("COMM");

  Interface* interface = new InterfaceParticles(boundary, 2, max_level, 0.0, 1.0, near_field_cells, 0, 1.0);
  MG::SetInterface(interface, comm);

  Discretization* discretization = new DiscretizationPoissonFD();
  discretization->Register("DISCRETIZATION");
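  // Choose the multigrid scheme requested in the configuration: "cs" is the
  // standard correction scheme, "fas" the full approximation scheme, and the
  // "_debug" variants select their debugging counterparts. Any other value
  // falls back to the correction scheme.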
  LevelOperator* lop;
  if (!lop_str.compare("cs")) {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else if (!lop_str.compare("fas")) {
    lop = new LevelOperatorFAS(Stencils::RestrictionFullWeight, Stencils::Injection, Stencils::InterpolationTrilinear);
    Techniques::SetFullApproximationSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else if (!lop_str.compare("cs_debug")) {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodicDebug(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else if (!lop_str.compare("fas_debug")) {
    lop = new LevelOperatorFAS(Stencils::RestrictionFullWeight, Stencils::Injection, Stencils::InterpolationTrilinear);
    Techniques::SetFullApproximationSchemePeriodicDebug(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), gamma);
  }
  lop->Register("LEVEL_OPERATOR");

  Smoother* smoother = new GaussSeidelRB();
  smoother->Register("SMOOTHER");
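  // Coarse-grid solver: LAPACK's dgesv when available, otherwise a built-in
  // Givens rotation solver. Both are instantiated with the SolverSingular
  // policy, since fully periodic boundaries fix the potential only up to an
  // additive constant and thus yield a singular system.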
#ifdef HAVE_LAPACK
  Solver* solver = new DGESV<SolverSingular>();
#else
  Solver* solver = new Givens<SolverSingular>();
#endif
  solver->Register("SOLVER");

  factory.RegisterObjectStorage("MAX_ITERATION", max_iterations);
  factory.RegisterObjectStorage("PRESMOOTHSTEPS", pre_smoothing_steps);
  factory.RegisterObjectStorage("POSTSMOOTHSTEPS", post_smoothing_steps);
  factory.RegisterObjectStorage("PRECISION", precision);

  factory.RegisterObjectStorage("PARTICLE_NEAR_FIELD_CELLS", near_field_cells);

#ifdef HAVE_MPI
  if (rank == 0) {
#endif
  std::printf("VMG settings:\n");
  std::printf(" Maximum level: %d\n", max_level);
  std::printf(" Maximum number of iterations: %d\n", max_iterations);
  std::printf(" Gamma: %d\n", gamma);
  std::printf(" Pre smoothing steps: %d\n", pre_smoothing_steps);
  std::printf(" Post smoothing steps: %d\n", post_smoothing_steps);
  std::printf(" Precision: %e\n", precision);
  std::printf(" Near field cells: %d\n", near_field_cells);
#ifdef HAVE_MPI
  }
#endif

  LoadDatafile(filename, datafile_str);
}


void InterfaceParticlesCF::LoadDatafile(const std::string& conf_filename, const std::string& data_filename)
{
  Factory& factory = MG::GetFactory();

#ifdef HAVE_MPI
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 0) {
#endif
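  // Look for the data file first under the path as given, then prefixed with
  // "./", and finally relative to the directory of the configuration file.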
  std::ifstream file(data_filename.c_str());

  if (!file.is_open() || !file.good()) {
    std::stringstream search_file;
    search_file << "./" << data_filename;
    file.clear();
    file.open(search_file.str().c_str());
  }

  if (!file.is_open() || !file.good()) {
    std::stringstream search_file;
    search_file << conf_filename.substr(0, conf_filename.find_last_of('/')+1) << data_filename;
    file.clear();
    file.open(search_file.str().c_str());
  }

  assert(file.is_open() && file.good());
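  // The data file is plain text holding four whitespace-separated values per
  // particle: the position components x, y, z and the charge q.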
  std::stringstream data;
  vmg_float x, y, z, q;
  data << file.rdbuf();

  // Extract directly in the loop condition so that a complete final record is
  // kept even when the file does not end in whitespace.
  while (data >> x >> y >> z >> q) {
    data_vec.push_back(x);
    data_vec.push_back(y);
    data_vec.push_back(z);
    data_vec.push_back(q);
  }
#ifdef HAVE_MPI
  }
#endif
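  // With MPI, only rank 0 has filled data_vec, so all other ranks register
  // zero local particles here; the particles are presumably redistributed
  // elsewhere (cf. the disabled CommParticles routine below).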

  factory.RegisterObjectStorage("PARTICLE_NUM_LOCAL", static_cast<int>(data_vec.size()/4));

  vmg_float* x_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY", 3*(data_vec.size()/4));
  vmg_float* q_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY", data_vec.size()/4);
  vmg_float* p_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY", data_vec.size()/4);
  vmg_float* f_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_FORCE_ARRAY", 3*(data_vec.size()/4));
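  // Unpack the interleaved (x, y, z, q) records: positions and charges go to
  // separate arrays, while potentials and forces start out zeroed.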
  for (unsigned int i=0; i<data_vec.size()/4; ++i) {
    std::memcpy(&x_arr[3*i], &data_vec[4*i], 3*sizeof(vmg_float));
    q_arr[i] = data_vec[4*i+3];
    p_arr[i] = 0.0;
    f_arr[3*i+0] = 0.0;
    f_arr[3*i+1] = 0.0;
    f_arr[3*i+2] = 0.0;
  }

  data_vec.clear();
}
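// The routine below would distribute each particle to the rank whose local
// grid extent contains its position. It is kept for reference but is
// currently commented out.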
// void InterfaceParticlesCF::CommParticles(const Grid& grid)
// {
// #ifdef HAVE_MPI
//   int rank, size;
//   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
//   MPI_Comm_size(MPI_COMM_WORLD, &size);

//   std::vector<double> send_buffer[size];

//   int global_extent[6];

//   for (int i=0; i<3; ++i) {
//     global_extent[i] = grid.Global().BeginLocal()[i];
//     global_extent[i+3] = grid.Global().EndLocal()[i];
//   }

//   if (rank == 0) {

//     int global_extent_all[6*size];
//     Index index;

//     MPI_Gather(global_extent, 6, MPI_INT, global_extent_all, 6, MPI_INT, 0, MPI_COMM_WORLD);

//     for (unsigned int i=0; i<data_vec.size()/4; ++i) {
//       index = (static_cast<Vector>(&(data_vec[4*i])) - grid.Extent().Begin()) / grid.Extent().MeshWidth();

//       for (int j=0; j<size; ++j) {
//         if (index[0] >= global_extent_all[6*j+0] && index[0] < global_extent_all[6*j+3] &&
//             index[1] >= global_extent_all[6*j+1] && index[1] < global_extent_all[6*j+4] &&
//             index[2] >= global_extent_all[6*j+2] && index[2] < global_extent_all[6*j+5]) {
//           for (int k=0; k<4; ++k)
//             send_buffer[j].push_back(data_vec[4*i+k]);
//           break;
//         }
//       }

//     }

//     data_vec.clear();

//     for (int i=0; i<size; ++i)
//       MPI_Isend(&send_buffer[i].front(), send_buffer[i].size(), MPI_DOUBLE, i, i, MPI_COMM_WORLD, &Request());

//   } else {
//     MPI_Gather(global_extent, 6, MPI_INT, NULL, 0, MPI_INT, 0, MPI_COMM_WORLD);
//   }

//   MPI_Status status;
//   int count;
//   MPI_Probe(0, rank, MPI_COMM_WORLD, &status);
//   MPI_Get_count(&status, MPI_DOUBLE, &count);

//   data_vec.resize(count);

//   MPI_Recv(&data_vec.front(), count, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

// #endif /* HAVE_MPI */
// }