source: src/interface/interface_particles_cf.cpp@728149

Last change on this file since 728149 was 728149, checked in by Frederik Heber <heber@…>, 14 years ago

Conformalized vmg getter functions with other solvers.

  • also rename gamma -> cycle_type.
  • all getters now return FCSResult and require two parameters.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1778 5161e1c8-67bf-11de-9fd5-51895aff932f

  • Property mode set to 100644
File size: 10.0 KB
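For context, the getter convention referred to in the commit message follows the pattern of the other ScaFaCoS solver interfaces: instead of returning the value directly, each getter returns an FCSResult for error reporting and takes two parameters, the solver handle and an output pointer. A minimal sketch of what such declarations look like; the exact function names are assumptions inferred from the commit message, not taken from this file:

/* Hypothetical declarations illustrating the convention described in
 * the commit message: getters return an FCSResult and take two
 * parameters, the handle and an output pointer. */
FCSResult fcs_vmg_get_max_level(FCS handle, fcs_int* max_level);
FCSResult fcs_vmg_get_cycle_type(FCS handle, fcs_int* cycle_type);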
/**
 * @file   interface_particles_cf.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date   Fri Sep 16 13:24:48 2011
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif
#endif

#include <cassert>
#include <cstring>
#include <cstdio>
#include <fstream>
#include <sstream>

#include "base/factory.hpp"
#include "base/helper.hpp"
#include "interface/interface_particles.hpp"
#include "interface/interface_particles_cf.hpp"
#include "level/level_operator_cs.hpp"
#include "level/level_operator_fas.hpp"
#include "samples/discretization_poisson_fd.hpp"
#include "samples/stencils.hpp"
#include "samples/techniques.hpp"
#include "solver/solver_regular.hpp"
#include "solver/solver_singular.hpp"
#include "smoother/gsrb.hpp"
#include "thirdparty/pugixml/pugixml.hpp"

#ifdef HAVE_MPI
#include "comm/comm_mpi.hpp"
#include "comm/domain_decomposition_mpi.hpp"
#else
#include "comm/comm_serial.hpp"
#endif

#ifdef HAVE_LAPACK
#include "solver/dgesv.hpp"
#else
#include "solver/givens.hpp"
#endif

using namespace VMG;

InterfaceParticlesCF::InterfaceParticlesCF(const char* filename)
{
  Factory& factory = MG::GetFactory();

  int max_level, max_iterations;
  int pre_smoothing_steps, post_smoothing_steps;
  int cycle_type, near_field_cells, discretization_order;
  vmg_float precision;
  std::string lop_str, datafile_str;

  // Declared unconditionally (rank 0 in the serial build) so that the
  // rank-guarded log output below also compiles without MPI.
  int rank = 0;

  pugi::xml_document doc;

#ifdef HAVE_MPI
  long file_size;
  char* file_content = NULL;

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 0) {
    std::ifstream file(filename);

    if (!file.is_open() || !file.good()) {
      MPI_Abort(MPI_COMM_WORLD, MPI_ERR_OTHER);
    }

    assert(file.is_open() && file.good());

    // Read the whole file into a buffer obtained from pugixml's
    // allocator, so its ownership can later be handed to the document.
    std::filebuf* buf = file.rdbuf();
    file_size = buf->pubseekoff(0, std::ios::end, std::ios::in);
    buf->pubseekpos(0, std::ios::in);
    file_content = static_cast<char*>(pugi::get_memory_allocation_function()(file_size*sizeof(char)));
    buf->sgetn(file_content, file_size);

    file.close();
  }

  // A collective broadcast cannot be probed for its size, so the file
  // size is broadcast first and the non-root ranks allocate a matching
  // buffer before the file contents are broadcast.
  MPI_Bcast(&file_size, 1, MPI_LONG, 0, MPI_COMM_WORLD);
  if (rank != 0)
    file_content = static_cast<char*>(pugi::get_memory_allocation_function()(file_size*sizeof(char)));
  MPI_Bcast(file_content, file_size, MPI_CHAR, 0, MPI_COMM_WORLD);

  doc.load_buffer_inplace_own(file_content, file_size);
#else
  doc.load_file(filename);
#endif

  pugi::xml_node xml_conf = doc.document_element().child("conf");

  max_level = Helper::ToValWithDefault<int>(xml_conf.child_value("max_level"), 7);
  max_iterations = Helper::ToValWithDefault<int>(xml_conf.child_value("max_iterations"), 20);
  pre_smoothing_steps = Helper::ToValWithDefault<int>(xml_conf.child_value("pre_smoothing_steps"), 3);
  post_smoothing_steps = Helper::ToValWithDefault<int>(xml_conf.child_value("post_smoothing_steps"), 3);
  cycle_type = Helper::ToValWithDefault<int>(xml_conf.child_value("cycle_type"), 1);
  near_field_cells = Helper::ToValWithDefault<int>(xml_conf.child_value("near_field_cells"), 3);
  discretization_order = Helper::ToValWithDefault<int>(xml_conf.child_value("discretization_order"), 2);
  precision = Helper::ToValWithDefault<vmg_float>(xml_conf.child_value("precision"), 1.0e-7);
  lop_str = xml_conf.child_value("level_operator");
  datafile_str = xml_conf.child_value("datafile");

  // TODO: Hardcoded periodic boundary conditions are just for testing
  Boundary boundary(Periodic, Periodic, Periodic);

#ifdef HAVE_MPI
  Comm* comm = new CommMPI(boundary, new DomainDecompositionMPI());
#else
  Comm* comm = new CommSerial(boundary);
#endif
  comm->Register("COMM");

  Interface* interface = new InterfaceParticles(boundary, 2, max_level, 0.0, 1.0, near_field_cells, 0, 1.0);
  MG::SetInterface(interface, comm);

  Discretization* discretization = new DiscretizationPoissonFD(discretization_order);
  discretization->Register("DISCRETIZATION");

  // Select the multigrid technique: "cs" is the correction scheme,
  // "fas" the full approximation scheme; unrecognized values fall back
  // to the correction scheme. cycle_type is the cycle index gamma
  // (1 = V-cycle, 2 = W-cycle).
  LevelOperator* lop;
  if (!lop_str.compare("cs")) {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), cycle_type);
  } else if (!lop_str.compare("fas")) {
    lop = new LevelOperatorFAS(Stencils::RestrictionFullWeight, Stencils::Injection, Stencils::InterpolationTrilinear);
    Techniques::SetFullApproximationSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), cycle_type);
  } else if (!lop_str.compare("cs_debug")) {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodicDebug(interface->MinLevel(), interface->MaxLevel(), cycle_type);
  } else if (!lop_str.compare("fas_debug")) {
    lop = new LevelOperatorFAS(Stencils::RestrictionFullWeight, Stencils::Injection, Stencils::InterpolationTrilinear);
    Techniques::SetFullApproximationSchemePeriodicDebug(interface->MinLevel(), interface->MaxLevel(), cycle_type);
  } else {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), cycle_type);
  }
  lop->Register("LEVEL_OPERATOR");

  Smoother* smoother = new GaussSeidelRB();
  smoother->Register("SMOOTHER");

#ifdef HAVE_LAPACK
  Solver* solver = new DGESV<SolverSingular>();
#else
  Solver* solver = new Givens<SolverSingular>();
#endif
  solver->Register("SOLVER");

  factory.RegisterObjectStorage("MAX_ITERATION", max_iterations);
  factory.RegisterObjectStorage("PRESMOOTHSTEPS", pre_smoothing_steps);
  factory.RegisterObjectStorage("POSTSMOOTHSTEPS", post_smoothing_steps);
  factory.RegisterObjectStorage("PRECISION", precision);

  factory.RegisterObjectStorage("PARTICLE_NEAR_FIELD_CELLS", near_field_cells);

  if (rank == 0) {
    std::printf("VMG settings:\n");
    std::printf("  Maximum level: %d\n", max_level);
    std::printf("  Maximum number of iterations: %d\n", max_iterations);
    std::printf("  Cycle type (gamma): %d\n", cycle_type);
    std::printf("  Pre smoothing steps: %d\n", pre_smoothing_steps);
    std::printf("  Post smoothing steps: %d\n", post_smoothing_steps);
    std::printf("  Precision: %e\n", precision);
    std::printf("  Near field cells: %d\n", near_field_cells);
    std::printf("  Discretization order: %d\n", discretization_order);
  }

  LoadDatafile(filename, datafile_str);
}


void InterfaceParticlesCF::LoadDatafile(const std::string& conf_filename, const std::string& data_filename)
{
  Factory& factory = MG::GetFactory();

#ifdef HAVE_MPI
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 0) {
#endif

  std::ifstream file(data_filename.c_str());

  // If the datafile is not found directly, search relative to the
  // current directory and then relative to the configuration file.
  if (!file.is_open() || !file.good()) {
    std::stringstream search_file;
    search_file << "./" << data_filename;
    file.clear();
    file.open(search_file.str().c_str());
  }

  if (!file.is_open() || !file.good()) {
    std::stringstream search_file;
    search_file << conf_filename.substr(0, conf_filename.find_last_of('/')+1) << data_filename;
    file.clear();
    file.open(search_file.str().c_str());
  }

  assert(file.is_open() && file.good());

  std::stringstream data;
  vmg_float x, y, z, q;
  data << file.rdbuf();

  // The extraction itself serves as the loop condition, so the final
  // (x, y, z, q) record is kept even if the file lacks a trailing
  // newline.
  while (data >> x >> y >> z >> q) {
    data_vec.push_back(x);
    data_vec.push_back(y);
    data_vec.push_back(z);
    data_vec.push_back(q);
  }
#ifdef HAVE_MPI
  }
#endif

  // data_vec holds one interleaved (x, y, z, q) record per particle.
  factory.RegisterObjectStorage("PARTICLE_NUM_LOCAL", static_cast<int>(data_vec.size()/4));

  vmg_float* x_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY", 3*(data_vec.size()/4));
  vmg_float* q_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY", data_vec.size()/4);
  vmg_float* p_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY", data_vec.size()/4);
  vmg_float* f_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_FIELD_ARRAY", 3*(data_vec.size()/4));

  for (unsigned int i=0; i<data_vec.size()/4; ++i) {
    std::memcpy(&x_arr[3*i], &data_vec[4*i], 3*sizeof(vmg_float));
    q_arr[i] = data_vec[4*i+3];
    p_arr[i] = 0.0;
    f_arr[3*i+0] = 0.0;
    f_arr[3*i+1] = 0.0;
    f_arr[3*i+2] = 0.0;
  }

  data_vec.clear();
}

// void InterfaceParticlesCF::CommParticles(const Grid& grid)
// {
// #ifdef HAVE_MPI
//   int rank, size;
//   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
//   MPI_Comm_size(MPI_COMM_WORLD, &size);

//   std::vector<double> send_buffer[size];

//   int global_extent[6];

//   for (int i=0; i<3; ++i) {
//     global_extent[i] = grid.Global().BeginLocal()[i];
//     global_extent[i+3] = grid.Global().EndLocal()[i];
//   }

//   if (rank == 0) {

//     int global_extent_all[6*size];
//     Index index;

//     MPI_Gather(global_extent, 6, MPI_INT, global_extent_all, 6, MPI_INT, 0, MPI_COMM_WORLD);

//     for (unsigned int i=0; i<data_vec.size()/4; ++i) {
//       index = (static_cast<Vector>(&(data_vec[4*i])) - grid.Extent().Begin()) / grid.Extent().MeshWidth();

//       for (int j=0; j<size; ++j) {
//         if (index[0] >= global_extent_all[6*j+0] && index[0] < global_extent_all[6*j+3] &&
//             index[1] >= global_extent_all[6*j+1] && index[1] < global_extent_all[6*j+4] &&
//             index[2] >= global_extent_all[6*j+2] && index[2] < global_extent_all[6*j+5]) {
//           for (int k=0; k<4; ++k)
//             send_buffer[j].push_back(data_vec[4*i+k]);
//           break;
//         }
//       }

//     }

//     data_vec.clear();

//     for (int i=0; i<size; ++i)
//       MPI_Isend(&send_buffer[i].front(), send_buffer[i].size(), MPI_DOUBLE, i, i, MPI_COMM_WORLD, &Request());

//   } else {
//     MPI_Gather(global_extent, 6, MPI_INT, NULL, 0, MPI_INT, 0, MPI_COMM_WORLD);
//   }

//   MPI_Status status;
//   int count;
//   MPI_Probe(0, rank, MPI_COMM_WORLD, &status);
//   MPI_Get_count(&status, MPI_DOUBLE, &count);

//   data_vec.resize(count);

//   MPI_Recv(&data_vec.front(), count, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

// #endif /* HAVE_MPI */
// }
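
For reference, a configuration file accepted by the constructor above could look like the sketch below. The element names and default values are taken directly from the ToValWithDefault calls; the name of the root element is an assumption, since the code only requires that the document element has a <conf> child:

<vmg>
  <conf>
    <max_level>7</max_level>
    <max_iterations>20</max_iterations>
    <pre_smoothing_steps>3</pre_smoothing_steps>
    <post_smoothing_steps>3</post_smoothing_steps>
    <cycle_type>1</cycle_type>
    <near_field_cells>3</near_field_cells>
    <discretization_order>2</discretization_order>
    <precision>1.0e-7</precision>
    <level_operator>cs</level_operator>
    <datafile>particles.dat</datafile>
  </conf>
</vmg>

The datafile named here (particles.dat is a placeholder) is plain text with one whitespace-separated "x y z q" record per particle, matching the parsing in LoadDatafile.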