source: src/interface/interface_particles_cf.cpp@facba0

Last change on this file since facba0 was dfed1c, checked in by Julian Iseringhausen <isering@…>, 14 years ago

Major vmg update.

git-svn-id: https://svn.version.fz-juelich.de/scafacos/trunk@1136 5161e1c8-67bf-11de-9fd5-51895aff932f

  • Property mode set to 100644
File size: 9.6 KB
/**
 * @file interface_particles_cf.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date Fri Sep 16 13:24:48 2011
 *
 */
7
8#ifdef HAVE_CONFIG_H
9#include <config.h>
10#endif
11
12#include <cassert>
13#include <cstring>
14#include <cstdio>
15#include <fstream>
16#include <sstream>
17
18#include "base/factory.hpp"
19#include "base/helper.hpp"
20#include "interface/interface_particles.hpp"
21#include "interface/interface_particles_cf.hpp"
22#include "level/level_operator_cs.hpp"
23#include "level/level_operator_fas.hpp"
24#include "samples/discretization_poisson_fd.hpp"
25#include "samples/stencils.hpp"
26#include "samples/techniques.hpp"
27#include "solver/solver_regular.hpp"
28#include "solver/solver_singular.hpp"
29#include "smoother/gsrb.hpp"
30#include "thirdparty/pugixml/pugixml.hpp"
31
32#ifdef HAVE_MPI
33#include "comm/comm_mpi.hpp"
34#include "comm/domain_decomposition_mpi.hpp"
35#else
36#include "comm/comm_serial.hpp"
37#endif
38
39#ifdef HAVE_LAPACK
40#include "solver/dgesv.hpp"
41#else
42#include "solver/givens.hpp"
43#endif
44
45using namespace VMG;
46
InterfaceParticlesCF::InterfaceParticlesCF(const char* filename)
{
  Factory& factory = MG::GetFactory();

  int max_level, max_iterations;
  int pre_smoothing_steps, post_smoothing_steps;
  int gamma, near_field_cells;
  int rank = 0; // Stays 0 in the serial build, so the settings summary below is printed there, too.
  vmg_float precision;
  std::string lop_str, datafile_str;

  pugi::xml_document doc;

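  // Only rank 0 reads the XML configuration file from disk; its size and
  // contents are then broadcast so that every rank parses the same buffer.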
#ifdef HAVE_MPI
  long file_size;
  char* file_content = NULL;

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 0) {
    std::ifstream file(filename);

    if (!file.is_open() || !file.good()) {
      MPI_Abort(MPI_COMM_WORLD, MPI_ERR_OTHER);
    }

    assert(file.is_open() && file.good());

    std::filebuf* buf = file.rdbuf();
    file_size = buf->pubseekoff(0, std::ios::end, std::ios::in);
    buf->pubseekpos(0, std::ios::in);
    file_content = static_cast<char*>(pugi::get_memory_allocation_function()(file_size*sizeof(char)));
    buf->sgetn(file_content, file_size);

    file.close();
  }

  // Can't probe on a collective communication, so broadcast the size first.
  MPI_Bcast(&file_size, 1, MPI_LONG, 0, MPI_COMM_WORLD);
  if (rank != 0)
    file_content = static_cast<char*>(pugi::get_memory_allocation_function()(file_size*sizeof(char)));
  MPI_Bcast(file_content, file_size, MPI_CHAR, 0, MPI_COMM_WORLD);

  doc.load_buffer_inplace_own(file_content, file_size);
#else
  doc.load_file(filename);
#endif

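  // Read the solver parameters from the <conf> element; a missing entry falls
  // back to the default given as the second argument.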
  pugi::xml_node xml_conf = doc.document_element().child("conf");

  max_level = Helper::ToValWithDefault<int>(xml_conf.child_value("max_level"), 7);
  max_iterations = Helper::ToValWithDefault<int>(xml_conf.child_value("max_iterations"), 20);
  pre_smoothing_steps = Helper::ToValWithDefault<int>(xml_conf.child_value("pre_smoothing_steps"), 3);
  post_smoothing_steps = Helper::ToValWithDefault<int>(xml_conf.child_value("post_smoothing_steps"), 3);
  gamma = Helper::ToValWithDefault<int>(xml_conf.child_value("gamma"), 1);
  near_field_cells = Helper::ToValWithDefault<int>(xml_conf.child_value("near_field_cells"), 3);
  precision = Helper::ToValWithDefault<vmg_float>(xml_conf.child_value("precision"), 1.0e-7);
  lop_str = xml_conf.child_value("level_operator");
  datafile_str = xml_conf.child_value("datafile");

  // TODO: Hardcoded periodic boundary conditions are just for testing
  Boundary boundary(Periodic, Periodic, Periodic);

#ifdef HAVE_MPI
  Comm* comm = new CommMPI(boundary, new DomainDecompositionMPI());
#else
  Comm* comm = new CommSerial(boundary);
#endif
  comm->Register("COMM");

  Interface* interface = new InterfaceParticles(boundary, 2, max_level, 0.0, 1.0, near_field_cells);
  MG::SetInterface(interface, comm);

  Discretization* discretization = new DiscretizationPoissonFD();
  discretization->Register("DISCRETIZATION");

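  // Select the multigrid cycle type from the config: "cs" installs the
  // correction scheme, "fas" the full approximation scheme; the *_debug
  // variants install the debug versions of these techniques, and any other
  // value falls back to the correction scheme.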
  LevelOperator* lop;
  if (lop_str == "cs") {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else if (lop_str == "fas") {
    lop = new LevelOperatorFAS(Stencils::RestrictionFullWeight, Stencils::Injection, Stencils::InterpolationTrilinear);
    Techniques::SetFullApproximationSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else if (lop_str == "cs_debug") {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodicDebug(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else if (lop_str == "fas_debug") {
    lop = new LevelOperatorFAS(Stencils::RestrictionFullWeight, Stencils::Injection, Stencils::InterpolationTrilinear);
    Techniques::SetFullApproximationSchemePeriodicDebug(interface->MinLevel(), interface->MaxLevel(), gamma);
  } else {
    lop = new LevelOperatorCS(Stencils::RestrictionFullWeight, Stencils::InterpolationTrilinear);
    Techniques::SetCorrectionSchemePeriodic(interface->MinLevel(), interface->MaxLevel(), gamma);
  }
  lop->Register("LEVEL_OPERATOR");

  Smoother* smoother = new GaussSeidelRB();
  smoother->Register("SMOOTHER");

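  // Coarse-grid solver for the singular system arising from the periodic
  // boundary conditions: LAPACK's dgesv if available, otherwise the built-in
  // Givens rotation solver.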
#ifdef HAVE_LAPACK
  Solver* solver = new DGESV<SolverSingular>();
#else
  Solver* solver = new Givens<SolverSingular>();
#endif
  solver->Register("SOLVER");

  factory.RegisterObjectStorage("MAX_ITERATION", max_iterations);
  factory.RegisterObjectStorage("PRESMOOTHSTEPS", pre_smoothing_steps);
  factory.RegisterObjectStorage("POSTSMOOTHSTEPS", post_smoothing_steps);
  factory.RegisterObjectStorage("PRECISION", precision);

  factory.RegisterObjectStorage("PARTICLE_NEAR_FIELD_CELLS", near_field_cells);

  if (rank == 0) {
    std::printf("VMG settings:\n");
    std::printf(" Maximum level: %d\n", max_level);
    std::printf(" Maximum number of iterations: %d\n", max_iterations);
    std::printf(" Gamma: %d\n", gamma);
    std::printf(" Pre-smoothing steps: %d\n", pre_smoothing_steps);
    std::printf(" Post-smoothing steps: %d\n", post_smoothing_steps);
    std::printf(" Precision: %e\n", precision);
    std::printf(" Near field cells: %d\n", near_field_cells);
  }

  LoadDatafile(filename, datafile_str);
}

void InterfaceParticlesCF::LoadDatafile(const std::string& conf_filename, const std::string& data_filename)
{
  Factory& factory = MG::GetFactory();

#ifdef HAVE_MPI
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 0) {
#endif

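  // Locate the particle data file: try the path as given, then relative to
  // the working directory, then relative to the config file's directory.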
  std::ifstream file(data_filename.c_str());

  if (!file.is_open() || !file.good()) {
    std::stringstream search_file;
    search_file << "./" << data_filename;
    file.clear();
    file.open(search_file.str().c_str());
  }

  if (!file.is_open() || !file.good()) {
    std::stringstream search_file;
    search_file << conf_filename.substr(0, conf_filename.find_last_of('/')+1) << data_filename;
    file.clear();
    file.open(search_file.str().c_str());
  }

  assert(file.is_open() && file.good());

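  // Each record in the data file holds one particle: position x, y, z and
  // charge q, separated by whitespace.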
  std::stringstream data;
  vmg_float x, y, z, q;
  data << file.rdbuf();

  // Test the extraction itself rather than data.good(), so the final record
  // is kept even when it is immediately followed by end-of-file.
  while (data >> x >> y >> z >> q) {
    data_vec.push_back(x);
    data_vec.push_back(y);
    data_vec.push_back(z);
    data_vec.push_back(q);
  }
#ifdef HAVE_MPI
  }
#endif

  factory.RegisterObjectStorage("PARTICLE_NUM_LOCAL", static_cast<int>(data_vec.size()/4));

  vmg_float* x_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_POS_ARRAY", 3*(data_vec.size()/4));
  vmg_float* q_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY", data_vec.size()/4);
  vmg_float* p_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY", data_vec.size()/4);
  vmg_float* f_arr = factory.RegisterObjectStorageArray<vmg_float>("PARTICLE_FORCE_ARRAY", 3*(data_vec.size()/4));

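  // Unpack the interleaved (x,y,z,q) records into separate position and
  // charge arrays; potentials and forces start out zeroed.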
  for (unsigned int i=0; i<data_vec.size()/4; ++i) {
    std::memcpy(&x_arr[3*i], &data_vec[4*i], 3*sizeof(vmg_float));
    q_arr[i] = data_vec[4*i+3];
    p_arr[i] = 0.0;
    f_arr[3*i+0] = 0.0;
    f_arr[3*i+1] = 0.0;
    f_arr[3*i+2] = 0.0;
  }

  data_vec.clear();
}

// void InterfaceParticlesCF::CommParticles(const Grid& grid)
// {
// #ifdef HAVE_MPI
//   int rank, size;
//   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
//   MPI_Comm_size(MPI_COMM_WORLD, &size);

//   std::vector<double> send_buffer[size];

//   int global_extent[6];

//   for (int i=0; i<3; ++i) {
//     global_extent[i] = grid.Global().BeginLocal()[i];
//     global_extent[i+3] = grid.Global().EndLocal()[i];
//   }

//   if (rank == 0) {

//     int global_extent_all[6*size];
//     Index index;

//     MPI_Gather(global_extent, 6, MPI_INT, global_extent_all, 6, MPI_INT, 0, MPI_COMM_WORLD);

//     for (unsigned int i=0; i<data_vec.size()/4; ++i) {
//       index = (static_cast<Vector>(&(data_vec[4*i])) - grid.Extent().Begin()) / grid.Extent().MeshWidth();

//       for (int j=0; j<size; ++j) {
//         if (index[0] >= global_extent_all[6*j+0] && index[0] < global_extent_all[6*j+3] &&
//             index[1] >= global_extent_all[6*j+1] && index[1] < global_extent_all[6*j+4] &&
//             index[2] >= global_extent_all[6*j+2] && index[2] < global_extent_all[6*j+5]) {
//           for (int k=0; k<4; ++k)
//             send_buffer[j].push_back(data_vec[4*i+k]);
//           break;
//         }
//       }

//     }

//     data_vec.clear();

//     for (int i=0; i<size; ++i)
//       MPI_Isend(&send_buffer[i].front(), send_buffer[i].size(), MPI_DOUBLE, i, i, MPI_COMM_WORLD, &Request());

//   } else {
//     MPI_Gather(global_extent, 6, MPI_INT, NULL, 0, MPI_INT, 0, MPI_COMM_WORLD);
//   }

//   MPI_Status status;
//   int count;
//   MPI_Probe(0, rank, MPI_COMM_WORLD, &status);
//   MPI_Get_count(&status, MPI_DOUBLE, &count);

//   data_vec.resize(count);

//   MPI_Recv(&data_vec.front(), count, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

// #endif /* HAVE_MPI */
// }