/*
 * vmg - a versatile multigrid solver
 * Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
 *
 * vmg is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * vmg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file   interface_particles.cpp
 * @author Julian Iseringhausen <isering@ins.uni-bonn.de>
 * @date   Mon Apr 18 12:56:48 2011
 *
 * @brief  VMG::InterfaceParticles
 *
 */

#ifdef HAVE_CONFIG_H
#include <libvmg_config.h>
#endif

#ifdef HAVE_MPI
#include <mpi.h>
#ifdef HAVE_MARMOT
#include <enhancempicalls.h>
#include <sourceinfompicalls.h>
#endif
#endif

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstring>
#include <list>

#include "base/helper.hpp"
#include "base/index.hpp"
#include "base/math.hpp"
#include "base/vector.hpp"
#include "comm/comm.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"
#include "grid/tempgrid.hpp"
#include "units/particle/comm_mpi_particle.hpp"
#include "units/particle/interface_particles.hpp"
#include "units/particle/interpolation.hpp"
#include "units/particle/linked_cell_list.hpp"
#include "mg.hpp"

using namespace VMG;

void InterfaceParticles::ImportRightHandSide(Multigrid& multigrid)
{
  Index index_global, index_local, index;
  Vector pos_rel, pos_abs, grid_val;

  Factory& factory = MG::GetFactory();
  Particle::CommMPI& comm = *dynamic_cast<Particle::CommMPI*>(MG::GetComm());

  const int& near_field_cells = factory.GetObjectStorageVal<int>("PARTICLE_NEAR_FIELD_CELLS");

  Grid& grid = multigrid(multigrid.MaxLevel());
  Grid& particle_grid = comm.GetParticleGrid();

  // grid.Clear();
  particle_grid.Clear();

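  // The near-field correction reaches near_field_cells cells in every
  // direction, so each process presumably needs more local cells per
  // dimension than that for the halo exchange to cover the stencil.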
  assert(particle_grid.Global().LocalSize().IsComponentwiseGreater(near_field_cells));

  /*
   * Distribute particles to their processes
   */
  particles.clear();
  comm.CommParticles(grid, particles);

  /*
   * Charge assignment on the grid
   */
  std::list<Particle::Particle>::iterator iter;

#ifdef OUTPUT_DEBUG
  vmg_float particle_charges = 0.0;
  for (iter=particles.begin(); iter!=particles.end(); ++iter)
    particle_charges += iter->Charge();
  particle_charges = MG::GetComm()->GlobalSumRoot(particle_charges);
  comm.PrintOnce(Debug, "Particle list charge sum: %e", particle_charges);
  comm.Print(Debug, "Local number of particles: %d", static_cast<int>(particles.size()));
#endif

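  // Spread each particle's charge onto the particle grid via the spline
  // shape function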
  for (iter=particles.begin(); iter!=particles.end(); ++iter)
    spl.SetSpline(particle_grid, *iter);

  // Communicate charges over halo (ghost-cell contributions are added back
  // onto their owner processes)
  comm.CommFromGhosts(particle_grid);

  // Assign charge values to the right hand side
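  // (the factor 4*pi corresponds to Poisson's equation for the electrostatic
  // potential in Gaussian units, Delta(phi) = -4*pi*rho; the sign convention
  // is presumably handled inside the solver)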
  for (int i=0; i<grid.Local().Size().X(); ++i)
    for (int j=0; j<grid.Local().Size().Y(); ++j)
      for (int k=0; k<grid.Local().Size().Z(); ++k)
        grid(grid.Local().Begin().X() + i,
             grid.Local().Begin().Y() + j,
             grid.Local().Begin().Z() + k) = 4.0 * Math::pi *
          particle_grid.GetVal(particle_grid.Local().Begin().X() + i,
                               particle_grid.Local().Begin().Y() + j,
                               particle_grid.Local().Begin().Z() + k);

#ifdef OUTPUT_DEBUG
  Grid::iterator grid_iter;
  vmg_float charge_sum = 0.0;
  for (grid_iter=grid.Iterators().Local().Begin(); grid_iter!=grid.Iterators().Local().End(); ++grid_iter)
    charge_sum += grid.GetVal(*grid_iter);
  charge_sum = MG::GetComm()->GlobalSum(charge_sum);
  comm.PrintOnce(Debug, "Grid charge sum: %e", charge_sum);
#endif
}

void InterfaceParticles::ExportSolution(Grid& grid)
{
  Index i;

#ifdef OUTPUT_DEBUG
  vmg_float e = 0.0;
  vmg_float e_long = 0.0;
  vmg_float e_self = 0.0;
  vmg_float e_short_peak = 0.0;
  vmg_float e_short_spline = 0.0;
#endif

  Factory& factory = MG::GetFactory();
  Particle::CommMPI& comm = *dynamic_cast<Particle::CommMPI*>(MG::GetComm());

  /*
   * Get parameters and arrays
   */
  const vmg_int& near_field_cells = factory.GetObjectStorageVal<int>("PARTICLE_NEAR_FIELD_CELLS");
  const vmg_int& interpolation_degree = factory.GetObjectStorageVal<int>("PARTICLE_INTERPOLATION_DEGREE");

  Particle::Interpolation ip(interpolation_degree);

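  // Near-field cutoff radius: near_field_cells grid cells, measured with
  // the largest mesh width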
  const vmg_float r_cut = near_field_cells * grid.Extent().MeshWidth().Max();

  /*
   * Copy potential values to a grid with a sufficiently large halo.
   * This may be optimized in the future.
   * The parameters of this grid have been set in the import step.
   */
  Grid& particle_grid = comm.GetParticleGrid();

  for (i.X()=0; i.X()<grid.Local().Size().X(); ++i.X())
    for (i.Y()=0; i.Y()<grid.Local().Size().Y(); ++i.Y())
      for (i.Z()=0; i.Z()<grid.Local().Size().Z(); ++i.Z())
        particle_grid(i + particle_grid.Local().Begin()) = grid.GetVal(i + grid.Local().Begin());

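  // Fill the particle grid's halo with potential values from neighboring
  // processes (the converse of CommFromGhosts above)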
  comm.CommToGhosts(particle_grid);

  /*
   * Compute potentials
   */
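  // Particles are binned into grid cells (linked cells); for each particle
  // the short-range correction scans all cells at most near_field_cells away
  // in each direction, which covers every pair closer than r_cut.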
  Particle::LinkedCellList lc(particles, near_field_cells, grid);
  Particle::LinkedCellList::iterator p1, p2;
  Grid::iterator iter;

  comm.CommLCListToGhosts(lc);

  for (int i=lc.Local().Begin().X(); i<lc.Local().End().X(); ++i)
    for (int j=lc.Local().Begin().Y(); j<lc.Local().End().Y(); ++j)
      for (int k=lc.Local().Begin().Z(); k<lc.Local().End().Z(); ++k) {

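        // Compute the interpolation coefficients once per cell that actually
        // contains particles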
        if (lc(i,j,k).size() > 0)
          ip.ComputeCoefficients(particle_grid, Index(i,j,k) - lc.Local().Begin() + particle_grid.Local().Begin());

        for (p1=lc(i,j,k).begin(); p1!=lc(i,j,k).end(); ++p1) {

          // Interpolate long-range part of potential and electric field
          ip.Evaluate(**p1);

          // Subtract self-induced potential
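          // (the grid solution contains each particle's interaction with its
          // own spline-smeared charge; GetAntiDerivativeAtZero() presumably
          // returns that self-term per unit charge squared)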
          (*p1)->Pot() -= (*p1)->Charge() * spl.GetAntiDerivativeAtZero();
          // spl.SubtractSelfInducedForces(particle_grid, **p1);

#ifdef OUTPUT_DEBUG
          e_long += 0.5 * (*p1)->Charge() * ip.EvaluatePotentialLR(**p1);
          e_self += 0.5 * (*p1)->Charge() * (*p1)->Charge() * spl.GetAntiDerivativeAtZero();
#endif

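          // Near-field pair correction: add the bare Coulomb peak q/r plus
          // the spline term, which presumably cancels the smeared interaction
          // already contained in the interpolated grid solution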
          for (int dx=-near_field_cells; dx<=near_field_cells; ++dx)
            for (int dy=-near_field_cells; dy<=near_field_cells; ++dy)
              for (int dz=-near_field_cells; dz<=near_field_cells; ++dz) {

                for (p2=lc(i+dx,j+dy,k+dz).begin(); p2!=lc(i+dx,j+dy,k+dz).end(); ++p2)

                  if (*p1 != *p2) {

                    const Vector dir = (*p1)->Pos() - (*p2)->Pos();
                    const vmg_float length = dir.Length();

                    if (length < r_cut) {

                      (*p1)->Pot() += (*p2)->Charge() / length * (1.0 + spl.EvaluatePotential(length));
                      (*p1)->Field() += (*p2)->Charge() * dir * spl.EvaluateField(length);

#ifdef OUTPUT_DEBUG
                      e_short_peak += 0.5 * (*p1)->Charge() * (*p2)->Charge() / length;
                      e_short_spline += 0.5 * (*p1)->Charge() * (*p2)->Charge() / length * spl.EvaluatePotential(length);
#endif
                    }
                  }
              }
        }
      }

  /* Remove average force term */
  // Vector average_force = 0.0;
  // for (std::list<Particle::Particle>::const_iterator iter=particles.begin(); iter!=particles.end(); ++iter)
  //   average_force += iter->Charge() * iter->Field();
  // const vmg_int& npl = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
  // const vmg_int num_particles_global = comm.GlobalSum(npl);
  // average_force /= num_particles_global;
  // comm.GlobalSumArray(average_force.vec(), 3);
  // for (std::list<Particle::Particle>::iterator iter=particles.begin(); iter!=particles.end(); ++iter)
  //   iter->Field() -= average_force / iter->Charge();

  comm.CommParticlesBack(particles);

#ifdef OUTPUT_DEBUG
  vmg_float* q = factory.GetObjectStorageArray<vmg_float>("PARTICLE_CHARGE_ARRAY");
  const vmg_int& num_particles_local = factory.GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
  const vmg_float* p = factory.GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
  const vmg_float* f = factory.GetObjectStorageArray<vmg_float>("PARTICLE_FIELD_ARRAY");

  // extract forces: print the first locally stored force vector. The
  // components are indexed explicitly because the evaluation order of
  // function arguments is unspecified in C++, so a post-incremented counter
  // repeated within one call would be unreliable.
  if (num_particles_local != 0)
    comm.PrintOnce(Debug, "%d force vector: %e %e %e", 1, f[0], f[1], f[2]);

|
---|
| 245 | e_long = comm.GlobalSumRoot(e_long);
|
---|
| 246 | e_short_peak = comm.GlobalSumRoot(e_short_peak);
|
---|
| 247 | e_short_spline = comm.GlobalSumRoot(e_short_spline);
|
---|
| 248 | e_self = comm.GlobalSumRoot(e_self);
|
---|
| 249 |
|
---|
| 250 | for (int j=0; j<num_particles_local; ++j)
|
---|
| 251 | e += 0.5 * p[j] * q[j];
|
---|
| 252 | e = comm.GlobalSumRoot(e);
|
---|
| 253 |
|
---|
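  // Consistency check: E_total, computed from the final potentials, should
  // agree with E_total* = E_long + E_short_peak + E_short_spline - E_self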
  comm.PrintOnce(Debug, "E_long: %e", e_long);
  comm.PrintOnce(Debug, "E_short_peak: %e", e_short_peak);
  comm.PrintOnce(Debug, "E_short_spline: %e", e_short_spline);
  comm.PrintOnce(Debug, "E_self: %e", e_self);
  comm.PrintOnce(Debug, "E_total: %e", e);
  comm.PrintOnce(Debug, "E_total*: %e", e_long + e_short_peak + e_short_spline - e_self);
#endif
}