Changeset 716da7 for src/comm/comm_mpi.cpp
- Timestamp:
- Apr 24, 2012, 2:26:14 PM
- Children:
- b51c3b
- Parents:
- e3dbbf
- File:
- 1 edited
- src/comm/comm_mpi.cpp (modified) (10 diffs)
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
src/comm/comm_mpi.cpp
--- src/comm/comm_mpi.cpp (revision e3dbbf)
+++ src/comm/comm_mpi.cpp (revision 716da7)
@@ -232,4 +232,6 @@
   MPI_Alltoall(&send_sizes.front(), 1, MPI_INT, &recv_sizes.front(), 1, MPI_INT, comm_global);
 
+  assert(RequestsPending() == 0);
+
   /*
    * Send particles
@@ -281,17 +283,17 @@
 
 #ifdef VMG_ONE_SIDED
-  //if (!win_created) {
-  //  vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
-  //  const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
-  //  MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
-  //  win_created = true;
-  //}
-
-  //MPI_Win_fence(MPI_MODE_NOPRECEDE, win);
-
-  //for (iter=particles.begin(); iter!=particles.end(); ++iter)
-  //  MPI_Put(&iter->Pot(), 1, MPI_DOUBLE, iter->Rank(), iter->Index(), 1, MPI_DOUBLE, win);
-
-  //MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);
+  if (!win_created) {
+    vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
+    const vmg_int& num_particles_local = MG::GetFactory().GetObjectStorageVal<vmg_int>("PARTICLE_NUM_LOCAL");
+    MPI_Win_create(p, num_particles_local*sizeof(vmg_float), sizeof(vmg_float), info, comm_global, &win);
+    win_created = true;
+  }
+
+  MPI_Win_fence(MPI_MODE_NOPRECEDE, win);
+
+  for (iter=particles.begin(); iter!=particles.end(); ++iter)
+    MPI_Put(&iter->Pot(), 1, MPI_DOUBLE, iter->Rank(), iter->Index(), 1, MPI_DOUBLE, win);
+
+  MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);
 #else
   int rank, size;
@@ -299,6 +301,6 @@
   MPI_Comm_size(comm_global, &size);
 
-  std::vector< std::vector<vmg_float> > send_buffer_pot(size);
-  std::vector< std::vector<vmg_float> > recv_buffer_pot(size);
+  std::vector< std::vector<vmg_float> > send_buffer_float(size);
+  std::vector< std::vector<vmg_float> > recv_buffer_float(size);
   std::vector< std::vector<vmg_int> > send_buffer_index(size);
   std::vector< std::vector<vmg_int> > recv_buffer_index(size);
@@ -306,8 +308,12 @@
   vmg_int* size_receive = MG::GetFactory().GetObjectStorageArray<vmg_int>("PARTICLE_RECEIVER_ARRAY");
   vmg_float* p = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_POTENTIAL_ARRAY");
+  vmg_float* f = MG::GetFactory().GetObjectStorageArray<vmg_float>("PARTICLE_FIELD_ARRAY");
 
   // Build send buffer
   for (iter=particles.begin(); iter!=particles.end(); ++iter) {
-    send_buffer_pot[iter->Rank()].push_back(iter->Pot());
+    send_buffer_float[iter->Rank()].push_back(iter->Pot());
+    send_buffer_float[iter->Rank()].push_back(iter->Field()[0]);
+    send_buffer_float[iter->Rank()].push_back(iter->Field()[1]);
+    send_buffer_float[iter->Rank()].push_back(iter->Field()[2]);
     send_buffer_index[iter->Rank()].push_back(iter->Index());
   }
@@ -315,6 +321,6 @@
   // Send potentials
   for (int i=0; i<size; ++i) {
-    if (!send_buffer_pot[i].empty()) {
-      MPI_Isend(&send_buffer_pot[i].front(), send_buffer_pot[i].size(), MPI_DOUBLE, i, 699+rank, comm_global, &Request());
+    if (!send_buffer_float[i].empty()) {
+      MPI_Isend(&send_buffer_float[i].front(), send_buffer_float[i].size(), MPI_DOUBLE, i, 699+rank, comm_global, &Request());
       MPI_Isend(&send_buffer_index[i].front(), send_buffer_index[i].size(), MPI_INT, i, 32111+rank, comm_global, &Request());
     }
@@ -324,7 +330,7 @@
   for (int i=0; i<size; ++i) {
     if (size_receive[i] > 0) {
-      recv_buffer_pot[i].resize(size_receive[i]);
+      recv_buffer_float[i].resize(4*size_receive[i]);
       recv_buffer_index[i].resize(size_receive[i]);
-      MPI_Irecv(&recv_buffer_pot[i].front(), size_receive[i], MPI_DOUBLE, i, 699+i, comm_global, &Request());
+      MPI_Irecv(&recv_buffer_float[i].front(), 4*size_receive[i], MPI_DOUBLE, i, 699+i, comm_global, &Request());
       MPI_Irecv(&recv_buffer_index[i].front(), size_receive[i], MPI_INT, i, 32111+i, comm_global, &Request());
     }
@@ -335,49 +341,54 @@
   // Add potential values
   for (int i=0; i<size; ++i)
-    for (unsigned int j=0; j<size_receive[i]; ++j)
-      p[recv_buffer_index[i][j]] = recv_buffer_pot[i][j];
+    for (unsigned int j=0; j<size_receive[i]; ++j) {
+      p[recv_buffer_index[i][j]] = recv_buffer_float[i][4*j];
+      std::memcpy(&f[recv_buffer_index[i][j]], &recv_buffer_float[i][4*j+1], 3*sizeof(vmg_float));
+    }
 #endif
 
 }
 
-void CommMPI::CommLCListToGhosts(const Grid& grid, Particle::LinkedCellList& lc)
+void CommMPI::CommLCListToGhosts(Particle::LinkedCellList& lc)
 {
   VMG::MPI::DatatypesLocal types(lc, comm_global);
-  std::vector< std::vector<vmg_float> > send_buffer(types.NB().size());
-  std::vector< std::vector<vmg_float> > recv_buffer(types.Halo().size());
-  std::vector<vmg_int> send_size(types.NB().size());
+  std::vector<int> send_size(types.NB().size());
   vmg_int recv_size;
   std::list<Particle::Particle*>::iterator iter;
   Index ind;
+  Vector offset;
+
+  const Vector halo_length = lc.Local().HaloSize1() * lc.Extent().MeshWidth();
 
   lc.ClearHalo();
-
-  for (int i=0; i<3; ++i) {
-    for (Grid::iterator iter=lc.Iterators().Halo1()[i].Begin(); iter!=lc.Iterators().Halo1()[i].End(); ++iter)
-      assert(lc(*iter).size() == 0);
-    for (Grid::iterator iter=lc.Iterators().Halo2()[i].Begin(); iter!=lc.Iterators().Halo2()[i].End(); ++iter)
-      assert(lc(*iter).size() == 0);
-  }
 
   for (unsigned int i=0; i<types.NB().size(); ++i)
     if (types.NB()[i].Feasible()) {
+
+      for (int j=0; j<3; ++j)
+        if ((types.Offset()[i][j] < 0 && lc.Global().LocalBegin()[j] == 0) ||
+            (types.Offset()[i][j] > 0 && lc.Global().LocalEnd()[j] == lc.Global().GlobalSize()[j]))
+          offset[j] = -1 * types.Offset()[i][j] * lc.Extent().Size()[j];
+        else
+          offset[j] = 0;
+
       for (ind.X() = types.NB()[i].Starts().X(); ind.X() < types.NB()[i].Starts().X()+types.NB()[i].Subsizes().X(); ++ind.X())
         for (ind.Y() = types.NB()[i].Starts().Y(); ind.Y() < types.NB()[i].Starts().Y()+types.NB()[i].Subsizes().Y(); ++ind.Y())
           for (ind.Z() = types.NB()[i].Starts().Z(); ind.Z() < types.NB()[i].Starts().Z()+types.NB()[i].Subsizes().Z(); ++ind.Z())
            for (iter=lc(ind).begin(); iter!=lc(ind).end(); ++iter) {
-              if (grid.Global().LocalBegin()[i] == 0)
-                for (int j=0; j<3; ++j)
-                  send_buffer[i].push_back((*iter)->Pos()[j] + (i==j ? grid.Extent().Size()[j] : 0.0));
-              else
-                for (int j=0; j<3; ++j)
-                  send_buffer[i].push_back((*iter)->Pos()[j]);
-
-              send_buffer[i].push_back((*iter)->Charge());
+              for (int j=0; j<3; ++j)
+                types.NB()[i].Buffer().push_back((*iter)->Pos()[j] + offset[j]);
+              types.NB()[i].Buffer().push_back((*iter)->Charge());
+
+              assert(lc.Extent().Begin().IsComponentwiseLessOrEqual((*iter)->Pos()));
+              assert(lc.Extent().End().IsComponentwiseGreaterOrEqual((*iter)->Pos()));
+              assert(lc.Extent().Begin().IsComponentwiseLessOrEqual((*iter)->Pos() + offset + halo_length));
+              assert(lc.Extent().End().IsComponentwiseGreaterOrEqual((*iter)->Pos() + offset - halo_length));
           }
 
+      send_size[i] = types.NB()[i].Buffer().size();
       MPI_Isend(&send_size[i], 1, MPI_INT, types.NB()[i].Rank(), 2048+types.NB()[i].TagSend(), comm_global, &Request());
 
       if (send_size[i] > 0)
-        MPI_Isend(&send_buffer[i].front(), send_size[i], MPI_DOUBLE,
+        MPI_Isend(&types.NB()[i].Buffer().front(), send_size[i], MPI_DOUBLE,
                   types.NB()[i].Rank(), 4096+types.NB()[i].TagSend(),
                   comm_global, &Request());
@@ -388,6 +399,6 @@
     MPI_Recv(&recv_size, 1, MPI_INT, types.Halo()[i].Rank(), 2048+types.Halo()[i].TagReceive(), comm_global, MPI_STATUS_IGNORE);
     if (recv_size > 0) {
-      recv_buffer[i].resize(recv_size);
-      MPI_Irecv(&recv_buffer[i].front(), recv_size, MPI_DOUBLE,
+      types.Halo()[i].Buffer().resize(recv_size);
+      MPI_Irecv(&types.Halo()[i].Buffer().front(), recv_size, MPI_DOUBLE,
                 types.Halo()[i].Rank(), 4096+types.Halo()[i].TagReceive(),
                 comm_global, &Request());
@@ -397,7 +408,7 @@
   WaitAll();
 
-  for (unsigned int i=0; i<recv_buffer.size(); ++i)
-    for (unsigned int j=0; j<recv_buffer[i].size(); j+=4)
-      lc.AddParticleToHalo(&recv_buffer[i][j], recv_buffer[i][j+3]);
+  for (unsigned int i=0; i<types.Halo().size(); ++i)
+    for (unsigned int j=0; j<types.Halo()[i].Buffer().size(); j+=4)
+      lc.AddParticleToHalo(&types.Halo()[i].Buffer()[j], types.Halo()[i].Buffer()[j+3]);
 }
@@ -712,7 +723,7 @@
            << " BOUNDARY_END_2: " << grid.Local().BoundaryEnd2() << std::endl
            << " BOUNDARY_SIZE_2: " << grid.Local().BoundarySize2() << std::endl
-           << " FINER_BEGIN: " << grid.Local().FinerBeginFoo() << std::endl
-           << " FINER_END: " << grid.Local().FinerEndFoo() << std::endl
-           << " FINER_SIZE: " << grid.Local().FinerSizeFoo() << std::endl;
+           << " FINER_BEGIN: " << grid.Local().FinerBegin() << std::endl
+           << " FINER_END: " << grid.Local().FinerEnd() << std::endl
+           << " FINER_SIZE: " << grid.Local().FinerSize() << std::endl;
 
   if (rank == size-1)
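
The VMG_ONE_SIDED branch activated above follows MPI's fence-bounded RMA pattern: each rank exposes its local potential array through a window once, and every exchange is an epoch of MPI_Put calls bracketed by two MPI_Win_fence calls whose assert flags state that no local stores precede or follow the epoch. Below is a minimal self-contained sketch of that pattern; the array size and the choice of target rank are illustrative, not VMG's.

// Sketch of the fence-bounded one-sided epoch used in the VMG_ONE_SIDED branch.
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // Each rank exposes its local potential array through a window
  // (VMG does this once and caches it behind the win_created flag).
  std::vector<double> potentials(8, 0.0);
  MPI_Win win;
  MPI_Win_create(&potentials.front(), (MPI_Aint)(potentials.size() * sizeof(double)),
                 sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

  // Open the access epoch; MPI_MODE_NOPRECEDE says no local stores need flushing.
  MPI_Win_fence(MPI_MODE_NOPRECEDE, win);

  // Write one value directly into the next rank's window (illustrative target).
  double value = 1.0 + rank;
  MPI_Put(&value, 1, MPI_DOUBLE, (rank + 1) % size, rank % 8, 1, MPI_DOUBLE, win);

  // Close the epoch; no local stores occurred and no further RMA follows.
  MPI_Win_fence(MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED, win);

  MPI_Win_free(&win);
  MPI_Finalize();
  return 0;
}

Compared with the two-sided branch, this trades explicit Isend/Irecv matching for collective synchronization at the two fences.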
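In the two-sided branch, each particle now contributes four doubles per message: the potential followed by the three field components, so record j of a receive buffer decodes at offset 4*j. The following small sketch shows that pack/unpack convention; the Sample struct and variable names are placeholders, and it assumes the field array stores three contiguous components per particle.

// Sketch of the 4-doubles-per-particle packing used by send_buffer_float.
#include <cstring>
#include <cstdio>
#include <vector>

struct Sample { double pot; double field[3]; };  // placeholder particle record

int main()
{
  // Pack: potential first, then the three field components, per particle.
  std::vector<Sample> particles = {{2.5, {0.1, 0.2, 0.3}},
                                   {7.0, {1.0, 2.0, 3.0}}};
  std::vector<double> buffer;
  for (const Sample& s : particles) {
    buffer.push_back(s.pot);
    buffer.push_back(s.field[0]);
    buffer.push_back(s.field[1]);
    buffer.push_back(s.field[2]);
  }

  // Unpack: potential at 4*j, field copied from 4*j+1, as in the changeset.
  std::vector<double> p(particles.size());
  std::vector<double> f(3 * particles.size());
  for (std::size_t j = 0; j < particles.size(); ++j) {
    p[j] = buffer[4*j];
    std::memcpy(&f[3*j], &buffer[4*j + 1], 3 * sizeof(double));
  }

  std::printf("p[1] = %g, f[1] = (%g, %g, %g)\n", p[1], f[3], f[4], f[5]);
  return 0;
}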
Note: See TracChangeset for help on using the changeset viewer.
