DOLFINx
DOLFINx C++ interface
Scatterer.h
1// Copyright (C) 2022 Igor Baratta and Garth N. Wells
2//
3// This file is part of DOLFINx (https://www.fenicsproject.org)
4//
5// SPDX-License-Identifier: LGPL-3.0-or-later
6
7#pragma once
8
9#include "IndexMap.h"
10#include "MPI.h"
11#include "sort.h"
12#include <algorithm>
13#include <functional>
14#include <memory>
15#include <mpi.h>
16#include <numeric>
17#include <span>
18#include <type_traits>
19#include <vector>
20
21using namespace dolfinx;
22
23namespace dolfinx::common
24{
31template <class Allocator = std::allocator<std::int32_t>>
32class Scatterer
33{
34public:
36 using allocator_type = Allocator;
37
39 enum class type
40 {
41 neighbor, // use MPI neighborhood collectives
42 p2p // use MPI Isend/Irecv for communication
43 };
44
51 Scatterer(const IndexMap& map, int bs, const Allocator& alloc = Allocator())
52 : _bs(bs), _remote_inds(0, alloc), _local_inds(0, alloc),
53 _src(map.src().begin(), map.src().end()),
54 _dest(map.dest().begin(), map.dest().end())
55 {
56 if (dolfinx::MPI::size(map.comm()) == 1)
57 return;
58
59 // Check that src and dest ranks are unique and sorted
60 assert(std::is_sorted(_src.begin(), _src.end()));
61 assert(std::is_sorted(_dest.begin(), _dest.end()));
62
63 // Create communicators with directed edges:
64 // (0) owner -> ghost,
65 // (1) ghost -> owner
66 MPI_Comm comm0;
67 MPI_Dist_graph_create_adjacent(
68 map.comm(), _src.size(), _src.data(), MPI_UNWEIGHTED, _dest.size(),
69 _dest.data(), MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm0);
70 _comm0 = dolfinx::MPI::Comm(comm0, false);
71
72 MPI_Comm comm1;
73 MPI_Dist_graph_create_adjacent(
74 map.comm(), _dest.size(), _dest.data(), MPI_UNWEIGHTED, _src.size(),
75 _src.data(), MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm1);
76 _comm1 = dolfinx::MPI::Comm(comm1, false);
77
78 // Build permutation array that sorts ghost indices by owning rank
79 std::span owners = map.owners();
80 std::vector<std::int32_t> perm(owners.size());
81 std::iota(perm.begin(), perm.end(), 0);
82 dolfinx::argsort_radix<std::int32_t>(owners, perm);
83
84 // Sort (i) ghost indices and (ii) ghost index owners by rank
85 // (using perm array)
86 std::span ghosts = map.ghosts();
87 std::vector<int> owners_sorted(owners.size());
88 std::vector<std::int64_t> ghosts_sorted(owners.size());
89 std::transform(perm.begin(), perm.end(), owners_sorted.begin(),
90 [&owners](auto idx) { return owners[idx]; });
91 std::transform(perm.begin(), perm.end(), ghosts_sorted.begin(),
92 [&ghosts](auto idx) { return ghosts[idx]; });
93
94 // For data associated with ghost indices, packed by owning
95 // (neighbourhood) rank, compute sizes and displacements. I.e.,
96 // when sending ghost index data from this rank to the owning
97 // ranks, disp[i] is the first entry in the buffer sent to
98 // neighbourhood rank i, and disp[i + 1] - disp[i] is the number
99 // of values sent to rank i.
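// Illustrative example (hypothetical sizes): if this rank has two
// neighbourhood owner ranks holding 3 and 2 of its ghosts respectively,
// then _sizes_remote = {3, 2} and _displs_remote = {0, 3, 5}.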
100 _sizes_remote.resize(_src.size(), 0);
101 _displs_remote.resize(_src.size() + 1, 0);
102 std::vector<int>::iterator begin = owners_sorted.begin();
103 for (std::size_t i = 0; i < _src.size(); i++)
104 {
105 auto upper = std::upper_bound(begin, owners_sorted.end(), _src[i]);
106 int num_ind = std::distance(begin, upper);
107 _displs_remote[i + 1] = _displs_remote[i] + num_ind;
108 _sizes_remote[i] = num_ind;
109 begin = upper;
110 }
111
112 // For data associated with owned indices that are ghosted by
113 // other ranks, compute the size and displacement arrays. When
114 // sending data associated with ghost indices to the owner, these
115 // size and displacement arrays are for the receive buffer.
116
117 // Compute sizes and displacements of local data (how many local
118 // elements to be sent/received grouped by neighbors)
119 _sizes_local.resize(_dest.size());
120 _displs_local.resize(_sizes_local.size() + 1);
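// Note: the reserve(1) calls below keep data() pointing at allocated
// storage even when a rank has no neighbours, so that valid pointers
// are passed to the MPI call.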
121 _sizes_remote.reserve(1);
122 _sizes_local.reserve(1);
123 MPI_Neighbor_alltoall(_sizes_remote.data(), 1, MPI_INT32_T,
124 _sizes_local.data(), 1, MPI_INT32_T, _comm1.comm());
125 std::partial_sum(_sizes_local.begin(), _sizes_local.end(),
126 std::next(_displs_local.begin()));
127
128 assert((std::int32_t)ghosts_sorted.size() == _displs_remote.back());
130
131 // Send ghost global indices to owning rank, and receive owned
132 // indices that are ghosts on other ranks
133 std::vector<std::int64_t> recv_buffer(_displs_local.back(), 0);
134 MPI_Neighbor_alltoallv(ghosts_sorted.data(), _sizes_remote.data(),
135 _displs_remote.data(), MPI_INT64_T,
136 recv_buffer.data(), _sizes_local.data(),
137 _displs_local.data(), MPI_INT64_T, _comm1.comm());
138
139 const std::array<std::int64_t, 2> range = map.local_range();
140#ifndef NDEBUG
141 // Check that all received indices are within the owned range
142 std::for_each(recv_buffer.begin(), recv_buffer.end(),
143 [range](auto idx)
144 { assert(idx >= range[0] and idx < range[1]); });
145#endif
146
147 // Scale sizes and displacements by block size
148 {
149 auto rescale = [](auto& x, int bs)
150 {
151 std::transform(x.begin(), x.end(), x.begin(),
152 [bs](auto e) { return e *= bs; });
153 };
154 rescale(_sizes_local, bs);
155 rescale(_displs_local, bs);
156 rescale(_sizes_remote, bs);
157 rescale(_displs_remote, bs);
158 }
159
160 // Expand local indices using block size and convert them from
161 // global to local numbering
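// Illustrative example (hypothetical values): with _bs = 2 and an owned
// range of [8, 20), a received global index 11 expands to local entries
// (11*2 + 0) - 16 = 6 and (11*2 + 1) - 16 = 7.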
162 _local_inds = std::vector<std::int32_t, allocator_type>(
163 recv_buffer.size() * _bs, alloc);
164 std::int64_t offset = range[0] * _bs;
165 for (std::size_t i = 0; i < recv_buffer.size(); i++)
166 for (int j = 0; j < _bs; j++)
167 _local_inds[i * _bs + j] = (recv_buffer[i] * _bs + j) - offset;
168
169 // Expand remote indices using block size
170 _remote_inds
171 = std::vector<std::int32_t, allocator_type>(perm.size() * _bs, alloc);
172 for (std::size_t i = 0; i < perm.size(); i++)
173 for (int j = 0; j < _bs; j++)
174 _remote_inds[i * _bs + j] = perm[i] * _bs + j;
175 }
176
197 template <typename T>
198 void scatter_fwd_begin(std::span<const T> send_buffer,
199 std::span<T> recv_buffer,
200 std::span<MPI_Request> requests,
201 Scatterer::type type = type::neighbor) const
202 {
203 // Return early if there are no incoming or outgoing edges
204 if (_sizes_local.empty() and _sizes_remote.empty())
205 return;
206
207 switch (type)
208 {
209 case type::neighbor:
210 {
211 assert(requests.size() == std::size_t(1));
212 MPI_Ineighbor_alltoallv(
213 send_buffer.data(), _sizes_local.data(), _displs_local.data(),
214 dolfinx::MPI::mpi_type<T>(), recv_buffer.data(), _sizes_remote.data(),
215 _displs_remote.data(), dolfinx::MPI::mpi_type<T>(), _comm0.comm(),
216 requests.data());
217 break;
218 }
219 case type::p2p:
220 {
221 assert(requests.size() == _dest.size() + _src.size());
222 for (std::size_t i = 0; i < _src.size(); i++)
223 {
224 MPI_Irecv(recv_buffer.data() + _displs_remote[i], _sizes_remote[i],
225 dolfinx::MPI::mpi_type<T>(), _src[i], MPI_ANY_TAG,
226 _comm0.comm(), &requests[i]);
227 }
228
229 for (std::size_t i = 0; i < _dest.size(); i++)
230 {
231 MPI_Isend(send_buffer.data() + _displs_local[i], _sizes_local[i],
232 dolfinx::MPI::mpi_type<T>(), _dest[i], 0, _comm0.comm(),
233 &requests[i + _src.size()]);
234 }
235 break;
236 }
237 default:
238 throw std::runtime_error("Scatter::type not recognized");
239 }
240 }
241
250 void scatter_fwd_end(std::span<MPI_Request> requests) const
251 {
252 // Return early if there are no incoming or outgoing edges
253 if (_sizes_local.empty() and _sizes_remote.empty())
254 return;
255
256 // Wait for communication to complete
257 MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
258 }
259
280 template <typename T, typename F>
281 requires std::is_invocable_v<F, std::span<const T>,
282 std::span<const std::int32_t>, std::span<T>>
283 void scatter_fwd_begin(std::span<const T> local_data,
284 std::span<T> local_buffer, std::span<T> remote_buffer,
285 F pack_fn, std::span<MPI_Request> requests,
286 Scatterer::type type = type::neighbor) const
287 {
288 assert(local_buffer.size() == _local_inds.size());
289 assert(remote_buffer.size() == _remote_inds.size());
290 pack_fn(local_data, _local_inds, local_buffer);
291 scatter_fwd_begin(std::span<const T>(local_buffer), remote_buffer, requests,
292 type);
293 }
294
314 template <typename T, typename F>
315 requires std::is_invocable_v<F, std::span<const T>,
316 std::span<const std::int32_t>, std::span<T>,
317 std::function<T(T, T)>>
318 void scatter_fwd_end(std::span<const T> remote_buffer,
319 std::span<T> remote_data, F unpack_fn,
320 std::span<MPI_Request> requests) const
321 {
322 assert(remote_buffer.size() == _remote_inds.size());
323 assert(remote_data.size() == _remote_inds.size());
324 scatter_fwd_end(requests);
325 unpack_fn(remote_buffer, _remote_inds, remote_data,
326 [](T /*a*/, T b) { return b; });
327 }
328
340 template <typename T>
341 void scatter_fwd(std::span<const T> local_data,
342 std::span<T> remote_data) const
343 {
344 std::vector<MPI_Request> requests(1, MPI_REQUEST_NULL);
345 std::vector<T> local_buffer(local_buffer_size(), 0);
346 std::vector<T> remote_buffer(remote_buffer_size(), 0);
347 auto pack_fn = [](auto&& in, auto&& idx, auto&& out)
348 {
349 for (std::size_t i = 0; i < idx.size(); ++i)
350 out[i] = in[idx[i]];
351 };
352 scatter_fwd_begin(local_data, std::span<T>(local_buffer),
353 std::span<T>(remote_buffer), pack_fn,
354 std::span<MPI_Request>(requests));
355
356 auto unpack_fn = [](auto&& in, auto&& idx, auto&& out, auto op)
357 {
358 for (std::size_t i = 0; i < idx.size(); ++i)
359 out[idx[i]] = op(out[idx[i]], in[i]);
360 };
361
362 scatter_fwd_end(std::span<const T>(remote_buffer), remote_data, unpack_fn,
363 std::span<MPI_Request>(requests));
364 }
365
392 template <typename T>
393 void scatter_rev_begin(std::span<const T> send_buffer,
394 std::span<T> recv_buffer,
395 std::span<MPI_Request> requests,
396 Scatterer::type type = type::neighbor) const
397 {
398 // Return early if there are no incoming or outgoing edges
399 if (_sizes_local.empty() and _sizes_remote.empty())
400 return;
401
402 // Send and receive data
403
404 switch (type)
405 {
406 case type::neighbor:
407 {
408 assert(requests.size() == 1);
409 MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_remote.data(),
410 _displs_remote.data(), MPI::mpi_type<T>(),
411 recv_buffer.data(), _sizes_local.data(),
412 _displs_local.data(), MPI::mpi_type<T>(),
413 _comm1.comm(), &requests[0]);
414 break;
415 }
416 case type::p2p:
417 {
418 assert(requests.size() == _dest.size() + _src.size());
419 // Start non-blocking receives from ranks that ghost this rank's owned indices.
420 for (std::size_t i = 0; i < _dest.size(); i++)
421 {
422 MPI_Irecv(recv_buffer.data() + _displs_local[i], _sizes_local[i],
423 dolfinx::MPI::mpi_type<T>(), _dest[i], MPI_ANY_TAG,
424 _comm0.comm(), &requests[i]);
425 }
426
427 // Start non-blocking sends of ghost data to the ranks that own the
428 // corresponding indices.
429 for (std::size_t i = 0; i < _src.size(); i++)
430 {
431 MPI_Isend(send_buffer.data() + _displs_remote[i], _sizes_remote[i],
432 dolfinx::MPI::mpi_type<T>(), _src[i], 0, _comm0.comm(),
433 &requests[i + _dest.size()]);
434 }
435 break;
436 }
437 default:
438 throw std::runtime_error("Scatter::type not recognized");
439 }
440 }
441
450 void scatter_rev_end(std::span<MPI_Request> request) const
451 {
452 // Return early if there are no incoming or outgoing edges
453 if (_sizes_local.empty() and _sizes_remote.empty())
454 return;
455
456 // Wait for communication to complete
457 MPI_Waitall(request.size(), request.data(), MPI_STATUSES_IGNORE);
458 }
459
484 template <typename T, typename F>
485 requires std::is_invocable_v<F, std::span<const T>,
486 std::span<const std::int32_t>, std::span<T>>
487 void scatter_rev_begin(std::span<const T> remote_data,
488 std::span<T> remote_buffer, std::span<T> local_buffer,
489 F pack_fn, std::span<MPI_Request> request,
490 Scatterer::type type = type::neighbor) const
491 {
492 assert(local_buffer.size() == _local_inds.size());
493 assert(remote_buffer.size() == _remote_inds.size());
494 pack_fn(remote_data, _remote_inds, remote_buffer);
495 scatter_rev_begin(std::span<const T>(remote_buffer), local_buffer, request,
496 type);
497 }
498
518 template <typename T, typename F, typename BinaryOp>
519 requires std::is_invocable_v<F, std::span<const T>,
520 std::span<const std::int32_t>, std::span<T>,
521 BinaryOp>
522 and std::is_invocable_r_v<T, BinaryOp, T, T>
523 void scatter_rev_end(std::span<const T> local_buffer, std::span<T> local_data,
524 F unpack_fn, BinaryOp op, std::span<MPI_Request> request)
525 {
526 assert(local_buffer.size() == _local_inds.size());
527 if (!_local_inds.empty())
528 {
529 assert(*std::max_element(_local_inds.begin(), _local_inds.end())
530 < std::int32_t(local_data.size()));
531 }
532 scatter_rev_end(request);
533 unpack_fn(local_buffer, _local_inds, local_data, op);
534 }
535
538 template <typename T, typename BinaryOp>
539 void scatter_rev(std::span<T> local_data, std::span<const T> remote_data,
540 BinaryOp op)
541 {
542 std::vector<T> local_buffer(local_buffer_size(), 0);
543 std::vector<T> remote_buffer(remote_buffer_size(), 0);
544 auto pack_fn = [](auto&& in, auto&& idx, auto&& out)
545 {
546 for (std::size_t i = 0; i < idx.size(); ++i)
547 out[i] = in[idx[i]];
548 };
549 auto unpack_fn = [](auto&& in, auto&& idx, auto&& out, auto op)
550 {
551 for (std::size_t i = 0; i < idx.size(); ++i)
552 out[idx[i]] = op(out[idx[i]], in[i]);
553 };
554 std::vector<MPI_Request> request(1, MPI_REQUEST_NULL);
555 scatter_rev_begin(remote_data, std::span<T>(remote_buffer),
556 std::span<T>(local_buffer), pack_fn,
557 std::span<MPI_Request>(request));
558 scatter_rev_end(std::span<const T>(local_buffer), local_data, unpack_fn, op,
559 std::span<MPI_Request>(request));
560 }
561
565 std::int32_t local_buffer_size() const noexcept { return _local_inds.size(); }
566
570 std::int32_t remote_buffer_size() const noexcept
571 {
572 return _remote_inds.size();
573 }
574
578 const std::vector<std::int32_t>& local_indices() const noexcept
579 {
580 return _local_inds;
581 }
582
585 const std::vector<std::int32_t>& remote_indices() const noexcept
586 {
587 return _remote_inds;
588 }
589
593 int bs() const noexcept { return _bs; }
594
597 std::vector<MPI_Request> create_request_vector(Scatterer::type type
598 = type::neighbor)
599 {
600 std::vector<MPI_Request> requests;
601 switch (type)
602 {
603 case type::neighbor:
604 requests = {MPI_REQUEST_NULL};
605 break;
606 case type::p2p:
607 requests.resize(_dest.size() + _src.size(), MPI_REQUEST_NULL);
608 break;
609 default:
610 throw std::runtime_error("Scatter::type not recognized");
611 }
612 return requests;
613 }
614
615private:
616 // Block size
617 int _bs;
618
619 // Communicator where the source ranks own the indices in the caller's
620 // halo, and the destination ranks 'ghost' indices owned by the
621 // caller. I.e.,
622 // - in-edges (src) are from ranks that own my ghosts
623 // - out-edges (dest) go to ranks that 'ghost' my owned indices
624 dolfinx::MPI::Comm _comm0{MPI_COMM_NULL};
625
626 // Communicator where the source ranks have ghost indices that are
627 // owned by the caller, and the destination ranks are the owners of
628 // indices in the caller's halo region. I.e.,
629 // - in-edges (src) are from ranks that 'ghost' my owned indices
630 // - out-edges (dest) are to the owning ranks of my ghost indices
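// Example: if rank 1 ghosts an index owned by rank 0, then on rank 0
// _comm0 has an out-edge to rank 1 and _comm1 an in-edge from rank 1,
// while on rank 1 _comm0 has an in-edge from rank 0 and _comm1 an
// out-edge to rank 0.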
631 dolfinx::MPI::Comm _comm1{MPI_COMM_NULL};
632
633 // Permutation indices used to pack and unpack ghost data (remote)
634 std::vector<std::int32_t, allocator_type> _remote_inds;
635
636 // Number of remote indices (ghosts) for each neighbor process
637 std::vector<int> _sizes_remote;
638
639 // Displacements of remote data for mpi scatter and gather
640 std::vector<int> _displs_remote;
641
642 // Permutation indices used to pack and unpack local shared data
643 // (owned indices that are shared with other processes). Indices are
644 // grouped by neighbor process.
645 std::vector<std::int32_t, allocator_type> _local_inds;
646
647 // Number of local shared indices per neighbor process
648 std::vector<int> _sizes_local;
649
650 // Displacements of local data for mpi scatter and gather
651 std::vector<int> _displs_local;
652
653 // Set of ranks that own this rank's ghost indices
654 // FIXME: Should we store the index map instead?
655 std::vector<int> _src;
656
657 // Set of ranks that ghost indices owned by this rank
658 // FIXME: Should we store the index map instead?
659 std::vector<int> _dest;
660};
661} // namespace dolfinx::common
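
A minimal usage sketch (not part of the header): it assumes data stored as bs values per owned index followed by bs values per ghost index, and uses the IndexMap accessors size_local() and num_ghosts(); the function and variable names are placeholders. A Scatterer built from the IndexMap updates the ghost entries with the blocking forward scatter.

#include <cstddef>
#include <cstdint>
#include <span>
#include <vector>
#include <dolfinx/common/IndexMap.h>
#include <dolfinx/common/Scatterer.h>

// Update the ghost part of `x`, which stores `bs` values per owned index
// followed by `bs` values per ghost index (an assumption of this example)
void update_ghost_values(const dolfinx::common::IndexMap& map, int bs,
                         std::vector<double>& x)
{
  dolfinx::common::Scatterer<> sc(map, bs);

  const std::size_t n_owned = std::size_t(map.size_local()) * bs;
  std::span<const double> owned(x.data(), n_owned);
  std::span<double> ghosts(x.data() + n_owned,
                           std::size_t(map.num_ghosts()) * bs);

  // Blocking convenience wrapper: packs owned data, communicates and
  // unpacks into the ghost entries
  sc.scatter_fwd(owned, ghosts);
}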
Duplicates an MPI communicator and manages its lifetime.
Definition MPI.h:43
This class represents the distribution of index arrays across processes. An index array is a contiguous ...
Definition IndexMap.h:94
std::span< const int > owners() const
The ranks that own each ghost index.
Definition IndexMap.h:205
std::array< std::int64_t, 2 > local_range() const noexcept
Range of indices (global) owned by this process.
Definition IndexMap.cpp:861
std::span< const std::int64_t > ghosts() const noexcept
Local-to-global map for ghosts (local indexing beyond end of local range)
Definition IndexMap.cpp:875
MPI_Comm comm() const
Return the MPI communicator that the map is defined on.
Definition IndexMap.cpp:940
A Scatterer supports the MPI scattering and gathering of data that is associated with a common::Index...
Definition Scatterer.h:33
std::vector< MPI_Request > create_request_vector(Scatterer::type type=type::neighbor)
Create a vector of MPI_Requests for a given Scatterer::type.
Definition Scatterer.h:597
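
A sketch of the split-phase path, assuming a Scatterer sc for double-valued data; the function name forward_scatter_split and the argument x_owned are placeholders. The request vector from create_request_vector() is handed to scatter_fwd_begin/scatter_fwd_end so communication can be overlapped with local work.

#include <cstdint>
#include <mpi.h>
#include <span>
#include <vector>
#include <dolfinx/common/Scatterer.h>

// Pack owned values, start the forward scatter, and return the received
// ghost buffer (ordered as described by remote_indices())
std::vector<double>
forward_scatter_split(dolfinx::common::Scatterer<>& sc,
                      std::span<const double> x_owned)
{
  std::vector<double> send_buf(sc.local_buffer_size());
  std::vector<double> recv_buf(sc.remote_buffer_size());
  std::vector<MPI_Request> req = sc.create_request_vector();

  // Pack owned values that other ranks ghost (grouped by destination rank)
  const std::vector<std::int32_t>& idx = sc.local_indices();
  for (std::size_t i = 0; i < idx.size(); ++i)
    send_buf[i] = x_owned[idx[i]];

  sc.scatter_fwd_begin(std::span<const double>(send_buf),
                       std::span<double>(recv_buf),
                       std::span<MPI_Request>(req));
  // ... local computation can overlap with the communication here ...
  sc.scatter_fwd_end(std::span<MPI_Request>(req));

  // recv_buf[i] holds the value for ghost position remote_indices()[i]
  return recv_buf;
}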
Allocator allocator_type
The allocator type.
Definition Scatterer.h:36
void scatter_fwd_end(std::span< MPI_Request > requests) const
Complete a non-blocking send from the local owner to process ranks that have the index as a ghost.
Definition Scatterer.h:250
void scatter_fwd(std::span< const T > local_data, std::span< T > remote_data) const
Scatter data associated with owned indices to ghosting ranks.
Definition Scatterer.h:341
Scatterer(const IndexMap &map, int bs, const Allocator &alloc=Allocator())
Create a scatterer.
Definition Scatterer.h:51
std::int32_t remote_buffer_size() const noexcept
Buffer size for remote data (ghosts) used in forward and reverse communication.
Definition Scatterer.h:570
std::int32_t local_buffer_size() const noexcept
Size of buffer for local data (owned and shared) used in forward and reverse communication.
Definition Scatterer.h:565
void scatter_fwd_end(std::span< const T > remote_buffer, std::span< T > remote_data, F unpack_fn, std::span< MPI_Request > requests) const
Complete a non-blocking send from the local owner to process ranks that have the index as a ghost,...
Definition Scatterer.h:318
void scatter_rev_end(std::span< const T > local_buffer, std::span< T > local_data, F unpack_fn, BinaryOp op, std::span< MPI_Request > request)
End the reverse scatter communication, and unpack the received local buffer into local data.
Definition Scatterer.h:523
void scatter_rev_begin(std::span< const T > remote_data, std::span< T > remote_buffer, std::span< T > local_buffer, F pack_fn, std::span< MPI_Request > request, Scatterer::type type=type::neighbor) const
Scatter data associated with ghost indices to owning ranks.
Definition Scatterer.h:487
type
Types of MPI communication pattern used by the Scatterer.
Definition Scatterer.h:40
void scatter_rev_end(std::span< MPI_Request > request) const
End the reverse scatter communication.
Definition Scatterer.h:450
void scatter_fwd_begin(std::span< const T > send_buffer, std::span< T > recv_buffer, std::span< MPI_Request > requests, Scatterer::type type=type::neighbor) const
Start a non-blocking send of owned data to ranks that ghost the data.
Definition Scatterer.h:198
const std::vector< std::int32_t > & local_indices() const noexcept
Return a vector of local indices (owned) used to pack/unpack local data. These indices are grouped by...
Definition Scatterer.h:578
const std::vector< std::int32_t > & remote_indices() const noexcept
Return a vector of remote indices (ghosts) used to pack/unpack ghost data. These indices are grouped ...
Definition Scatterer.h:585
void scatter_rev(std::span< T > local_data, std::span< const T > remote_data, BinaryOp op)
Scatter data associated with ghost indices to ranks that own the indices.
Definition Scatterer.h:539
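
A sketch of reverse accumulation, assuming the same double-valued owned/ghost layout as above; accumulate_ghosts is a placeholder name. Each ghost contribution is sent to its owning rank and combined with the owned value using the supplied binary operation.

#include <functional>
#include <span>
#include <dolfinx/common/Scatterer.h>

// Add ghost contributions into the owned values they refer to
void accumulate_ghosts(dolfinx::common::Scatterer<>& sc,
                       std::span<double> owned,
                       std::span<const double> ghosts)
{
  sc.scatter_rev(owned, ghosts, std::plus<double>());
}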
int bs() const noexcept
The number of values (block size) to send per index in the common::IndexMap used to create the scatterer.
Definition Scatterer.h:593
void scatter_fwd_begin(std::span< const T > local_data, std::span< T > local_buffer, std::span< T > remote_buffer, F pack_fn, std::span< MPI_Request > requests, Scatterer::type type=type::neighbor) const
Scatter data associated with owned indices to ghosting ranks.
Definition Scatterer.h:283
void scatter_rev_begin(std::span< const T > send_buffer, std::span< T > recv_buffer, std::span< MPI_Request > requests, Scatterer::type type=type::neighbor) const
Start a non-blocking send of ghost data to ranks that own the data.
Definition Scatterer.h:393
int size(MPI_Comm comm)
Return size of the group (number of processes) associated with the communicator.
Definition MPI.cpp:72
Miscellaneous classes, functions and types.
Definition dolfinx_common.h:8
Top-level namespace.
Definition defines.h:12