Package: release.debian.org
Followup-For: Bug #1109543

debdiff for dolfinx-mpc 0.9.1-2:
diff -Nru dolfinx-mpc-0.9.1/debian/changelog dolfinx-mpc-0.9.1/debian/changelog
--- dolfinx-mpc-0.9.1/debian/changelog  2025-03-10 21:45:46.000000000 +0100
+++ dolfinx-mpc-0.9.1/debian/changelog  2025-07-09 12:33:18.000000000 +0200
@@ -1,3 +1,10 @@
+dolfinx-mpc (0.9.1-2) unstable; urgency=medium
+
+  * debian patch free_MPI_communicator_PR171.patch applies upstream
+    PR#171 (release 0.9.2) to free MPI communicators after use
+
+ -- Drew Parsons <dpars...@debian.org>  Wed, 09 Jul 2025 12:33:18 +0200
+
 dolfinx-mpc (0.9.1-1) unstable; urgency=medium
 
   * New upstream release
diff -Nru dolfinx-mpc-0.9.1/debian/patches/free_MPI_communicator_PR171.patch dolfinx-mpc-0.9.1/debian/patches/free_MPI_communicator_PR171.patch
--- dolfinx-mpc-0.9.1/debian/patches/free_MPI_communicator_PR171.patch  1970-01-01 01:00:00.000000000 +0100
+++ dolfinx-mpc-0.9.1/debian/patches/free_MPI_communicator_PR171.patch  2025-07-09 12:33:18.000000000 +0200
@@ -0,0 +1,156 @@
+From 40298081b822513d1b188d7d5d2ed8f66395ef87 Mon Sep 17 00:00:00 2001
+From: jorgensd <dokke...@gmail.com>
+Date: Thu, 3 Jul 2025 06:38:31 +0000
+Subject: [PATCH 1/2] Free communicators after usage. Should resolve:  #170
+
+---
+ cpp/ContactConstraint.h  | 21 +++++++++++++++++----
+ cpp/PeriodicConstraint.h | 10 ++++++++--
+ cpp/utils.cpp            |  3 ++-
+ cpp/utils.h              |  3 +++
+ 4 files changed, 30 insertions(+), 7 deletions(-)
+
+Index: dolfinx-mpc/cpp/ContactConstraint.h
+===================================================================
+--- dolfinx-mpc.orig/cpp/ContactConstraint.h   2025-07-09 12:31:25.942819506 +0200
++++ dolfinx-mpc/cpp/ContactConstraint.h        2025-07-09 12:31:25.938819426 +0200
+@@ -583,6 +583,9 @@
+                           num_slaves_recv3.data(), disp3.data(),
+                           dolfinx::MPI::mpi_type<U>(), neighborhood_comms[0]);
+ 
++  int err0 = MPI_Comm_free(&neighborhood_comms[0]);
++  dolfinx::MPI::check_error(comm, err0);
++
+   // Compute off-process contributions
+   mpc_data<T> remote_data;
+   {
+@@ -735,6 +738,9 @@
+   /// Wait for all communication to finish
+   MPI_Waitall(4, requests.data(), status.data());
+ 
++  int err1 = MPI_Comm_free(&neighborhood_comms[1]);
++  dolfinx::MPI::check_error(comm, err1);
++
+   // Move the masters, coeffs and owners from the input adjacency list
+   // to one where each node corresponds to an entry in slave_indices_remote
+  std::vector<std::int32_t> offproc_offsets(slave_indices_remote.size() + 1, 0);
+@@ -953,10 +959,6 @@
+   std::array<MPI_Comm, 2> neighborhood_comms
+       = create_neighborhood_comms(comm, meshtags, has_slave, master_marker);
+ 
+-  // Create communicator local_blocks -> ghost_block
+-  MPI_Comm slave_to_ghost
+-      = create_owner_to_ghost_comm(local_blocks, ghost_blocks, imap);
+-
+   /// Compute which rank (relative to neighbourhood) to send each ghost to
+   std::span<const int> ghost_owners = imap->owners();
+ 
+@@ -1171,6 +1173,9 @@
+                           num_block_coordinates.data(), coordinate_disp.data(),
+                           dolfinx::MPI::mpi_type<U>(), neighborhood_comms[0]);
+ 
++  int err0 = MPI_Comm_free(&neighborhood_comms[0]);
++  dolfinx::MPI::check_error(comm, err0);
++
+   // Vector for processes with slaves, mapping slaves with
+   // collision on this process
+   std::vector<std::vector<std::int64_t>> collision_slaves(indegree);
+@@ -1394,6 +1399,9 @@
+       recv_num_found_blocks.data(), inc_block_disp.data(),
+       dolfinx::MPI::mpi_type<std::int32_t>(), neighborhood_comms[1]);
+ 
++  int err1 = MPI_Comm_free(&neighborhood_comms[1]);
++  dolfinx::MPI::check_error(comm, err1);
++
+   // Iterate through the processors
+   for (std::size_t i = 0; i < src_ranks_rev.size(); ++i)
+   {
+@@ -1476,6 +1484,8 @@
+       ghost_slaves[i * tdim + j] = ghost_blocks[i] * block_size + j;
+ 
+   // Compute source and dest ranks of communicator
++  MPI_Comm slave_to_ghost
++      = create_owner_to_ghost_comm(local_blocks, ghost_blocks, imap);
+   auto neighbour_ranks = dolfinx_mpc::compute_neighborhood(slave_to_ghost);
+   const std::vector<int>& src_ranks_ghost = neighbour_ranks.first;
+   const std::vector<int>& dest_ranks_ghost = neighbour_ranks.second;
+@@ -1640,6 +1650,9 @@
+       disp_recv_ghost_masters.data(), dolfinx::MPI::mpi_type<std::int32_t>(),
+       slave_to_ghost);
+ 
++  int err3 = MPI_Comm_free(&slave_to_ghost);
++  dolfinx::MPI::check_error(comm, err3);
++
+   // Accumulate offsets of masters from different processors
+   std::vector<std::int32_t> ghost_offsets = {0};
+   for (std::size_t i = 0; i < src_ranks_ghost.size(); ++i)
+Index: dolfinx-mpc/cpp/PeriodicConstraint.h
+===================================================================
+--- dolfinx-mpc.orig/cpp/PeriodicConstraint.h  2025-07-09 12:31:25.942819506 +0200
++++ dolfinx-mpc/cpp/PeriodicConstraint.h       2025-07-09 12:31:25.938819426 +0200
+@@ -246,7 +246,7 @@
+   // Slave block owners -> Process with possible masters
+   std::vector<int> s_to_m_weights(s_to_m_ranks.size(), 1);
+   std::vector<int> m_to_s_weights(m_to_s_ranks.size(), 1);
+-  auto slave_to_master = MPI_COMM_NULL;
++  MPI_Comm slave_to_master = MPI_COMM_NULL;
+   MPI_Dist_graph_create_adjacent(
+       mesh->comm(), (int)m_to_s_ranks.size(), m_to_s_ranks.data(),
+       m_to_s_weights.data(), (int)s_to_m_ranks.size(), s_to_m_ranks.data(),
+@@ -335,6 +335,9 @@
+       dolfinx::MPI::mpi_type<U>(), coords_recvb.data(), num_recv_slaves.data(),
+       disp_in.data(), dolfinx::MPI::mpi_type<U>(), slave_to_master);
+ 
++  int err = MPI_Comm_free(&slave_to_master);
++  dolfinx::MPI::check_error(mesh->comm(), err);
++
+   // Reset in_displacements to be per block for later usage
+   auto d_3 = [](auto& num) { num /= 3; };
+   std::ranges::for_each(disp_in, d_3);
+@@ -426,7 +429,7 @@
+ 
+   // Create inverse communicator
+   // Procs with possible masters -> slave block owners
+-  auto master_to_slave = MPI_COMM_NULL;
++  MPI_Comm master_to_slave = MPI_COMM_NULL;
+   MPI_Dist_graph_create_adjacent(
+       mesh->comm(), (int)s_to_m_ranks.size(), s_to_m_ranks.data(),
+       s_to_m_weights.data(), (int)m_to_s_ranks.size(), m_to_s_ranks.data(),
+@@ -438,6 +441,9 @@
+       num_masters_per_slave_remote, masters_remote, coeffs_remote,
+       owners_remote);
+ 
++  err = MPI_Comm_free(&master_to_slave);
++  dolfinx::MPI::check_error(mesh->comm(), err);
++
+   // Append found slaves/master pairs
+   dolfinx_mpc::append_master_data<T>(
+       recv_data, searching_dofs, slaves, masters, coeffs, owners,
+Index: dolfinx-mpc/cpp/utils.cpp
+===================================================================
+--- dolfinx-mpc.orig/cpp/utils.cpp     2025-07-09 12:31:25.942819506 +0200
++++ dolfinx-mpc/cpp/utils.cpp  2025-07-09 12:31:25.938819426 +0200
+@@ -116,7 +116,8 @@
+                                  source_weights.data(), dest_edges.size(),
+                                  dest_edges.data(), dest_weights.data(),
+                                  MPI_INFO_NULL, false, &comm_loc);
+-
++  int err = MPI_Comm_free(&comm);
++  dolfinx::MPI::check_error(index_map->comm(), err);
+   return comm_loc;
+ }
+ 
+Index: dolfinx-mpc/cpp/utils.h
+===================================================================
+--- dolfinx-mpc.orig/cpp/utils.h       2025-07-09 12:31:25.942819506 +0200
++++ dolfinx-mpc/cpp/utils.h    2025-07-09 12:31:25.938819426 +0200
+@@ -944,6 +944,9 @@
+                           disp_in_masters.data(), dolfinx::MPI::mpi_type<T>(),
+                           local_to_ghost, &ghost_requests[4]);
+ 
++  int err = MPI_Comm_free(&local_to_ghost);
++  dolfinx::MPI::check_error(imap.comm(), err);
++
+   mpc_data<T> ghost_data;
+   ghost_data.slaves = recv_local;
+   ghost_data.offsets = recv_num;
diff -Nru dolfinx-mpc-0.9.1/debian/patches/series dolfinx-mpc-0.9.1/debian/patches/series
--- dolfinx-mpc-0.9.1/debian/patches/series     2025-03-10 21:45:46.000000000 +0100
+++ dolfinx-mpc-0.9.1/debian/patches/series     2025-07-09 12:33:18.000000000 +0200
@@ -2,3 +2,4 @@
 tests_no_numba.patch
 cmake_no_runpath.patch
 python_local_LICENSE.patch
+free_MPI_communicator_PR171.patch
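
For reference, each hunk applies the same pattern: the neighbourhood
communicator created for a communication phase is released with
MPI_Comm_free once the last collective on it has completed, and the
return code is checked with dolfinx::MPI::check_error. Below is a
minimal standalone sketch of that pattern in plain MPI (not part of the
debdiff): the check_error helper is a local stand-in for
dolfinx::MPI::check_error, and the duplicated phase_comm stands in for
the communicators the library builds with create_neighborhood_comms /
MPI_Dist_graph_create_adjacent.

// Sketch only: free a phase communicator as soon as its last collective
// has returned, checking the MPI return code.
#include <mpi.h>
#include <cstdio>

// Local stand-in for dolfinx::MPI::check_error: report and abort on failure.
static void check_error(MPI_Comm comm, int err)
{
  if (err != MPI_SUCCESS)
  {
    char msg[MPI_MAX_ERROR_STRING];
    int len = 0;
    MPI_Error_string(err, msg, &len);
    std::fprintf(stderr, "MPI error: %s\n", msg);
    MPI_Abort(comm, err);
  }
}

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  // Communicator used for one communication phase (stand-in for the
  // graph communicators created by create_neighborhood_comms).
  MPI_Comm phase_comm = MPI_COMM_NULL;
  check_error(MPI_COMM_WORLD, MPI_Comm_dup(MPI_COMM_WORLD, &phase_comm));

  int rank = 0;
  MPI_Comm_rank(phase_comm, &rank);
  int sum = 0;
  MPI_Allreduce(&rank, &sum, 1, MPI_INT, MPI_SUM, phase_comm);

  // The fix applied by the patch: release the communicator immediately
  // after the phase, instead of leaving the handle allocated once the
  // phase has finished.
  int err = MPI_Comm_free(&phase_comm);
  check_error(MPI_COMM_WORLD, err);

  if (rank == 0)
    std::printf("sum of ranks: %d\n", sum);

  MPI_Finalize();
  return 0;
}

Freeing each communicator right after its last use keeps repeated
constraint assembly from accumulating MPI communicator handles.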
