numba-mpi 0.41__py3-none-any.whl → 0.43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
numba_mpi/__init__.py CHANGED
@@ -1,4 +1,6 @@
-""" Numba @njittable MPI wrappers tested on Linux, macOS and Windows """
+"""
+.. include::../README.md
+"""
 
 from importlib.metadata import PackageNotFoundError, version
 
numba_mpi/api/requests.py CHANGED
@@ -34,6 +34,9 @@ def wait(request):
     """Wrapper for MPI_Wait. Returns integer status code (0 == MPI_SUCCESS).
     Status is currently not handled. Requires 'request' parameter to be a
     c-style pointer to MPI_Request (such as returned by 'isend'/'irecv').
+
+    Uninitialized contents of 'request' (e.g., from numpy.empty()) may
+    cause invalid pointer dereference and segmentation faults.
     """
 
     status_buffer = create_status_buffer()
@@ -64,6 +67,9 @@ def waitall(requests):
     """Wrapper for MPI_Waitall. Returns integer status code (0 == MPI_SUCCESS).
     Status is currently not handled. Requires 'requests' parameter to be an
     array or tuple of MPI_Request objects.
+
+    Uninitialized contents of 'requests' (e.g., from numpy.empty()) may
+    cause invalid pointer dereference and segmentation faults.
     """
     if isinstance(requests, np.ndarray):
         return _waitall_array_impl(requests)
@@ -123,6 +129,9 @@ def waitany(requests):
     status; second - the index of request that was completed. Status is
     currently not handled. Requires 'requests' parameter to be an array
     or tuple of MPI_Request objects.
+
+    Uninitialized contents of 'requests' (e.g., from numpy.empty()) may
+    cause invalid pointer dereference and segmentation faults.
     """
 
     if isinstance(requests, np.ndarray):
@@ -167,6 +176,9 @@ def test(request):
     flag that indicates whether given request is completed. Status is currently
     not handled. Requires 'request' parameter to be a c-style pointer to
     MPI_Request (such as returned by 'isend'/'irecv').
+
+    Uninitialized contents of 'request' (e.g., from numpy.empty()) may
+    cause invalid pointer dereference and segmentation faults.
     """
 
     status_buffer = create_status_buffer()
@@ -203,6 +215,9 @@ def testall(requests):
     flag that indicates whether given request is completed. Status is currently
     not handled. Requires 'requests' parameter to be an array or tuple of
     MPI_Request objects.
+
+    Uninitialized contents of 'requests' (e.g., from numpy.empty()) may
+    cause invalid pointer dereference and segmentation faults.
     """
     if isinstance(requests, np.ndarray):
         return _testall_array_impl(requests)
@@ -269,6 +284,9 @@ def testany(requests):
     that indicates whether any of requests is completed, and index of request
     that is guaranteed to be completed. Requires 'requests' parameter to be an
     array or tuple of MPI_Request objects.
+
+    Uninitialized contents of 'requests' (e.g., from numpy.empty()) may
+    cause invalid pointer dereference and segmentation faults.
     """
 
     if isinstance(requests, np.ndarray):
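
The docstring additions above all carry the same warning: request storage must come from 'isend'/'irecv', never from uninitialized memory such as `numpy.empty()`, whose garbage contents would be dereferenced as MPI_Request pointers. The snippet below is a minimal sketch of that intended usage, not part of the diff; it assumes `isend`/`irecv` take `(data, destination/source, tag)` positionally and return the request handles that `waitall` expects, and that it is run under `mpiexec -np 2`:

```python
# Sketch only (not from the diff): requests are produced by isend/irecv,
# never by numpy.empty(), per the docstring warnings added in 0.43.
import numba
import numpy as np
import numba_mpi


@numba.njit
def exchange(src, dst):
    peer = (numba_mpi.rank() + 1) % 2           # assumes exactly two ranks
    req_send = numba_mpi.isend(src, peer, 0)    # assumed argument order: (data, dest, tag)
    req_recv = numba_mpi.irecv(dst, peer, 0)    # assumed argument order: (data, source, tag)
    # waitall accepts an array or tuple of MPI_Request objects (see docstring above)
    return numba_mpi.waitall((req_send, req_recv))


src = np.arange(3.0)
dst = np.empty(3)                # empty() is fine for data buffers, just not for requests
assert exchange(src, dst) == 0   # 0 == MPI_SUCCESS
```
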
numba_mpi-0.41.dist-info/METADATA → numba_mpi-0.43.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: numba-mpi
-Version: 0.41
+Version: 0.43
 Summary: Numba @njittable MPI wrappers tested on Linux, macOS and Windows
 Home-page: https://github.com/numba-mpi/numba-mpi
 Author: https://github.com/numba-mpi/numba-mpi/graphs/contributors
@@ -8,6 +8,7 @@ License: GPL v3
 Project-URL: Tracker, https://github.com/numba-mpi/numba-mpi/issues
 Project-URL: Documentation, https://numba-mpi.github.io/numba-mpi
 Project-URL: Source, https://github.com/numba-mpi/numba-mpi
+Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: numba
@@ -79,40 +80,46 @@ hello()
 
 ### Example comparing numba-mpi vs. mpi4py performance:
 
-The example below compares Numba + mpi4py vs. Numba + numba-mpi performance.
-The sample code estimates $\pi$ by integration of $4/(1+x^2)$ between 0 and 1
+The example below compares `Numba`+`mpi4py` vs. `Numba`+`numba-mpi` performance.
+The sample code estimates $\pi$ by numerical integration of $\int_0^1 (4/(1+x^2))dx=\pi$
 dividing the workload into `n_intervals` handled by separate MPI processes
-and then obtaining a sum using `allreduce`.
-The computation is carried out in a JIT-compiled function and is repeated
-`N_TIMES`, the repetitions and the MPI-handled reduction are done outside or
-inside of the JIT-compiled block for mpi4py and numba-mpi, respectively.
+and then obtaining a sum using `allreduce` (see, e.g., analogous [Matlab docs example](https://www.mathworks.com/help/parallel-computing/numerical-estimation-of-pi-using-message-passing.html)).
+The computation is carried out in a JIT-compiled function `get_pi_part()` and is repeated
+`N_TIMES`. The repetitions and the MPI-handled reduction are done outside or
+inside of the JIT-compiled block for `mpi4py` and `numba-mpi`, respectively.
 Timing is repeated `N_REPEAT` times and the minimum time is reported.
-The generated plot shown below depicts the speedup obtained by replacing mpi4py
-with numba_mpi as a function of `n_intervals` - the more often communication
-is needed (smaller `n_intervals`), the larger the expected speedup.
-
+The generated plot shown below depicts the speedup obtained by replacing `mpi4py`
+with `numba_mpi`, plotted as a function of `N_TIMES / n_intervals` - the number of MPI calls per
+interval. The speedup, which stems from avoiding roundtrips between JIT-compiled
+and Python code is significant (150%-300%) in all cases. The more often communication
+is needed (smaller `n_intervals`), the larger the measured speedup. Note that nothing
+in the actual number crunching (within the `get_pi_part()` function) or in the employed communication logic
+(handled by the same MPI library) differs between the `mpi4py` or `numba-mpi` solutions.
+These are the overhead of `mpi4py` higher-level abstractions and the overhead of
+repeatedly entering and leaving the JIT-compiled block if using `mpi4py`, which can be
+eliminated by using `numba-mpi`, and which the measured differences in execution time
+stem from.
 ```python
 import timeit, mpi4py, numba, numpy as np, numba_mpi
 
 N_TIMES = 10000
-N_REPEAT = 10
 RTOL = 1e-3
 
-@numba.njit
-def get_pi_part(out, n_intervals, rank, size):
+@numba.jit
+def get_pi_part(n_intervals=1000000, rank=0, size=1):
     h = 1 / n_intervals
     partial_sum = 0.0
     for i in range(rank + 1, n_intervals, size):
         x = h * (i - 0.5)
         partial_sum += 4 / (1 + x**2)
-    out[0] = h * partial_sum
+    return h * partial_sum
 
-@numba.njit
+@numba.jit
 def pi_numba_mpi(n_intervals):
     pi = np.array([0.])
     part = np.empty_like(pi)
     for _ in range(N_TIMES):
-        get_pi_part(part, n_intervals, numba_mpi.rank(), numba_mpi.size())
+        part[0] = get_pi_part(n_intervals, numba_mpi.rank(), numba_mpi.size())
         numba_mpi.allreduce(part, pi, numba_mpi.Operator.SUM)
         assert abs(pi[0] - np.pi) / np.pi < RTOL
 
@@ -120,30 +127,30 @@ def pi_mpi4py(n_intervals):
     pi = np.array([0.])
     part = np.empty_like(pi)
     for _ in range(N_TIMES):
-        get_pi_part(part, n_intervals, mpi4py.MPI.COMM_WORLD.rank, mpi4py.MPI.COMM_WORLD.size)
+        part[0] = get_pi_part(n_intervals, mpi4py.MPI.COMM_WORLD.rank, mpi4py.MPI.COMM_WORLD.size)
         mpi4py.MPI.COMM_WORLD.Allreduce(part, (pi, mpi4py.MPI.DOUBLE), op=mpi4py.MPI.SUM)
         assert abs(pi[0] - np.pi) / np.pi < RTOL
 
-plot_x = [1000 * k for k in range(1, 11)]
+plot_x = [x for x in range(1, 11)]
 plot_y = {'numba_mpi': [], 'mpi4py': []}
-for n_intervals in plot_x:
+for x in plot_x:
     for impl in plot_y:
         plot_y[impl].append(min(timeit.repeat(
-            f"pi_{impl}({n_intervals})",
+            f"pi_{impl}(n_intervals={N_TIMES // x})",
             globals=locals(),
             number=1,
-            repeat=N_REPEAT
+            repeat=10
         )))
 
 if numba_mpi.rank() == 0:
     from matplotlib import pyplot
     pyplot.figure(figsize=(8.3, 3.5), tight_layout=True)
     pyplot.plot(plot_x, np.array(plot_y['mpi4py'])/np.array(plot_y['numba_mpi']), marker='o')
-    pyplot.xlabel('n_intervals (workload in between communication)')
-    pyplot.ylabel('wall time ratio (mpi4py / numba_mpi)')
+    pyplot.xlabel('number of MPI calls per interval')
+    pyplot.ylabel('mpi4py/numba-mpi wall-time ratio')
     pyplot.title(f'mpiexec -np {numba_mpi.size()}')
     pyplot.grid()
-    pyplot.savefig('readme_plot.png')
+    pyplot.savefig('readme_plot.svg')
 ```
 
 ![plot](https://github.com/numba-mpi/numba-mpi/releases/download/tip/readme_plot.png)
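
For reference, the quadrature that the quoted `get_pi_part()` loop evaluates is the midpoint rule for the integral named in the README text above (a restatement of the snippet, not additional material from the diff):

$$
\pi = \int_0^1 \frac{4}{1+x^2}\,dx \;\approx\; h \sum_{i=1}^{n} \frac{4}{1+x_i^2},
\qquad x_i = h\left(i - \tfrac{1}{2}\right),\quad h = \frac{1}{n},
$$

with $n$ = `n_intervals`; each MPI process accumulates the terms with $i \equiv \mathrm{rank}+1 \pmod{\mathrm{size}}$ (the loop actually runs $i$ up to $n-1$, a negligible difference for large $n$), and `allreduce` sums the per-process partial results into `pi`.
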
numba_mpi-0.41.dist-info/RECORD → numba_mpi-0.43.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
-numba_mpi/__init__.py,sha256=mpW16BzokTCNGHClW6K4qGvMIRqPrw0K2OHNSCSml5Y,781
+numba_mpi/__init__.py,sha256=VkDzjRJcaS6j739oHdCqhdUcZLBlMFxfcn58zDjevoQ,741
 numba_mpi/common.py,sha256=2JJoUrd3Qa6GIFk6Zlt2NudS7ZurPxpVwBLRGSkCg5E,2266
 numba_mpi/utils.py,sha256=gfGFuzmGgs4FnBqzPI91ftAq4UHgXb_HFkvxrVWkcIo,1866
 numba_mpi/api/__init__.py,sha256=Zj5df4lWeGpxAXV8jKGFnmtLBQ50HwNU8dPf-os06X8,51
@@ -11,13 +11,13 @@ numba_mpi/api/isend.py,sha256=2mpP4FhMk0GrikjDluKwRnpVywdLj9RD4HVVEMSj9A8,1080
 numba_mpi/api/operator.py,sha256=3VTPZAdOP05bxdqt3lA0hRDICM-iaBMa4m-krEdO91s,342
 numba_mpi/api/rank.py,sha256=pqayxw-5QDJ7VJ3gKrvuu1G0sBlYEZt1juhnaDi_JD8,549
 numba_mpi/api/recv.py,sha256=YsYK-q7PNfi3zt0ftVddM363VsnJ4XFfmgMq8aeCr-o,1260
-numba_mpi/api/requests.py,sha256=oDe85ZQ4xFbHWlNdrDhqVLvCkcQHs_9upUf3ms8x58k,8300
+numba_mpi/api/requests.py,sha256=5EhgFyeQCGP8YclSPwxP95c2AhBo19CLlShK0TxCR2U,9114
 numba_mpi/api/scatter_gather.py,sha256=goZn4BxMKakWQHjfXIOdjzK3DJ-lTeaiQQwgnyQeZ_s,2410
 numba_mpi/api/send.py,sha256=jn1hPw0YHBHOaeJop_ZbjaBChaqgfw3nM1xGhW9sabI,909
 numba_mpi/api/size.py,sha256=fYLeUrygvz_XcxIDsLiZlMtS-aiWfp58Zi7aIOAgaj8,549
 numba_mpi/api/wtime.py,sha256=qrTqlefW7K7hqnAQKkGYm8kgdiRGuSAGiHmPcTrhLzE,279
-numba_mpi-0.41.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-numba_mpi-0.41.dist-info/METADATA,sha256=4POxiWzQaU7S3NeU8b_GKcog4H6OBjvvV62h-nPHj5I,8147
-numba_mpi-0.41.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-numba_mpi-0.41.dist-info/top_level.txt,sha256=yb_ktLmrfuhOZS0rjS81FFNC-gK_4c19WbLG2ViP73g,10
-numba_mpi-0.41.dist-info/RECORD,,
+numba_mpi-0.43.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+numba_mpi-0.43.dist-info/METADATA,sha256=oMga9o3BuVvyY-f-9tCWPqkYjj06J7cIbhLZHGZgW4g,8995
+numba_mpi-0.43.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+numba_mpi-0.43.dist-info/top_level.txt,sha256=yb_ktLmrfuhOZS0rjS81FFNC-gK_4c19WbLG2ViP73g,10
+numba_mpi-0.43.dist-info/RECORD,,
numba_mpi-0.41.dist-info/WHEEL → numba_mpi-0.43.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.43.0)
+Generator: setuptools (70.1.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 