dask-cuda 23.12.0a231026__py3-none-any.whl → 24.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. dask_cuda/VERSION +1 -0
  2. dask_cuda/__init__.py +1 -3
  3. dask_cuda/_version.py +20 -0
  4. dask_cuda/benchmarks/local_cudf_groupby.py +1 -1
  5. dask_cuda/benchmarks/local_cudf_merge.py +1 -1
  6. dask_cuda/benchmarks/local_cudf_shuffle.py +1 -1
  7. dask_cuda/benchmarks/local_cupy.py +1 -1
  8. dask_cuda/benchmarks/local_cupy_map_overlap.py +1 -1
  9. dask_cuda/benchmarks/utils.py +1 -1
  10. dask_cuda/cuda_worker.py +1 -3
  11. dask_cuda/device_host_file.py +1 -1
  12. dask_cuda/explicit_comms/dataframe/shuffle.py +1 -1
  13. dask_cuda/get_device_memory_objects.py +4 -0
  14. dask_cuda/initialize.py +47 -16
  15. dask_cuda/local_cuda_cluster.py +19 -19
  16. dask_cuda/plugins.py +122 -0
  17. dask_cuda/tests/test_dask_cuda_worker.py +3 -3
  18. dask_cuda/tests/test_dgx.py +49 -17
  19. dask_cuda/tests/test_explicit_comms.py +34 -6
  20. dask_cuda/tests/test_from_array.py +6 -2
  21. dask_cuda/tests/test_initialize.py +69 -21
  22. dask_cuda/tests/test_local_cuda_cluster.py +47 -14
  23. dask_cuda/tests/test_proxify_host_file.py +19 -4
  24. dask_cuda/tests/test_proxy.py +14 -3
  25. dask_cuda/tests/test_spill.py +3 -0
  26. dask_cuda/tests/test_utils.py +20 -6
  27. dask_cuda/utils.py +6 -140
  28. dask_cuda/utils_test.py +45 -0
  29. dask_cuda/worker_spec.py +2 -1
  30. {dask_cuda-23.12.0a231026.dist-info → dask_cuda-24.2.0.dist-info}/METADATA +11 -6
  31. dask_cuda-24.2.0.dist-info/RECORD +53 -0
  32. {dask_cuda-23.12.0a231026.dist-info → dask_cuda-24.2.0.dist-info}/WHEEL +1 -1
  33. dask_cuda/compat.py +0 -118
  34. dask_cuda-23.12.0a231026.dist-info/RECORD +0 -50
  35. {dask_cuda-23.12.0a231026.dist-info → dask_cuda-24.2.0.dist-info}/LICENSE +0 -0
  36. {dask_cuda-23.12.0a231026.dist-info → dask_cuda-24.2.0.dist-info}/entry_points.txt +0 -0
  37. {dask_cuda-23.12.0a231026.dist-info → dask_cuda-24.2.0.dist-info}/top_level.txt +0 -0
dask_cuda/utils.py CHANGED
@@ -1,4 +1,3 @@
- import importlib
  import math
  import operator
  import os
@@ -18,7 +17,7 @@ import dask
  import distributed # noqa: required for dask.config.get("distributed.comm.ucx")
  from dask.config import canonical_name
  from dask.utils import format_bytes, parse_bytes
- from distributed import Worker, WorkerPlugin, wait
+ from distributed import wait
  from distributed.comm import parse_address

  try:
@@ -32,122 +31,6 @@ except ImportError:
          yield


- class CPUAffinity(WorkerPlugin):
-     def __init__(self, cores):
-         self.cores = cores
-
-     def setup(self, worker=None):
-         os.sched_setaffinity(0, self.cores)
-
-
- class RMMSetup(WorkerPlugin):
-     def __init__(
-         self,
-         initial_pool_size,
-         maximum_pool_size,
-         managed_memory,
-         async_alloc,
-         release_threshold,
-         log_directory,
-         track_allocations,
-     ):
-         if initial_pool_size is None and maximum_pool_size is not None:
-             raise ValueError(
-                 "`rmm_maximum_pool_size` was specified without specifying "
-                 "`rmm_pool_size`.`rmm_pool_size` must be specified to use RMM pool."
-             )
-         if async_alloc is True:
-             if managed_memory is True:
-                 raise ValueError(
-                     "`rmm_managed_memory` is incompatible with the `rmm_async`."
-                 )
-         if async_alloc is False and release_threshold is not None:
-             raise ValueError("`rmm_release_threshold` requires `rmm_async`.")
-
-         self.initial_pool_size = initial_pool_size
-         self.maximum_pool_size = maximum_pool_size
-         self.managed_memory = managed_memory
-         self.async_alloc = async_alloc
-         self.release_threshold = release_threshold
-         self.logging = log_directory is not None
-         self.log_directory = log_directory
-         self.rmm_track_allocations = track_allocations
-
-     def setup(self, worker=None):
-         if self.initial_pool_size is not None:
-             self.initial_pool_size = parse_device_memory_limit(
-                 self.initial_pool_size, alignment_size=256
-             )
-
-         if self.async_alloc:
-             import rmm
-
-             if self.release_threshold is not None:
-                 self.release_threshold = parse_device_memory_limit(
-                     self.release_threshold, alignment_size=256
-                 )
-
-             mr = rmm.mr.CudaAsyncMemoryResource(
-                 initial_pool_size=self.initial_pool_size,
-                 release_threshold=self.release_threshold,
-             )
-
-             if self.maximum_pool_size is not None:
-                 self.maximum_pool_size = parse_device_memory_limit(
-                     self.maximum_pool_size, alignment_size=256
-                 )
-                 mr = rmm.mr.LimitingResourceAdaptor(
-                     mr, allocation_limit=self.maximum_pool_size
-                 )
-
-             rmm.mr.set_current_device_resource(mr)
-             if self.logging:
-                 rmm.enable_logging(
-                     log_file_name=get_rmm_log_file_name(
-                         worker, self.logging, self.log_directory
-                     )
-                 )
-         elif self.initial_pool_size is not None or self.managed_memory:
-             import rmm
-
-             pool_allocator = False if self.initial_pool_size is None else True
-
-             if self.initial_pool_size is not None:
-                 if self.maximum_pool_size is not None:
-                     self.maximum_pool_size = parse_device_memory_limit(
-                         self.maximum_pool_size, alignment_size=256
-                     )
-
-             rmm.reinitialize(
-                 pool_allocator=pool_allocator,
-                 managed_memory=self.managed_memory,
-                 initial_pool_size=self.initial_pool_size,
-                 maximum_pool_size=self.maximum_pool_size,
-                 logging=self.logging,
-                 log_file_name=get_rmm_log_file_name(
-                     worker, self.logging, self.log_directory
-                 ),
-             )
-         if self.rmm_track_allocations:
-             import rmm
-
-             mr = rmm.mr.get_current_device_resource()
-             rmm.mr.set_current_device_resource(rmm.mr.TrackingResourceAdaptor(mr))
-
-
- class PreImport(WorkerPlugin):
-     def __init__(self, libraries):
-         if libraries is None:
-             libraries = []
-         elif isinstance(libraries, str):
-             libraries = libraries.split(",")
-         self.libraries = libraries
-
-     def setup(self, worker=None):
-         for l in self.libraries:
-             importlib.import_module(l)
-
-
  def unpack_bitmask(x, mask_bits=64):
      """Unpack a list of integers containing bitmasks.

@@ -404,7 +287,7 @@ def get_preload_options(
      if create_cuda_context:
          preload_options["preload_argv"].append("--create-cuda-context")

-     if protocol == "ucx":
+     if protocol in ["ucx", "ucxx"]:
          initialize_ucx_argv = []
          if enable_tcp_over_ucx:
              initialize_ucx_argv.append("--enable-tcp-over-ucx")
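This hunk and the `get_worker_config` hunk further down extend the UCX-specific code paths to the newer "ucxx" protocol string. As a rough, illustrative sketch (not part of this diff, and assuming a CUDA GPU with the ucxx packages installed), the same UCX preload arguments are now generated for either protocol:

```python
# Illustrative sketch only; requires a GPU and the ucxx/distributed-ucxx packages.
from dask_cuda import LocalCUDACluster

if __name__ == "__main__":
    # With this change, dask-cuda emits the same "--enable-tcp-over-ucx" style
    # preload arguments for protocol="ucxx" as it already did for protocol="ucx".
    cluster = LocalCUDACluster(protocol="ucxx", enable_tcp_over_ucx=True)
    print(cluster)
```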
@@ -669,27 +552,6 @@ def parse_device_memory_limit(device_memory_limit, device_index=0, alignment_siz
      return _align(int(device_memory_limit), alignment_size)


- class MockWorker(Worker):
-     """Mock Worker class preventing NVML from getting used by SystemMonitor.
-
-     By preventing the Worker from initializing NVML in the SystemMonitor, we can
-     mock test multiple devices in `CUDA_VISIBLE_DEVICES` behavior with single-GPU
-     machines.
-     """
-
-     def __init__(self, *args, **kwargs):
-         distributed.diagnostics.nvml.device_get_count = MockWorker.device_get_count
-         self._device_get_count = distributed.diagnostics.nvml.device_get_count
-         super().__init__(*args, **kwargs)
-
-     def __del__(self):
-         distributed.diagnostics.nvml.device_get_count = self._device_get_count
-
-     @staticmethod
-     def device_get_count():
-         return 0
-
-
  def get_gpu_uuid_from_index(device_index=0):
      """Get GPU UUID from CUDA device index.

@@ -763,6 +625,10 @@ def get_worker_config(dask_worker):
          import ucp

          ret["ucx-transports"] = ucp.get_active_transports()
+     elif scheme == "ucxx":
+         import ucxx
+
+         ret["ucx-transports"] = ucxx.get_active_transports()

      # comm timeouts
      ret["distributed.comm.timeouts"] = dask.config.get("distributed.comm.timeouts")
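The `CPUAffinity`, `RMMSetup`, and `PreImport` classes removed above are ordinary `distributed` worker plugins; per the file list they now live in the new `dask_cuda/plugins.py` module (see the `worker_spec.py` import change below). A minimal sketch of the plugin mechanism, assuming `PreImport` is importable from `dask_cuda.plugins` as those import changes suggest:

```python
# Sketch, not part of this diff: LocalCUDACluster normally installs these plugins
# itself; explicit registration is shown only to illustrate the WorkerPlugin protocol.
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from dask_cuda.plugins import PreImport  # previously dask_cuda.utils.PreImport

if __name__ == "__main__":
    with LocalCUDACluster() as cluster, Client(cluster) as client:
        # PreImport.setup() runs once on every worker and imports each listed module.
        client.register_worker_plugin(PreImport("cupy,numba.cuda"))
```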
dask_cuda/utils_test.py ADDED
@@ -0,0 +1,45 @@
+ from typing import Literal
+
+ import distributed
+ from distributed import Nanny, Worker
+
+
+ class MockWorker(Worker):
+     """Mock Worker class preventing NVML from getting used by SystemMonitor.
+
+     By preventing the Worker from initializing NVML in the SystemMonitor, we can
+     mock test multiple devices in `CUDA_VISIBLE_DEVICES` behavior with single-GPU
+     machines.
+     """
+
+     def __init__(self, *args, **kwargs):
+         distributed.diagnostics.nvml.device_get_count = MockWorker.device_get_count
+         self._device_get_count = distributed.diagnostics.nvml.device_get_count
+         super().__init__(*args, **kwargs)
+
+     def __del__(self):
+         distributed.diagnostics.nvml.device_get_count = self._device_get_count
+
+     @staticmethod
+     def device_get_count():
+         return 0
+
+
+ class IncreasedCloseTimeoutNanny(Nanny):
+     """Increase `Nanny`'s close timeout.
+
+     The internal close timeout mechanism of `Nanny` recomputes the time left to kill
+     the `Worker` process based on elapsed time of the close task, which may leave
+     very little time for the subprocess to shut down cleanly and may cause tests
+     to fail when the system is under higher load. This class increases the close
+     timeout of 5.0 seconds that `Nanny` sets by default, which can be overridden
+     via Distributed's public API.
+
+     This class can be used with the `worker_class` argument of `LocalCluster` or
+     `LocalCUDACluster` to provide a much higher default of 30.0 seconds.
+     """
+
+     async def close(  # type:ignore[override]
+         self, timeout: float = 30.0, reason: str = "nanny-close"
+     ) -> Literal["OK"]:
+         return await super().close(timeout=timeout, reason=reason)
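As the docstring above notes, the helper is meant to be passed through the `worker_class` argument; a minimal sketch of how a test would use it (illustrative, not taken from this diff):

```python
# Sketch: give test workers a 30 s close timeout instead of Nanny's 5 s default.
from dask_cuda import LocalCUDACluster
from dask_cuda.utils_test import IncreasedCloseTimeoutNanny

if __name__ == "__main__":
    with LocalCUDACluster(worker_class=IncreasedCloseTimeoutNanny) as cluster:
        ...  # workers started by this cluster shut down with the longer timeout
```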
dask_cuda/worker_spec.py CHANGED
@@ -5,7 +5,8 @@ from distributed.system import MEMORY_LIMIT

  from .initialize import initialize
  from .local_cuda_cluster import cuda_visible_devices
- from .utils import CPUAffinity, get_cpu_affinity, get_gpu_count
+ from .plugins import CPUAffinity
+ from .utils import get_cpu_affinity, get_gpu_count


  def worker_spec(
{dask_cuda-23.12.0a231026.dist-info → dask_cuda-24.2.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: dask-cuda
- Version: 23.12.0a231026
+ Version: 24.2.0
  Summary: Utilities for Dask and CUDA interactions
  Author: NVIDIA Corporation
  License: Apache-2.0
@@ -17,12 +17,12 @@ Classifier: Programming Language :: Python :: 3.10
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: dask ==2023.9.2
- Requires-Dist: distributed ==2023.9.2
- Requires-Dist: pynvml <11.5,>=11.0.0
- Requires-Dist: numpy >=1.21
+ Requires-Dist: click >=8.1
  Requires-Dist: numba >=0.57
- Requires-Dist: pandas <1.6.0dev0,>=1.3
+ Requires-Dist: numpy >=1.21
+ Requires-Dist: pandas <1.6.0.dev0,>=1.3
+ Requires-Dist: pynvml <11.5,>=11.0.0
+ Requires-Dist: rapids-dask-dependency ==24.2.*
  Requires-Dist: zict >=2.0.0
  Provides-Extra: docs
  Requires-Dist: numpydoc >=1.1.0 ; extra == 'docs'
@@ -30,7 +30,12 @@ Requires-Dist: sphinx ; extra == 'docs'
  Requires-Dist: sphinx-click >=2.7.1 ; extra == 'docs'
  Requires-Dist: sphinx-rtd-theme >=0.5.1 ; extra == 'docs'
  Provides-Extra: test
+ Requires-Dist: cudf ==24.2.* ; extra == 'test'
+ Requires-Dist: dask-cudf ==24.2.* ; extra == 'test'
+ Requires-Dist: kvikio ==24.2.* ; extra == 'test'
  Requires-Dist: pytest ; extra == 'test'
+ Requires-Dist: pytest-cov ; extra == 'test'
+ Requires-Dist: ucx-py ==0.36.* ; extra == 'test'

  Dask CUDA
  =========
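The dependency changes above drop the hard `dask`/`distributed` pins in favor of the `rapids-dask-dependency` meta-package and add `click` plus the RAPIDS test extras. A quick, illustrative way to inspect what an installed wheel declares (exact requirement formatting depends on the metadata reader):

```python
# Sketch: print the installed dask-cuda version and its declared requirements.
from importlib.metadata import requires, version

print(version("dask-cuda"))  # expected to be "24.2.0" for this wheel
for requirement in requires("dask-cuda") or []:
    print(requirement)  # should include a rapids-dask-dependency ==24.2.* entry
```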
dask_cuda-24.2.0.dist-info/RECORD ADDED
@@ -0,0 +1,53 @@
+ dask_cuda/VERSION,sha256=LOsdRePwGiMfhM2DrcvIm5wG4HpS3B0cMVJnTcjfKmM,9
+ dask_cuda/__init__.py,sha256=XnMTUi-SvoGn7g1Dj6XW97HnQzGQv0G3EnvSjcZ7vU4,1455
+ dask_cuda/_version.py,sha256=iR6Kt93dZiHB4aBed4vCsW9knxQxMl0--nC5HoIaxyE,778
+ dask_cuda/cli.py,sha256=XNRH0bu-6jzRoyWJB5qSWuzePJSh3z_5Ng6rDCnz7lg,15970
+ dask_cuda/cuda_worker.py,sha256=bIu-ESeIpJG_WaTYrv0z9z5juJ1qR5i_5Ng3CN1WK8s,8579
+ dask_cuda/device_host_file.py,sha256=yS31LGtt9VFAG78uBBlTDr7HGIng2XymV1OxXIuEMtM,10272
+ dask_cuda/disk_io.py,sha256=urSLKiPvJvYmKCzDPOUDCYuLI3r1RUiyVh3UZGRoF_Y,6626
+ dask_cuda/get_device_memory_objects.py,sha256=R3U2cq4fJZPgtsUKyIguy9161p3Q99oxmcCmTcg6BtQ,4075
+ dask_cuda/initialize.py,sha256=Gjcxs_c8DTafgsHe5-2mw4lJdOmbFJJAZVOnxA8lTjM,6462
+ dask_cuda/is_device_object.py,sha256=CnajvbQiX0FzFzwft0MqK1OPomx3ZGDnDxT56wNjixw,1046
+ dask_cuda/is_spillable_object.py,sha256=CddGmg0tuSpXh2m_TJSY6GRpnl1WRHt1CRcdWgHPzWA,1457
+ dask_cuda/local_cuda_cluster.py,sha256=hoEiEfJqAQrRS7N632VatSl1245GiWMT5B77Wc-i5C0,17928
+ dask_cuda/plugins.py,sha256=cnHsdrXx7PBPmrzHX6YEkCH5byCsUk8LE2FeTeu8ZLU,4259
+ dask_cuda/proxify_device_objects.py,sha256=99CD7LOE79YiQGJ12sYl_XImVhJXpFR4vG5utdkjTQo,8108
+ dask_cuda/proxify_host_file.py,sha256=Wf5CFCC1JN5zmfvND3ls0M5FL01Y8VhHrk0xV3UQ9kk,30850
+ dask_cuda/proxy_object.py,sha256=bZq92kjgFB-ad_luSAFT_RItV3nssmiEk4OOSp34laU,29812
+ dask_cuda/utils.py,sha256=RWlLK2cPHaCuNNhr8bW8etBeGklwREQJOafQbTydStk,25121
+ dask_cuda/utils_test.py,sha256=WNMR0gic2tuP3pgygcR9g52NfyX8iGMOan6juXhpkCE,1694
+ dask_cuda/worker_spec.py,sha256=7-Uq_e5q2SkTlsmctMcYLCa9_3RiiVHZLIN7ctfaFmE,4376
+ dask_cuda/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dask_cuda/benchmarks/common.py,sha256=sEIFnRZS6wbyKCQyB4fDclYLc2YqC0PolurR5qzuRxw,6393
+ dask_cuda/benchmarks/local_cudf_groupby.py,sha256=T9lA9nb4Wzu46AH--SJEVCeCm3650J7slapdNR_08FU,8904
+ dask_cuda/benchmarks/local_cudf_merge.py,sha256=POjxoPx4zY1TjG2S_anElL6rDtC5Jhn3nF4HABlnwZg,12447
+ dask_cuda/benchmarks/local_cudf_shuffle.py,sha256=M-Lp3O3q8uyY50imQqMKZYwkAmyR0NApjx2ipGxDkXw,8608
+ dask_cuda/benchmarks/local_cupy.py,sha256=aUKIYfeR7c77K4kKk697Rxo8tG8kFabQ9jQEVGr-oTs,10762
+ dask_cuda/benchmarks/local_cupy_map_overlap.py,sha256=_texYmam1K_XbzIvURltui5KRsISGFNylXiGUtgRIz0,6442
+ dask_cuda/benchmarks/utils.py,sha256=baL5zK6VS6Mw_M4x9zJe8vMLUd2SZd1lS78JrL-h6oo,26896
+ dask_cuda/explicit_comms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dask_cuda/explicit_comms/comms.py,sha256=Su6PuNo68IyS-AwoqU4S9TmqWsLvUdNa0jot2hx8jQQ,10400
+ dask_cuda/explicit_comms/dataframe/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dask_cuda/explicit_comms/dataframe/shuffle.py,sha256=YferHNWKsMea8tele-ynPVr_6RAZNZIR-VzK_uFuEQU,20131
+ dask_cuda/tests/test_cudf_builtin_spilling.py,sha256=u3kW91YRLdHFycvpGfSQKrEucu5khMJ1k4sjmddO490,4910
+ dask_cuda/tests/test_dask_cuda_worker.py,sha256=gViHaMCSfB6ip125OEi9D0nfKC-qBXRoHz6BRodEdb4,17729
+ dask_cuda/tests/test_device_host_file.py,sha256=79ssUISo1YhsW_7HdwqPfsH2LRzS2bi5BjPym1Sdgqw,5882
+ dask_cuda/tests/test_dgx.py,sha256=Oh2vwL_CdUzSVQQoiIu6SPwXGRtmXwaW_Hh3ipXPUOc,7162
+ dask_cuda/tests/test_explicit_comms.py,sha256=I4lSW-NQ0E08baEoG7cY4Ix3blGb1Auz88q2BNd1cPA,13136
+ dask_cuda/tests/test_from_array.py,sha256=okT1B6UqHmLxoy0uER0Ylm3UyOmi5BAXwJpTuTAw44I,601
+ dask_cuda/tests/test_gds.py,sha256=6jf0HPTHAIG8Mp_FC4Ai4zpn-U1K7yk0fSXg8He8-r8,1513
+ dask_cuda/tests/test_initialize.py,sha256=Rba59ZbljEm1yyN94_sWZPEE_f7hWln95aiBVc49pmY,6960
+ dask_cuda/tests/test_local_cuda_cluster.py,sha256=G3kR-4o-vCqWWfSuQLFKVEK0F243FaDSgRlDTUll5aU,18376
+ dask_cuda/tests/test_proxify_host_file.py,sha256=Yiv0sDcUoWw0d2oiPeHGoHqqSSM4lfQ4rChCiaxb6EU,18994
+ dask_cuda/tests/test_proxy.py,sha256=6iicSYYT2BGo1iKUQ7jM00mCjC4gtfwwxFXfGwH3QHc,23807
+ dask_cuda/tests/test_spill.py,sha256=xN9PbVERBYMuZxvscSO0mAM22loq9WT3ltZVBFxlmM4,10239
+ dask_cuda/tests/test_utils.py,sha256=JRIwXfemc3lWSzLJX0VcvR1_0wB4yeoOTsw7kB6z6pU,9176
+ dask_cuda/tests/test_worker_spec.py,sha256=Bvu85vkqm6ZDAYPXKMJlI2pm9Uc5tiYKNtO4goXSw-I,2399
+ examples/ucx/client_initialize.py,sha256=YN3AXHF8btcMd6NicKKhKR9SXouAsK1foJhFspbOn70,1262
+ examples/ucx/local_cuda_cluster.py,sha256=7xVY3EhwhkY2L4VZin_BiMCbrjhirDNChoC86KiETNc,1983
+ dask_cuda-24.2.0.dist-info/LICENSE,sha256=MjI3I-EgxfEvZlgjk82rgiFsZqSDXHFETd2QJ89UwDA,11348
+ dask_cuda-24.2.0.dist-info/METADATA,sha256=WDvD-un12aVVPatfnue3HLTts2j9cUz9lxSUrzh05vE,2524
+ dask_cuda-24.2.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ dask_cuda-24.2.0.dist-info/entry_points.txt,sha256=UcRaKVEpywtxc6pF1VnfMB0UK4sJg7a8_NdZF67laPM,136
+ dask_cuda-24.2.0.dist-info/top_level.txt,sha256=3kKxJxeM108fuYc_lwwlklP7YBU9IEmdmRAouzi397o,33
+ dask_cuda-24.2.0.dist-info/RECORD,,
{dask_cuda-23.12.0a231026.dist-info → dask_cuda-24.2.0.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.41.2)
+ Generator: bdist_wheel (0.42.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

dask_cuda/compat.py DELETED
@@ -1,118 +0,0 @@
- import pickle
-
- import msgpack
- from packaging.version import Version
-
- import dask
- import distributed
- import distributed.comm.utils
- import distributed.protocol
- from distributed.comm.utils import OFFLOAD_THRESHOLD, nbytes, offload
- from distributed.protocol.core import (
-     Serialized,
-     decompress,
-     logger,
-     merge_and_deserialize,
-     msgpack_decode_default,
-     msgpack_opts,
- )
-
- if Version(distributed.__version__) >= Version("2023.8.1"):
-     # Monkey-patch protocol.core.loads (and its users)
-     async def from_frames(
-         frames, deserialize=True, deserializers=None, allow_offload=True
-     ):
-         """
-         Unserialize a list of Distributed protocol frames.
-         """
-         size = False
-
-         def _from_frames():
-             try:
-                 # Patched code
-                 return loads(
-                     frames, deserialize=deserialize, deserializers=deserializers
-                 )
-                 # end patched code
-             except EOFError:
-                 if size > 1000:
-                     datastr = "[too large to display]"
-                 else:
-                     datastr = frames
-                 # Aid diagnosing
-                 logger.error("truncated data stream (%d bytes): %s", size, datastr)
-                 raise
-
-         if allow_offload and deserialize and OFFLOAD_THRESHOLD:
-             size = sum(map(nbytes, frames))
-         if (
-             allow_offload
-             and deserialize
-             and OFFLOAD_THRESHOLD
-             and size > OFFLOAD_THRESHOLD
-         ):
-             res = await offload(_from_frames)
-         else:
-             res = _from_frames()
-
-         return res
-
-     def loads(frames, deserialize=True, deserializers=None):
-         """Transform bytestream back into Python value"""
-
-         allow_pickle = dask.config.get("distributed.scheduler.pickle")
-
-         try:
-
-             def _decode_default(obj):
-                 offset = obj.get("__Serialized__", 0)
-                 if offset > 0:
-                     sub_header = msgpack.loads(
-                         frames[offset],
-                         object_hook=msgpack_decode_default,
-                         use_list=False,
-                         **msgpack_opts,
-                     )
-                     offset += 1
-                     sub_frames = frames[offset : offset + sub_header["num-sub-frames"]]
-                     if deserialize:
-                         if "compression" in sub_header:
-                             sub_frames = decompress(sub_header, sub_frames)
-                         return merge_and_deserialize(
-                             sub_header, sub_frames, deserializers=deserializers
-                         )
-                     else:
-                         return Serialized(sub_header, sub_frames)
-
-                 offset = obj.get("__Pickled__", 0)
-                 if offset > 0:
-                     sub_header = msgpack.loads(frames[offset])
-                     offset += 1
-                     sub_frames = frames[offset : offset + sub_header["num-sub-frames"]]
-                     # Patched code
-                     if "compression" in sub_header:
-                         sub_frames = decompress(sub_header, sub_frames)
-                     # end patched code
-                     if allow_pickle:
-                         return pickle.loads(
-                             sub_header["pickled-obj"], buffers=sub_frames
-                         )
-                     else:
-                         raise ValueError(
-                             "Unpickle on the Scheduler isn't allowed, "
-                             "set `distributed.scheduler.pickle=true`"
-                         )
-
-                 return msgpack_decode_default(obj)
-
-             return msgpack.loads(
-                 frames[0], object_hook=_decode_default, use_list=False, **msgpack_opts
-             )
-
-         except Exception:
-             logger.critical("Failed to deserialize", exc_info=True)
-             raise
-
-     distributed.protocol.loads = loads
-     distributed.protocol.core.loads = loads
-     distributed.comm.utils.from_frames = from_frames
dask_cuda-23.12.0a231026.dist-info/RECORD DELETED
@@ -1,50 +0,0 @@
- dask_cuda/__init__.py,sha256=xtogPs_QSmTTMOWetj9CqLaCwdF-bANfrD75LYpulMc,1452
- dask_cuda/cli.py,sha256=XNRH0bu-6jzRoyWJB5qSWuzePJSh3z_5Ng6rDCnz7lg,15970
- dask_cuda/compat.py,sha256=BLXv9IHUtD3h6-T_8MX-uGt-UDMG6EuGuyN-zw3XndU,4084
- dask_cuda/cuda_worker.py,sha256=hUJ3dCdeF1GxL0Oio-d-clQ5tLxQ9xjwU6Bse5JW54g,8571
- dask_cuda/device_host_file.py,sha256=D0rHOFz1TRfvaecoP30x3JRWe1TiHUaq45Dg-v0DfoY,10272
- dask_cuda/disk_io.py,sha256=urSLKiPvJvYmKCzDPOUDCYuLI3r1RUiyVh3UZGRoF_Y,6626
- dask_cuda/get_device_memory_objects.py,sha256=zMSqWzm5rflRInbNMz7U2Ewv5nMcE-H8stMJeWHVWyc,3890
- dask_cuda/initialize.py,sha256=mzPgKhs8oLgUWpqd4ckvLNKvhLoHjt96RrBPeVneenI,5231
- dask_cuda/is_device_object.py,sha256=CnajvbQiX0FzFzwft0MqK1OPomx3ZGDnDxT56wNjixw,1046
- dask_cuda/is_spillable_object.py,sha256=CddGmg0tuSpXh2m_TJSY6GRpnl1WRHt1CRcdWgHPzWA,1457
- dask_cuda/local_cuda_cluster.py,sha256=qG-ZxcXuylC2ud1S9n06CZDR8kYP6MfKjSc5RSUWVsg,17625
- dask_cuda/proxify_device_objects.py,sha256=99CD7LOE79YiQGJ12sYl_XImVhJXpFR4vG5utdkjTQo,8108
- dask_cuda/proxify_host_file.py,sha256=Wf5CFCC1JN5zmfvND3ls0M5FL01Y8VhHrk0xV3UQ9kk,30850
- dask_cuda/proxy_object.py,sha256=bZq92kjgFB-ad_luSAFT_RItV3nssmiEk4OOSp34laU,29812
- dask_cuda/utils.py,sha256=1vqT2pWDGGMukGfOaOefJ67WarzNTKVT9k2oCyfSgFg,29871
- dask_cuda/worker_spec.py,sha256=EQffH_fuBBaghmO8o9kxJ7EAQiB4gaW-uPRYesPknSs,4356
- dask_cuda/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- dask_cuda/benchmarks/common.py,sha256=sEIFnRZS6wbyKCQyB4fDclYLc2YqC0PolurR5qzuRxw,6393
- dask_cuda/benchmarks/local_cudf_groupby.py,sha256=2iHk-a-GvLmAgajwQJNrqmZ-WJeiyMFEyflcxh7SPO8,8894
- dask_cuda/benchmarks/local_cudf_merge.py,sha256=vccM5PyzZVW99-a8YaIgftsGAiA5yXnT9NoAusx0PZY,12437
- dask_cuda/benchmarks/local_cudf_shuffle.py,sha256=LaNCMKhhfE1lYFUUWMtdYH-efbqV6YTFhKC-Eog9-8Q,8598
- dask_cuda/benchmarks/local_cupy.py,sha256=G36CI46ROtNPf6QISK6QjoguB2Qb19ScsylwLFOlMy4,10752
- dask_cuda/benchmarks/local_cupy_map_overlap.py,sha256=rQNLGvpX1XpgK-0Wx5fd3kV9Veu2ulBd5eX2sanNlEQ,6432
- dask_cuda/benchmarks/utils.py,sha256=mx_JKe4q1xFNwKJX03o8dEwc48iqnqHm-ZTHOcMn17E,26888
- dask_cuda/explicit_comms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- dask_cuda/explicit_comms/comms.py,sha256=Su6PuNo68IyS-AwoqU4S9TmqWsLvUdNa0jot2hx8jQQ,10400
- dask_cuda/explicit_comms/dataframe/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- dask_cuda/explicit_comms/dataframe/shuffle.py,sha256=2f2wlPyqXpryIHgMpsZzs3pDE7eyslYam-jQh3ujszQ,20124
- dask_cuda/tests/test_cudf_builtin_spilling.py,sha256=u3kW91YRLdHFycvpGfSQKrEucu5khMJ1k4sjmddO490,4910
- dask_cuda/tests/test_dask_cuda_worker.py,sha256=VgybyylO7eaSk9yVBj1snp3vM7ZTG-VPEcE8agTmaWI,17714
- dask_cuda/tests/test_device_host_file.py,sha256=79ssUISo1YhsW_7HdwqPfsH2LRzS2bi5BjPym1Sdgqw,5882
- dask_cuda/tests/test_dgx.py,sha256=bKX-GvkYjWlmcEIK15aGErxmc0qPqIWOG1CeDFGoXFU,6381
- dask_cuda/tests/test_explicit_comms.py,sha256=Ifjem3oVUVoJGdKh1v-SDgf8duV67O3zaxyi9nQrHcM,12291
- dask_cuda/tests/test_from_array.py,sha256=i2Vha4mchB0BopTlEdXV7CxY7qyTzFYdgYQTmukZX38,493
- dask_cuda/tests/test_gds.py,sha256=6jf0HPTHAIG8Mp_FC4Ai4zpn-U1K7yk0fSXg8He8-r8,1513
- dask_cuda/tests/test_initialize.py,sha256=EV3FTqBRX_kxHJ0ZEij34JpLyOJvGIYB_hQc-0afoG8,5235
- dask_cuda/tests/test_local_cuda_cluster.py,sha256=5-55CSMDJqBXqQzFQibmbWwvVOFC5iq7F1KtvtUx0kE,17417
- dask_cuda/tests/test_proxify_host_file.py,sha256=vnmUuU9w9hO4Et-qwnvY5VMkoohRt62cKhyP-wi7zKM,18492
- dask_cuda/tests/test_proxy.py,sha256=eJuXU0KRQC36R8g0WN9gyIeZ3tbKFlqMxybEzmaT1LA,23371
- dask_cuda/tests/test_spill.py,sha256=vO1fW9uyjI75pUGgBqcCx08do8vscmj-l--5-FPwFxI,10073
- dask_cuda/tests/test_utils.py,sha256=wgYPvu7Sk61C64pah9ZbK8cnBXK5RyUCpu3G2ny6OZQ,8832
- dask_cuda/tests/test_worker_spec.py,sha256=Bvu85vkqm6ZDAYPXKMJlI2pm9Uc5tiYKNtO4goXSw-I,2399
- examples/ucx/client_initialize.py,sha256=YN3AXHF8btcMd6NicKKhKR9SXouAsK1foJhFspbOn70,1262
- examples/ucx/local_cuda_cluster.py,sha256=7xVY3EhwhkY2L4VZin_BiMCbrjhirDNChoC86KiETNc,1983
- dask_cuda-23.12.0a231026.dist-info/LICENSE,sha256=MjI3I-EgxfEvZlgjk82rgiFsZqSDXHFETd2QJ89UwDA,11348
- dask_cuda-23.12.0a231026.dist-info/METADATA,sha256=sdRR0HPmdcnpUjSrDkKAxPMMTZf1RCicvjsnsL2rKEI,2285
- dask_cuda-23.12.0a231026.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
- dask_cuda-23.12.0a231026.dist-info/entry_points.txt,sha256=UcRaKVEpywtxc6pF1VnfMB0UK4sJg7a8_NdZF67laPM,136
- dask_cuda-23.12.0a231026.dist-info/top_level.txt,sha256=3kKxJxeM108fuYc_lwwlklP7YBU9IEmdmRAouzi397o,33
- dask_cuda-23.12.0a231026.dist-info/RECORD,,