dask-cuda 24.12.0__py3-none-any.whl → 25.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dask_cuda/GIT_COMMIT ADDED
@@ -0,0 +1 @@
+ e9ebd92886e6f518af02faf8a2cdadeb700b25a9
dask_cuda/VERSION CHANGED
@@ -1 +1 @@
- 24.12.00
+ 25.04.00
dask_cuda/__init__.py CHANGED
@@ -5,65 +5,50 @@ if sys.platform != "linux":
 
  import dask
  import dask.utils
- import dask.dataframe.core
- import dask.dataframe.shuffle
- import dask.dataframe.multi
- import dask.bag.core
  from distributed.protocol.cuda import cuda_deserialize, cuda_serialize
  from distributed.protocol.serialize import dask_deserialize, dask_serialize
 
  from ._version import __git_commit__, __version__
  from .cuda_worker import CUDAWorker
- from .explicit_comms.dataframe.shuffle import (
-     get_rearrange_by_column_wrapper,
-     get_default_shuffle_method,
- )
- from .local_cuda_cluster import LocalCUDACluster
- from .proxify_device_objects import proxify_decorator, unproxify_decorator
-
-
- if dask.config.get("dataframe.query-planning", None) is not False and dask.config.get(
-     "explicit-comms", False
- ):
-     raise NotImplementedError(
-         "The 'explicit-comms' config is not yet supported when "
-         "query-planning is enabled in dask. Please use the shuffle "
-         "API directly, or use the legacy dask-dataframe API "
-         "(set the 'dataframe.query-planning' config to `False`"
-         "before importing `dask.dataframe`).",
-     )
-
 
- # Monkey patching Dask to make use of explicit-comms when `DASK_EXPLICIT_COMMS=True`
- dask.dataframe.shuffle.rearrange_by_column = get_rearrange_by_column_wrapper(
-     dask.dataframe.shuffle.rearrange_by_column
- )
- # We have to replace all modules that imports Dask's `get_default_shuffle_method()`
- # TODO: introduce a shuffle-algorithm dispatcher in Dask so we don't need this hack
- dask.dataframe.shuffle.get_default_shuffle_method = get_default_shuffle_method
- dask.dataframe.multi.get_default_shuffle_method = get_default_shuffle_method
- dask.bag.core.get_default_shuffle_method = get_default_shuffle_method
-
-
- # Monkey patching Dask to make use of proxify and unproxify in compatibility mode
- dask.dataframe.shuffle.shuffle_group = proxify_decorator(
-     dask.dataframe.shuffle.shuffle_group
- )
- dask.dataframe.core._concat = unproxify_decorator(dask.dataframe.core._concat)
+ from .local_cuda_cluster import LocalCUDACluster
 
 
- def _register_cudf_spill_aware():
-     import cudf
+ try:
+     import dask.dataframe as dask_dataframe
+ except ImportError:
+     # Dask DataFrame (optional) isn't installed
+     dask_dataframe = None
 
-     # Only enable Dask/cuDF spilling if cuDF spilling is disabled, see
-     # https://github.com/rapidsai/dask-cuda/issues/1363
-     if not cudf.get_option("spill"):
-         # This reproduces the implementation of `_register_cudf`, see
-         # https://github.com/dask/distributed/blob/40fcd65e991382a956c3b879e438be1b100dff97/distributed/protocol/__init__.py#L106-L115
-         from cudf.comm import serialize
 
+ if dask_dataframe is not None:
+     from .explicit_comms.dataframe.shuffle import patch_shuffle_expression
+     from .proxify_device_objects import proxify_decorator, unproxify_decorator
 
- for registry in [cuda_serialize, cuda_deserialize, dask_serialize, dask_deserialize]:
-     for lib in ["cudf", "dask_cudf"]:
-         if lib in registry._lazy:
-             registry._lazy[lib] = _register_cudf_spill_aware
+     # Monkey patching Dask to make use of explicit-comms when `DASK_EXPLICIT_COMMS=True`
+     patch_shuffle_expression()
+     # Monkey patching Dask to make use of proxify and unproxify in compatibility mode
+     dask_dataframe.shuffle.shuffle_group = proxify_decorator(
+         dask.dataframe.shuffle.shuffle_group
+     )
+     dask_dataframe.core._concat = unproxify_decorator(dask.dataframe.core._concat)
+
+     def _register_cudf_spill_aware():
+         import cudf
+
+         # Only enable Dask/cuDF spilling if cuDF spilling is disabled, see
+         # https://github.com/rapidsai/dask-cuda/issues/1363
+         if not cudf.get_option("spill"):
+             # This reproduces the implementation of `_register_cudf`, see
+             # https://github.com/dask/distributed/blob/40fcd65e991382a956c3b879e438be1b100dff97/distributed/protocol/__init__.py#L106-L115
+             from cudf.comm import serialize
+
+     for registry in [
+         cuda_serialize,
+         cuda_deserialize,
+         dask_serialize,
+         dask_deserialize,
+     ]:
+         for lib in ["cudf", "dask_cudf"]:
+             if lib in registry._lazy:
+                 registry._lazy[lib] = _register_cudf_spill_aware
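
With this change, importing `dask_cuda` no longer requires `dask.dataframe` or `dask.array`; the DataFrame-specific monkey patching only runs when `dask.dataframe` can be imported. A minimal sketch of that behaviour, modelled on the `test_dask_cuda_import` test added later in this diff (the `sys.modules` trick comes from that test and is illustrative only):

    import sys

    import pytest

    # Hide dask.dataframe and dask.array, then confirm dask_cuda still imports.
    with pytest.MonkeyPatch.context() as monkeypatch:
        for mod in list(sys.modules):
            if mod.startswith("dask.dataframe") or mod.startswith("dask.array"):
                monkeypatch.setitem(sys.modules, mod, None)
        monkeypatch.delitem(sys.modules, "dask_cuda", raising=False)

        import dask_cuda  # succeeds even without Dask DataFrame / Array

        print(dask_cuda.__version__)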
@@ -246,7 +246,6 @@ def parse_args():
      return parse_benchmark_args(
          description="Distributed shuffle (dask/cudf) benchmark",
          args_list=special_args,
-         check_explicit_comms=False,
      )
 
 
@@ -251,7 +251,6 @@ def parse_args():
      args = parse_benchmark_args(
          description="Parquet read benchmark",
          args_list=special_args,
-         check_explicit_comms=False,
      )
      args.no_show_p2p_bandwidth = True
      return args
@@ -11,7 +11,6 @@ from typing import Any, Callable, Mapping, NamedTuple, Optional, Tuple
  import numpy as np
  import pandas as pd
 
- from dask import config
  from dask.distributed import Client, SSHCluster
  from dask.utils import format_bytes, format_time, parse_bytes
  from distributed.comm.addressing import get_address_host
@@ -52,7 +51,6 @@ def as_noop(dsk):
  def parse_benchmark_args(
      description="Generic dask-cuda Benchmark",
      args_list=[],
-     check_explicit_comms=True,
  ):
      parser = argparse.ArgumentParser(description=description)
      worker_args = parser.add_argument_group(description="Worker configuration")
@@ -377,24 +375,6 @@ def parse_benchmark_args(
      if args.multi_node and len(args.hosts.split(",")) < 2:
          raise ValueError("--multi-node requires at least 2 hosts")
 
-     # Raise error early if "explicit-comms" is not allowed
-     if (
-         check_explicit_comms
-         and args.backend == "explicit-comms"
-         and config.get(
-             "dataframe.query-planning",
-             None,
-         )
-         is not False
-     ):
-         raise NotImplementedError(
-             "The 'explicit-comms' config is not yet supported when "
-             "query-planning is enabled in dask. Please use the legacy "
-             "dask-dataframe API by setting the following environment "
-             "variable before executing:",
-             " DASK_DATAFRAME__QUERY_PLANNING=False",
-         )
-
      return args
 
 
@@ -1,15 +1,21 @@
+ # Copyright (c) 2021-2025 NVIDIA CORPORATION.
  import asyncio
  import concurrent.futures
  import contextlib
  import time
  import uuid
+ import weakref
  from typing import Any, Dict, Hashable, Iterable, List, Optional
 
  import distributed.comm
+ from dask.tokenize import tokenize
  from distributed import Client, Worker, default_client, get_worker
  from distributed.comm.addressing import parse_address, parse_host_port, unparse_address
 
- _default_comms = None
+ # Mapping tokenize(client ID, [worker addresses]) to CommsContext
+ _comms_cache: weakref.WeakValueDictionary[
+     str, "CommsContext"
+ ] = weakref.WeakValueDictionary()
 
 
  def get_multi_lock_or_null_context(multi_lock_context, *args, **kwargs):
@@ -38,9 +44,10 @@ def get_multi_lock_or_null_context(multi_lock_context, *args, **kwargs):
 
 
  def default_comms(client: Optional[Client] = None) -> "CommsContext":
-     """Return the default comms object
+     """Return the default comms object for ``client``.
 
-     Creates a new default comms object if no one exist.
+     Creates a new default comms object if one does not already exist
+     for ``client``.
 
      Parameters
      ----------
@@ -52,11 +59,31 @@ def default_comms(client: Optional[Client] = None) -> "CommsContext":
      -------
      comms: CommsContext
          The default comms object
+
+     Notes
+     -----
+     There are some subtle points around explicit-comms and the lifecycle
+     of a Dask Cluster.
+
+     A :class:`CommsContext` establishes explicit communication channels
+     between the workers *at the time it's created*. If workers are added
+     or removed, they will not be included in the communication channels
+     with the other workers.
+
+     If you need to refresh the explicit communications channels, then
+     create a new :class:`CommsContext` object or call ``default_comms``
+     again after workers have been added to or removed from the cluster.
      """
-     global _default_comms
-     if _default_comms is None:
-         _default_comms = CommsContext(client=client)
-     return _default_comms
+     # Comms are unique to a {client, [workers]} pair, so we key our
+     # cache by the token of that.
+     client = client or default_client()
+     token = tokenize(client.id, list(client.scheduler_info()["workers"].keys()))
+     maybe_comms = _comms_cache.get(token)
+     if maybe_comms is None:
+         maybe_comms = CommsContext(client=client)
+         _comms_cache[token] = maybe_comms
+
+     return maybe_comms
 
 
  def worker_state(sessionId: Optional[int] = None) -> dict:
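
The cache above is keyed by `tokenize(client.id, [worker addresses])`, so calling `default_comms()` again after the cluster changes yields a fresh `CommsContext` rather than one holding stale worker addresses. A condensed sketch of that behaviour, mirroring the `test_create_destroy_create` test added later in this diff (cluster sizes are illustrative):

    from distributed import Client, LocalCluster

    from dask_cuda.explicit_comms import comms

    with LocalCluster(n_workers=1) as cluster, Client(cluster) as client:
        first = comms.default_comms()          # creates and caches a CommsContext
        assert comms.default_comms() is first  # same {client, workers} -> cached object

        cluster.scale(2)
        client.wait_for_workers(2)
        second = comms.default_comms()         # worker set changed -> new CommsContext
        assert second is not first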
@@ -1,8 +1,6 @@
  from __future__ import annotations
 
  import asyncio
- import functools
- import inspect
  from collections import defaultdict
  from math import ceil
  from operator import getitem
@@ -20,7 +18,7 @@ import distributed.worker
  from dask.base import tokenize
  from dask.dataframe import DataFrame, Series
  from dask.dataframe.core import _concat as dd_concat
- from dask.dataframe.shuffle import group_split_dispatch, hash_object_dispatch
+ from dask.dataframe.dispatch import group_split_dispatch, hash_object_dispatch
  from distributed import wait
  from distributed.protocol import nested_deserialize, to_serialize
  from distributed.worker import Worker
@@ -33,6 +31,20 @@ T = TypeVar("T")
  Proxify = Callable[[T], T]
 
 
+ try:
+     from dask.dataframe import dask_expr
+
+ except ImportError:
+     # TODO: Remove when pinned to dask>2024.12.1
+     import dask_expr
+
+     if not dd._dask_expr_enabled():
+         raise ValueError(
+             "The legacy DataFrame API is not supported in dask_cudf>24.12. "
+             "Please enable query-planning, or downgrade to dask_cudf<=24.12"
+         )
+
+
 
  def get_proxify(worker: Worker) -> Proxify:
      """Get function to proxify objects"""
@@ -570,40 +582,48 @@ def _use_explicit_comms() -> bool:
      return False
 
 
- def get_rearrange_by_column_wrapper(func):
-     """Returns a function wrapper that dispatch the shuffle to explicit-comms.
+ def patch_shuffle_expression() -> None:
+     """Patch Dasks Shuffle expression.
 
-     Notice, this is monkey patched into Dask at dask_cuda import
+     Notice, this is monkey patched into Dask at dask_cuda
+     import, and it changes `Shuffle._layer` to lower into
+     an `ECShuffle` expression when the 'explicit-comms'
+     config is set to `True`.
      """
 
-     func_sig = inspect.signature(func)
-
-     @functools.wraps(func)
-     def wrapper(*args, **kwargs):
-         if _use_explicit_comms():
-             # Convert `*args, **kwargs` to a dict of `keyword -> values`
-             kw = func_sig.bind(*args, **kwargs)
-             kw.apply_defaults()
-             kw = kw.arguments
-             # Notice, we only overwrite the default and the "tasks" shuffle
-             # algorithm. The "disk" and "p2p" algorithm, we don't touch.
-             if kw["shuffle_method"] in ("tasks", None):
-                 col = kw["col"]
-                 if isinstance(col, str):
-                     col = [col]
-                 return shuffle(kw["df"], col, kw["npartitions"], kw["ignore_index"])
-         return func(*args, **kwargs)
-
-     return wrapper
-
-
- def get_default_shuffle_method() -> str:
-     """Return the default shuffle algorithm used by Dask
+     class ECShuffle(dask_expr._shuffle.TaskShuffle):
+         """Explicit-Comms Shuffle Expression."""
+
+         def _layer(self):
+             # Execute an explicit-comms shuffle
+             if not hasattr(self, "_ec_shuffled"):
+                 on = self.partitioning_index
+                 df = dask_expr.new_collection(self.frame)
+                 self._ec_shuffled = shuffle(
+                     df,
+                     [on] if isinstance(on, str) else on,
+                     self.npartitions_out,
+                     self.ignore_index,
+                 )
+             graph = self._ec_shuffled.dask.copy()
+             shuffled_name = self._ec_shuffled._name
+             for i in range(self.npartitions_out):
+                 graph[(self._name, i)] = graph[(shuffled_name, i)]
+             return graph
+
+     _base_lower = dask_expr._shuffle.Shuffle._lower
+
+     def _patched_lower(self):
+         if self.method in (None, "tasks") and _use_explicit_comms():
+             return ECShuffle(
+                 self.frame,
+                 self.partitioning_index,
+                 self.npartitions_out,
+                 self.ignore_index,
+                 self.options,
+                 self.original_partitioning_index,
+             )
+         else:
+             return _base_lower(self)
 
-     This changes the default shuffle algorithm from "p2p" to "tasks"
-     when explicit comms is enabled.
-     """
-     ret = dask.config.get("dataframe.shuffle.algorithm", None)
-     if ret is None and _use_explicit_comms():
-         return "tasks"
-     return dask.utils.get_default_shuffle_method()
+     dask_expr._shuffle.Shuffle._lower = _patched_lower
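
Because the hook now patches the expression-level `Shuffle._lower`, explicit-comms shuffling works with query planning enabled, which is why the `NotImplementedError` guards are removed elsewhere in this diff. A short usage sketch based on the updated tests (the cluster setup is illustrative and uses the pandas backend, not a GPU cluster):

    import dask
    import dask.dataframe as dd
    from distributed import Client, LocalCluster

    import dask_cuda  # noqa: F401  # importing dask_cuda installs the patched lowering

    with LocalCluster(n_workers=2) as cluster, Client(cluster):
        ddf = dd.from_dict({"key": list(range(100))}, npartitions=4)
        with dask.config.set(explicit_comms=True):
            shuffled = ddf.shuffle(on="key", npartitions=4)
            # The lowered graph now contains explicit-comms shuffle tasks
            assert any("explicit-comms-shuffle" in str(key) for key in shuffled.dask)
            result = shuffled.compute()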
dask_cuda/plugins.py CHANGED
@@ -1,4 +1,5 @@
  import importlib
+ import logging
  import os
  from typing import Callable, Dict
 
@@ -12,7 +13,15 @@ class CPUAffinity(WorkerPlugin):
          self.cores = cores
 
      def setup(self, worker=None):
-         os.sched_setaffinity(0, self.cores)
+         try:
+             os.sched_setaffinity(0, self.cores)
+         except Exception:
+             logger = logging.getLogger("distributed.worker")
+             logger.warning(
+                 "Setting CPU affinity for GPU failed. Please refer to the following "
+                 "link for troubleshooting information: "
+                 "https://docs.rapids.ai/api/dask-cuda/nightly/troubleshooting/#setting-cpu-affinity-failure"  # noqa: E501
+             )
 
 
  class CUDFSetup(WorkerPlugin):
dask_cuda/proxy_object.py CHANGED
@@ -11,9 +11,6 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Tuple, Type, Un
  import pandas
 
  import dask
- import dask.array.core
- import dask.dataframe.methods
- import dask.dataframe.utils
  import dask.utils
  import distributed.protocol
  import distributed.utils
@@ -22,16 +19,6 @@ from distributed.protocol.compression import decompress
 
  from dask_cuda.disk_io import disk_read
 
- try:
-     from dask.dataframe.backends import concat_pandas
- except ImportError:
-     from dask.dataframe.methods import concat_pandas
-
- try:
-     from dask.dataframe.dispatch import make_meta_dispatch as make_meta_dispatch
- except ImportError:
-     from dask.dataframe.utils import make_meta as make_meta_dispatch
-
  from .disk_io import SpillToDiskFile
  from .is_device_object import is_device_object
 
@@ -39,6 +26,22 @@ if TYPE_CHECKING:
      from .proxify_host_file import ProxyManager
 
 
+ try:
+     import dask.dataframe as dask_dataframe
+     import dask.dataframe.backends
+     import dask.dataframe.dispatch
+     import dask.dataframe.utils
+ except ImportError:
+     dask_dataframe = None
+
+
+ try:
+     import dask.array as dask_array
+     import dask.array.core
+ except ImportError:
+     dask_array = None
+
+
  # List of attributes that should be copied to the proxy at creation, which makes
  # them accessible without deserialization of the proxied object
  _FIXED_ATTRS = ["name", "__len__"]
@@ -893,12 +896,6 @@ def obj_pxy_dask_deserialize(header, frames):
      return subclass(pxy)
 
 
- @dask.dataframe.core.get_parallel_type.register(ProxyObject)
- def get_parallel_type_proxy_object(obj: ProxyObject):
-     # Notice, `get_parallel_type()` needs a instance not a type object
-     return dask.dataframe.core.get_parallel_type(obj.__class__.__new__(obj.__class__))
-
-
  def unproxify_input_wrapper(func):
      """Unproxify the input of `func`"""
 
@@ -911,26 +908,42 @@ def unproxify_input_wrapper(func):
      return wrapper
 
 
- # Register dispatch of ProxyObject on all known dispatch objects
- for dispatch in (
-     dask.dataframe.core.hash_object_dispatch,
-     make_meta_dispatch,
-     dask.dataframe.utils.make_scalar,
-     dask.dataframe.core.group_split_dispatch,
-     dask.array.core.tensordot_lookup,
-     dask.array.core.einsum_lookup,
-     dask.array.core.concatenate_lookup,
- ):
-     dispatch.register(ProxyObject, unproxify_input_wrapper(dispatch))
-
- dask.dataframe.methods.concat_dispatch.register(
-     ProxyObject, unproxify_input_wrapper(dask.dataframe.methods.concat)
- )
-
-
- # We overwrite the Dask dispatch of Pandas objects in order to
- # deserialize all ProxyObjects before concatenating
- dask.dataframe.methods.concat_dispatch.register(
-     (pandas.DataFrame, pandas.Series, pandas.Index),
-     unproxify_input_wrapper(concat_pandas),
- )
+ if dask_array is not None:
+
+     # Register dispatch of ProxyObject on all known dispatch objects
+     for dispatch in (
+         dask.array.core.tensordot_lookup,
+         dask.array.core.einsum_lookup,
+         dask.array.core.concatenate_lookup,
+     ):
+         dispatch.register(ProxyObject, unproxify_input_wrapper(dispatch))
+
+
+ if dask_dataframe is not None:
+
+     @dask.dataframe.dispatch.get_parallel_type.register(ProxyObject)
+     def get_parallel_type_proxy_object(obj: ProxyObject):
+         # Notice, `get_parallel_type()` needs a instance not a type object
+         return dask.dataframe.dispatch.get_parallel_type(
+             obj.__class__.__new__(obj.__class__)
+         )
+
+     # Register dispatch of ProxyObject on all known dispatch objects
+     for dispatch in (
+         dask.dataframe.dispatch.hash_object_dispatch,
+         dask.dataframe.dispatch.make_meta_dispatch,
+         dask.dataframe.utils.make_scalar,
+         dask.dataframe.dispatch.group_split_dispatch,
+     ):
+         dispatch.register(ProxyObject, unproxify_input_wrapper(dispatch))
+
+     dask.dataframe.dispatch.concat_dispatch.register(
+         ProxyObject, unproxify_input_wrapper(dask.dataframe.dispatch.concat)
+     )
+
+     # We overwrite the Dask dispatch of Pandas objects in order to
+     # deserialize all ProxyObjects before concatenating
+     dask.dataframe.dispatch.concat_dispatch.register(
+         (pandas.DataFrame, pandas.Series, pandas.Index),
+         unproxify_input_wrapper(dask.dataframe.backends.concat_pandas),
+     )
@@ -320,6 +320,7 @@ def test_unknown_argument():
      assert b"Scheduler address: --my-argument" in ret.stderr
 
 
+ @pytest.mark.xfail(reason="https://github.com/rapidsai/dask-cuda/issues/1441")
  @patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0"})
  def test_pre_import(loop):  # noqa: F811
      module = None
@@ -1,3 +1,5 @@
+ # Copyright (c) 2021-2025 NVIDIA CORPORATION.
+
  import asyncio
  import multiprocessing as mp
  import os
@@ -25,16 +27,6 @@ from dask_cuda.utils_test import IncreasedCloseTimeoutNanny
  mp = mp.get_context("spawn")  # type: ignore
  ucp = pytest.importorskip("ucp")
 
- QUERY_PLANNING_ON = dask.config.get("dataframe.query-planning", None) is not False
-
- # Skip these tests when dask-expr is active (for now)
- query_planning_skip = pytest.mark.skipif(
-     QUERY_PLANNING_ON,
-     reason=(
-         "The 'explicit-comms' config is not supported "
-         "when query planning is enabled."
-     ),
- )
 
  # Set default shuffle method to "tasks"
  if dask.config.get("dataframe.shuffle.method", None) is None:
@@ -98,7 +90,6 @@ def _test_dataframe_merge_empty_partitions(nrows, npartitions):
      pd.testing.assert_frame_equal(got, expected)
 
 
- @query_planning_skip
  def test_dataframe_merge_empty_partitions():
      # Notice, we use more partitions than rows
      p = mp.Process(target=_test_dataframe_merge_empty_partitions, args=(2, 4))
@@ -250,7 +241,7 @@ def _test_dask_use_explicit_comms(in_cluster):
          ):
              dask.config.refresh()  # Trigger re-read of the environment variables
              with pytest.raises(ValueError, match="explicit-comms-batchsize"):
-                 ddf.shuffle(on="key", npartitions=4)
+                 ddf.shuffle(on="key", npartitions=4).dask
 
      if in_cluster:
          with LocalCluster(
@@ -267,7 +258,6 @@ def _test_dask_use_explicit_comms(in_cluster):
          check_shuffle()
 
 
- @query_planning_skip
  @pytest.mark.parametrize("in_cluster", [True, False])
  def test_dask_use_explicit_comms(in_cluster):
      def _timeout(process, function, timeout):
@@ -330,7 +320,6 @@ def _test_dataframe_shuffle_merge(backend, protocol, n_workers):
      assert_eq(got, expected)
 
 
- @query_planning_skip
  @pytest.mark.parametrize("nworkers", [1, 2, 4])
  @pytest.mark.parametrize("backend", ["pandas", "cudf"])
  @pytest.mark.parametrize("protocol", ["tcp", "ucx", "ucxx"])
@@ -428,3 +417,116 @@ def test_lock_workers():
          p.join()
 
      assert all(p.exitcode == 0 for p in ps)
+
+
+ def test_create_destroy_create():
+     # https://github.com/rapidsai/dask-cuda/issues/1450
+     assert len(comms._comms_cache) == 0
+     with LocalCluster(n_workers=1) as cluster:
+         with Client(cluster) as client:
+             context = comms.default_comms()
+             scheduler_addresses_old = list(client.scheduler_info()["workers"].keys())
+             comms_addresses_old = list(comms.default_comms().worker_addresses)
+             assert comms.default_comms() is context
+             assert len(comms._comms_cache) == 1
+
+             # Add a worker, which should have a new comms object
+             cluster.scale(2)
+             client.wait_for_workers(2, timeout=5)
+             context2 = comms.default_comms()
+             assert context is not context2
+             assert len(comms._comms_cache) == 2
+
+             del context
+             del context2
+             assert len(comms._comms_cache) == 0
+             assert scheduler_addresses_old == comms_addresses_old
+
+     # A new cluster should have a new comms object. Previously, this failed
+     # because we referenced the old cluster's addresses.
+     with LocalCluster(n_workers=1) as cluster:
+         with Client(cluster) as client:
+             scheduler_addresses_new = list(client.scheduler_info()["workers"].keys())
+             comms_addresses_new = list(comms.default_comms().worker_addresses)
+
+             assert scheduler_addresses_new == comms_addresses_new
+
+
+ def test_scaled_cluster_gets_new_comms_context():
+     # Ensure that if we create a CommsContext, scale the cluster,
+     # and create a new CommsContext, then the new CommsContext
+     # should include the new worker.
+     # https://github.com/rapidsai/dask-cuda/issues/1450
+
+     name = "explicit-comms-shuffle"
+     ddf = dd.from_pandas(pd.DataFrame({"key": np.arange(10)}), npartitions=2)
+
+     with LocalCluster(n_workers=2) as cluster:
+         with Client(cluster) as client:
+             context_1 = comms.default_comms()
+
+             def check(dask_worker, session_id: int):
+                 has_state = hasattr(dask_worker, "_explicit_comm_state")
+                 has_state_for_session = (
+                     has_state and session_id in dask_worker._explicit_comm_state
+                 )
+                 if has_state_for_session:
+                     n_workers = dask_worker._explicit_comm_state[session_id]["nworkers"]
+                 else:
+                     n_workers = None
+                 return {
+                     "has_state": has_state,
+                     "has_state_for_session": has_state_for_session,
+                     "n_workers": n_workers,
+                 }
+
+             result_1 = client.run(check, session_id=context_1.sessionId)
+             expected_values = {
+                 "has_state": True,
+                 "has_state_for_session": True,
+                 "n_workers": 2,
+             }
+             expected_1 = {
+                 k: expected_values for k in client.scheduler_info()["workers"]
+             }
+             assert result_1 == expected_1
+
+             # Run a shuffle with the initial setup as a sanity test
+             with dask.config.set(explicit_comms=True):
+                 shuffled = ddf.shuffle(on="key", npartitions=4)
+                 assert any(name in str(key) for key in shuffled.dask)
+                 result = shuffled.compute()
+
+             with dask.config.set(explicit_comms=False):
+                 shuffled = ddf.shuffle(on="key", npartitions=4)
+                 expected = shuffled.compute()
+
+             assert_eq(result, expected)
+
+             # --- Scale the cluster ---
+             cluster.scale(3)
+             client.wait_for_workers(3, timeout=5)
+
+             context_2 = comms.default_comms()
+             result_2 = client.run(check, session_id=context_2.sessionId)
+             expected_values = {
+                 "has_state": True,
+                 "has_state_for_session": True,
+                 "n_workers": 3,
+             }
+             expected_2 = {
+                 k: expected_values for k in client.scheduler_info()["workers"]
+             }
+             assert result_2 == expected_2
+
+             # Run a shuffle with the new setup
+             with dask.config.set(explicit_comms=True):
+                 shuffled = ddf.shuffle(on="key", npartitions=4)
+                 assert any(name in str(key) for key in shuffled.dask)
+                 result = shuffled.compute()
+
+             with dask.config.set(explicit_comms=False):
+                 shuffled = ddf.shuffle(on="key", npartitions=4)
+                 expected = shuffled.compute()
+
+             assert_eq(result, expected)
@@ -1,4 +1,5 @@
  import multiprocessing as mp
+ import sys
 
  import numpy
  import psutil
@@ -214,3 +215,38 @@ def test_initialize_ucx_all(protocol):
      p.start()
      p.join()
      assert not p.exitcode
+
+
+ def _test_dask_cuda_import():
+     # Check that importing `dask_cuda` does NOT
+     # require `dask.dataframe` or `dask.array`.
+
+     # Patch sys.modules so that `dask.dataframe`
+     # and `dask.array` cannot be found.
+     with pytest.MonkeyPatch.context() as monkeypatch:
+         for k in list(sys.modules):
+             if k.startswith("dask.dataframe") or k.startswith("dask.array"):
+                 monkeypatch.setitem(sys.modules, k, None)
+         monkeypatch.delitem(sys.modules, "dask_cuda")
+
+         # Check that top-level imports still succeed.
+         import dask_cuda  # noqa: F401
+         from dask_cuda import CUDAWorker  # noqa: F401
+         from dask_cuda import LocalCUDACluster
+
+         with LocalCUDACluster(
+             dashboard_address=None,
+             n_workers=1,
+             threads_per_worker=1,
+             processes=True,
+             worker_class=IncreasedCloseTimeoutNanny,
+         ) as cluster:
+             with Client(cluster) as client:
+                 client.run(lambda *args: None)
+
+
+ def test_dask_cuda_import():
+     p = mp.Process(target=_test_dask_cuda_import)
+     p.start()
+     p.join()
+     assert not p.exitcode
@@ -504,27 +504,27 @@ def test_pandas():
      df1 = pandas.DataFrame({"a": range(10)})
      df2 = pandas.DataFrame({"a": range(10)})
 
-     res = dask.dataframe.methods.concat([df1, df2])
-     got = dask.dataframe.methods.concat([df1, df2])
+     res = dask.dataframe.dispatch.concat([df1, df2])
+     got = dask.dataframe.dispatch.concat([df1, df2])
      assert_frame_equal(res, got)
 
-     got = dask.dataframe.methods.concat([proxy_object.asproxy(df1), df2])
+     got = dask.dataframe.dispatch.concat([proxy_object.asproxy(df1), df2])
      assert_frame_equal(res, got)
 
-     got = dask.dataframe.methods.concat([df1, proxy_object.asproxy(df2)])
+     got = dask.dataframe.dispatch.concat([df1, proxy_object.asproxy(df2)])
      assert_frame_equal(res, got)
 
      df1 = pandas.Series(range(10))
      df2 = pandas.Series(range(10))
 
-     res = dask.dataframe.methods.concat([df1, df2])
-     got = dask.dataframe.methods.concat([df1, df2])
+     res = dask.dataframe.dispatch.concat([df1, df2])
+     got = dask.dataframe.dispatch.concat([df1, df2])
      assert all(res == got)
 
-     got = dask.dataframe.methods.concat([proxy_object.asproxy(df1), df2])
+     got = dask.dataframe.dispatch.concat([proxy_object.asproxy(df1), df2])
      assert all(res == got)
 
-     got = dask.dataframe.methods.concat([df1, proxy_object.asproxy(df2)])
+     got = dask.dataframe.dispatch.concat([df1, proxy_object.asproxy(df2)])
      assert all(res == got)
 
 
@@ -1,6 +1,7 @@
  import os
  from unittest.mock import patch
 
+ import pynvml
  import pytest
  from numba import cuda
 
@@ -197,7 +198,6 @@ def test_get_ucx_config(enable_tcp_over_ucx, enable_infiniband, enable_nvlink):
 
 
  def test_parse_visible_devices():
-     pynvml = pytest.importorskip("pynvml")
      pynvml.nvmlInit()
      indices = []
      uuids = []
@@ -250,7 +250,6 @@ def test_parse_device_memory_limit():
 
 
  def test_parse_visible_mig_devices():
-     pynvml = pytest.importorskip("pynvml")
      pynvml.nvmlInit()
      for index in range(get_gpu_count()):
          handle = pynvml.nvmlDeviceGetHandleByIndex(index)
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: dask-cuda
- Version: 24.12.0
+ Version: 25.4.0
  Summary: Utilities for Dask and CUDA interactions
  Author: NVIDIA Corporation
  License: Apache 2.0
@@ -19,24 +19,18 @@ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: click>=8.1
- Requires-Dist: numba>=0.57
+ Requires-Dist: numba<0.61.0a0,>=0.59.1
  Requires-Dist: numpy<3.0a0,>=1.23
  Requires-Dist: pandas>=1.3
- Requires-Dist: pynvml<12.0.0a0,>=11.0.0
- Requires-Dist: rapids-dask-dependency==24.12.*
+ Requires-Dist: pynvml<13.0.0a0,>=12.0.0
+ Requires-Dist: rapids-dask-dependency==25.4.*
  Requires-Dist: zict>=2.0.0
  Provides-Extra: docs
  Requires-Dist: numpydoc>=1.1.0; extra == "docs"
  Requires-Dist: sphinx; extra == "docs"
  Requires-Dist: sphinx-click>=2.7.1; extra == "docs"
  Requires-Dist: sphinx-rtd-theme>=0.5.1; extra == "docs"
- Provides-Extra: test
- Requires-Dist: cudf==24.12.*; extra == "test"
- Requires-Dist: dask-cudf==24.12.*; extra == "test"
- Requires-Dist: kvikio==24.12.*; extra == "test"
- Requires-Dist: pytest; extra == "test"
- Requires-Dist: pytest-cov; extra == "test"
- Requires-Dist: ucx-py==0.41.*; extra == "test"
+ Dynamic: license-file
 
  Dask CUDA
  =========
@@ -1,5 +1,6 @@
- dask_cuda/VERSION,sha256=NltZ4By82NzVjz00LGPhCXfkG4BB0JdUSXqlG8fiVuo,8
- dask_cuda/__init__.py,sha256=eOCH3Wj0A8X0qbNUoNA15dgxb2O-ZApha4QHq5EEVFw,2748
+ dask_cuda/GIT_COMMIT,sha256=wbY8QunTBf6nZeA4ulUfzAdQWyE7hoxV330KmJ3VnjA,41
+ dask_cuda/VERSION,sha256=EM36MPurzJgotElKb8R7ZaIOF2woBA69gsVnmiyf-LY,8
+ dask_cuda/__init__.py,sha256=Wbc7R0voN4vsQkb7SKuVXH0YXuXtfnAxrupxfM4lT10,1933
  dask_cuda/_version.py,sha256=cHDO9AzNtxkCVhwYu7hL3H7RPAkQnxpKBjElOst3rkI,964
  dask_cuda/cli.py,sha256=cScVyNiA_l9uXeDgkIcmbcR4l4cH1_1shqSqsVmuHPE,17053
  dask_cuda/cuda_worker.py,sha256=rZ1ITG_ZCbuaMA9e8uSqCjU8Km4AMphGGrxpBPQG8xU,9477
@@ -10,10 +11,10 @@ dask_cuda/initialize.py,sha256=Gjcxs_c8DTafgsHe5-2mw4lJdOmbFJJAZVOnxA8lTjM,6462
  dask_cuda/is_device_object.py,sha256=CnajvbQiX0FzFzwft0MqK1OPomx3ZGDnDxT56wNjixw,1046
  dask_cuda/is_spillable_object.py,sha256=CddGmg0tuSpXh2m_TJSY6GRpnl1WRHt1CRcdWgHPzWA,1457
  dask_cuda/local_cuda_cluster.py,sha256=wqwKVRV6jT13sf9e-XsvbVBlTrnhmcbmHQBFPTFcayw,20335
- dask_cuda/plugins.py,sha256=yGHEurbYhL4jucQrmsxLfOyE5c3bSJdfs6GVwvDAeEA,6770
+ dask_cuda/plugins.py,sha256=A2aT8HA6q_JhIEx6-XKcpbWEbl7aTg1GNoZQH8_vh00,7197
  dask_cuda/proxify_device_objects.py,sha256=99CD7LOE79YiQGJ12sYl_XImVhJXpFR4vG5utdkjTQo,8108
  dask_cuda/proxify_host_file.py,sha256=Wf5CFCC1JN5zmfvND3ls0M5FL01Y8VhHrk0xV3UQ9kk,30850
- dask_cuda/proxy_object.py,sha256=bZq92kjgFB-ad_luSAFT_RItV3nssmiEk4OOSp34laU,29812
+ dask_cuda/proxy_object.py,sha256=mrCCGwS-mltcY8oddJEXnPL6rV2dBpGgsFypBVbxRsA,30150
  dask_cuda/utils.py,sha256=Goq-m78rYZ-bcJitg47N1h_PC4PDuzXG0CUVH7V8azU,25515
  dask_cuda/utils_test.py,sha256=WNMR0gic2tuP3pgygcR9g52NfyX8iGMOan6juXhpkCE,1694
  dask_cuda/worker_spec.py,sha256=7-Uq_e5q2SkTlsmctMcYLCa9_3RiiVHZLIN7ctfaFmE,4376
@@ -21,35 +22,35 @@ dask_cuda/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
  dask_cuda/benchmarks/common.py,sha256=YFhxBYkoxIV-2mddSbLwTbyg67U4zXDd2_fFq9oP3_A,6922
  dask_cuda/benchmarks/local_cudf_groupby.py,sha256=zrDiF-yBAUxVt9mWOTH5hUm-pb-XnVX-G9gvCEX7_GI,8512
  dask_cuda/benchmarks/local_cudf_merge.py,sha256=Q7lnZ87-O7j28hkS-i_5hMApTX8VsuI4ftZf2XAnp1E,12195
- dask_cuda/benchmarks/local_cudf_shuffle.py,sha256=8FjPFtiC-UqZcdPfocdMuzq_8TURAQWJlmhfcMWdo4w,8276
+ dask_cuda/benchmarks/local_cudf_shuffle.py,sha256=Ied7r_fdGuOJyikBVVkMaIX3niJIlF39C1Xk6IVwgo4,8240
  dask_cuda/benchmarks/local_cupy.py,sha256=RCxQJd88bn3vyMAJDPK3orUpxzvDZY957wOSYkfriq0,10323
  dask_cuda/benchmarks/local_cupy_map_overlap.py,sha256=YAllGFuG6MePfPL8gdZ-Ld7a44-G0eEaHZJWB4vFPdY,6017
- dask_cuda/benchmarks/read_parquet.py,sha256=TARcG-TS1NGcQWJmuAKtfmBmy5LAaLc3xgtKgAd1DaA,7650
- dask_cuda/benchmarks/utils.py,sha256=_NSWS5e8SzZ6vxDcEFo97Y8gs_e23Qqd-c3r83BA6PU,30748
+ dask_cuda/benchmarks/read_parquet.py,sha256=spKu6RLWYngPZq9hnaoU0mz7INIaJnErfqjBG2wH8Zc,7614
+ dask_cuda/benchmarks/utils.py,sha256=_x0XXL_F3W-fExpuQfTBwuK3WnrVuXQQepbnvjUqS9o,30075
  dask_cuda/explicit_comms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- dask_cuda/explicit_comms/comms.py,sha256=Su6PuNo68IyS-AwoqU4S9TmqWsLvUdNa0jot2hx8jQQ,10400
+ dask_cuda/explicit_comms/comms.py,sha256=uq-XPOH38dFcYS_13Vomj2ER6zxQz7DPeSM000mOVmY,11541
  dask_cuda/explicit_comms/dataframe/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- dask_cuda/explicit_comms/dataframe/shuffle.py,sha256=4xfhfbTGa36YPs_ex1_fFhzfGMYJq-QkS5q0RwgeHh8,20645
+ dask_cuda/explicit_comms/dataframe/shuffle.py,sha256=g9xDyFKmblEuevZt5Drh66uMLw-LUNOI8CIucDdACmY,21231
  dask_cuda/tests/test_cudf_builtin_spilling.py,sha256=qVN9J0Hdv66A9COFArLIdRriyyxEKpS3lEZGHbVHaq8,4903
- dask_cuda/tests/test_dask_cuda_worker.py,sha256=6rroHvJAn5R3X9LwIcE8QrPxG1GO3PaxXVjhbdQ90Pw,20477
+ dask_cuda/tests/test_dask_cuda_worker.py,sha256=C1emlr47yGa3TdSSlAXJRzguY4bcH74htk21x9th7nQ,20556
  dask_cuda/tests/test_device_host_file.py,sha256=79ssUISo1YhsW_7HdwqPfsH2LRzS2bi5BjPym1Sdgqw,5882
  dask_cuda/tests/test_dgx.py,sha256=BPCF4ZvhrVKkT43OOFHdijuo-M34vW3V18C8rRH1HXg,7489
- dask_cuda/tests/test_explicit_comms.py,sha256=Pa5vVx63qWtScnVJuS31WESXIt2FPyTJVFO-0OUbbmU,15276
+ dask_cuda/tests/test_explicit_comms.py,sha256=xnQjjUrd6RFd9CS99pVuWY1frfiMXzRv_fW4rk9opOk,19465
  dask_cuda/tests/test_from_array.py,sha256=okT1B6UqHmLxoy0uER0Ylm3UyOmi5BAXwJpTuTAw44I,601
  dask_cuda/tests/test_gds.py,sha256=j1Huud6UGm1fbkyRLQEz_ysrVw__5AimwSn_M-2GEvs,1513
- dask_cuda/tests/test_initialize.py,sha256=Rba59ZbljEm1yyN94_sWZPEE_f7hWln95aiBVc49pmY,6960
+ dask_cuda/tests/test_initialize.py,sha256=4Ovv_ClokKibPX6wfuaoQgN4eKCohagRFoE3s3D7Huk,8119
  dask_cuda/tests/test_local_cuda_cluster.py,sha256=Lc9QncyGwBwhaZPGBfreXJf3ZC9Zd8SjDc2fpeQ-BT0,19710
  dask_cuda/tests/test_proxify_host_file.py,sha256=LC3jjo_gbfhdIy1Zy_ynmgyv31HXFoBINCe1-XXZ4XU,18994
- dask_cuda/tests/test_proxy.py,sha256=51qsXGJBg_hwSMRsC_QvJBz4wVM0Bf8fbFmTUFA7HJE,23809
+ dask_cuda/tests/test_proxy.py,sha256=U9uE-QesTwquNKzTReEKiYgoRgS_pfGW-A-gJNppHyg,23817
  dask_cuda/tests/test_spill.py,sha256=CYMbp5HDBYlZ7T_n8RfSOZxaWFcAQKjprjRM7Wupcdw,13419
- dask_cuda/tests/test_utils.py,sha256=JRIwXfemc3lWSzLJX0VcvR1_0wB4yeoOTsw7kB6z6pU,9176
+ dask_cuda/tests/test_utils.py,sha256=PQI_oTONWnKSKlkQfEeK-vlmYa0-cPpDjDEbm74cNCE,9104
  dask_cuda/tests/test_version.py,sha256=vK2HjlRLX0nxwvRsYxBqhoZryBNZklzA-vdnyuWDxVg,365
  dask_cuda/tests/test_worker_spec.py,sha256=Bvu85vkqm6ZDAYPXKMJlI2pm9Uc5tiYKNtO4goXSw-I,2399
+ dask_cuda-25.4.0.dist-info/licenses/LICENSE,sha256=MjI3I-EgxfEvZlgjk82rgiFsZqSDXHFETd2QJ89UwDA,11348
  examples/ucx/client_initialize.py,sha256=YN3AXHF8btcMd6NicKKhKR9SXouAsK1foJhFspbOn70,1262
  examples/ucx/local_cuda_cluster.py,sha256=7xVY3EhwhkY2L4VZin_BiMCbrjhirDNChoC86KiETNc,1983
- dask_cuda-24.12.0.dist-info/LICENSE,sha256=MjI3I-EgxfEvZlgjk82rgiFsZqSDXHFETd2QJ89UwDA,11348
- dask_cuda-24.12.0.dist-info/METADATA,sha256=qFewjmkl67EsxFm9VoMTmw_XOOK3savtnO9hK-Qwx-E,2557
- dask_cuda-24.12.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
- dask_cuda-24.12.0.dist-info/entry_points.txt,sha256=UcRaKVEpywtxc6pF1VnfMB0UK4sJg7a8_NdZF67laPM,136
- dask_cuda-24.12.0.dist-info/top_level.txt,sha256=3kKxJxeM108fuYc_lwwlklP7YBU9IEmdmRAouzi397o,33
- dask_cuda-24.12.0.dist-info/RECORD,,
+ dask_cuda-25.4.0.dist-info/METADATA,sha256=udK2maTnpkUBnOOtTvGOwySUtJxnIo4rcIOmySPBuOk,2294
+ dask_cuda-25.4.0.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ dask_cuda-25.4.0.dist-info/entry_points.txt,sha256=UcRaKVEpywtxc6pF1VnfMB0UK4sJg7a8_NdZF67laPM,136
+ dask_cuda-25.4.0.dist-info/top_level.txt,sha256=3kKxJxeM108fuYc_lwwlklP7YBU9IEmdmRAouzi397o,33
+ dask_cuda-25.4.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.6.0)
+ Generator: setuptools (78.1.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
 