dask-cuda 24.4.0__py3-none-any.whl → 24.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dask_cuda/VERSION +1 -1
- dask_cuda/__init__.py +12 -0
- dask_cuda/_version.py +1 -1
- dask_cuda/benchmarks/local_cudf_merge.py +27 -20
- dask_cuda/benchmarks/local_cudf_shuffle.py +16 -10
- dask_cuda/benchmarks/utils.py +24 -1
- dask_cuda/explicit_comms/dataframe/shuffle.py +19 -13
- dask_cuda/tests/test_dgx.py +12 -4
- dask_cuda/tests/test_explicit_comms.py +19 -0
- dask_cuda/tests/test_proxy.py +2 -2
- {dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/METADATA +7 -7
- {dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/RECORD +16 -16
- {dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/LICENSE +0 -0
- {dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/WHEEL +0 -0
- {dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/entry_points.txt +0 -0
- {dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/top_level.txt +0 -0
dask_cuda/VERSION
CHANGED
@@ -1 +1 @@
-24.04.00
+24.06.00

dask_cuda/__init__.py
CHANGED
@@ -20,6 +20,18 @@ from .local_cuda_cluster import LocalCUDACluster
 from .proxify_device_objects import proxify_decorator, unproxify_decorator


+if dask.config.get("dataframe.query-planning", None) is not False and dask.config.get(
+    "explicit-comms", False
+):
+    raise NotImplementedError(
+        "The 'explicit-comms' config is not yet supported when "
+        "query-planning is enabled in dask. Please use the shuffle "
+        "API directly, or use the legacy dask-dataframe API "
+        "(set the 'dataframe.query-planning' config to `False` "
+        "before importing `dask.dataframe`).",
+    )
+
+
 # Monkey patching Dask to make use of explicit-comms when `DASK_EXPLICIT_COMMS=True`
 dask.dataframe.shuffle.rearrange_by_column = get_rearrange_by_column_wrapper(
     dask.dataframe.shuffle.rearrange_by_column
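
The error raised above asks users to disable query planning before dask.dataframe is imported. A minimal sketch of that workaround (the import ordering is the point; the config key is taken from the error message itself):

    import dask

    # Disable dask-expr query planning *before* dask.dataframe is imported,
    # so the legacy dask-dataframe API is used.
    dask.config.set({"dataframe.query-planning": False})

    import dask.dataframe as dd  # noqa: E402
    import dask_cuda  # noqa: E402,F401
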
dask_cuda/benchmarks/local_cudf_merge.py
CHANGED
@@ -7,8 +7,7 @@ import numpy as np
 import pandas as pd

 import dask
-
-from dask.dataframe.core import new_dd_object
+import dask.dataframe as dd
 from dask.distributed import performance_report, wait
 from dask.utils import format_bytes, parse_bytes

@@ -25,12 +24,20 @@ from dask_cuda.benchmarks.utils import (
 # <https://gist.github.com/rjzamora/0ffc35c19b5180ab04bbf7c793c45955>


-def generate_chunk(i_chunk, local_size, num_chunks, chunk_type, frac_match, gpu):
+# Set default shuffle method to "tasks"
+if dask.config.get("dataframe.shuffle.method", None) is None:
+    dask.config.set({"dataframe.shuffle.method": "tasks"})
+
+
+def generate_chunk(input):
+    i_chunk, local_size, num_chunks, chunk_type, frac_match, gpu = input
+
     # Setting a seed that triggers max amount of comm in the two-GPU case.
     if gpu:
         import cupy as xp

         import cudf as xdf
+        import dask_cudf  # noqa: F401
     else:
         import numpy as xp
         import pandas as xdf
@@ -105,25 +112,25 @@ def get_random_ddf(chunk_size, num_chunks, frac_match, chunk_type, args):

     parts = [chunk_size for _ in range(num_chunks)]
     device_type = True if args.type == "gpu" else False
-    meta = generate_chunk(0, 4, 1, chunk_type, None, device_type)
+    meta = generate_chunk((0, 4, 1, chunk_type, None, device_type))
     divisions = [None] * (len(parts) + 1)

-    [16 deleted lines not captured in this view: the old task-graph construction fed to new_dd_object]
+    ddf = dd.from_map(
+        generate_chunk,
+        [
+            (
+                i,
+                part,
+                len(parts),
+                chunk_type,
+                frac_match,
+                device_type,
+            )
+            for i, part in enumerate(parts)
+        ],
+        meta=meta,
+        divisions=divisions,
+    )

     if chunk_type == "build":
         if not args.no_shuffle:
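
The rewritten get_random_ddf builds its collection with dd.from_map, which calls the mapped function once per element of the iterable, instead of hand-assembling a task graph for the removed new_dd_object. A self-contained sketch of the same pattern, with an illustrative make_part helper standing in for generate_chunk:

    import pandas as pd

    import dask.dataframe as dd

    def make_part(arg):
        # from_map passes one element of the iterable to each call,
        # mirroring generate_chunk's single tuple argument.
        i, nrows = arg
        return pd.DataFrame({"id": range(i * nrows, (i + 1) * nrows)})

    parts = [(i, 10) for i in range(4)]
    meta = make_part((0, 2)).iloc[:0]  # empty frame with the right dtypes
    ddf = dd.from_map(
        make_part,
        parts,
        meta=meta,
        divisions=[None] * (len(parts) + 1),
    )
    assert ddf.npartitions == 4
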
dask_cuda/benchmarks/local_cudf_shuffle.py
CHANGED
@@ -8,8 +8,6 @@ import pandas as pd

 import dask
 import dask.dataframe
-from dask.dataframe.core import new_dd_object
-from dask.dataframe.shuffle import shuffle
 from dask.distributed import Client, performance_report, wait
 from dask.utils import format_bytes, parse_bytes

@@ -33,7 +31,7 @@ except ImportError:


 def shuffle_dask(df, args):
-    result = shuffle(
+    result = df.shuffle("data", shuffle_method="tasks", ignore_index=args.ignore_index)
     if args.backend == "dask-noop":
         result = as_noop(result)
     t1 = perf_counter()
@@ -94,18 +92,24 @@ def create_data(
     )

     # Create partition based to the specified partition distribution
-
+    futures = []
     for i, part_size in enumerate(dist):
         for _ in range(part_size):
             # We use `client.submit` to control placement of the partition.
-
-
+            futures.append(
+                client.submit(
+                    create_df, chunksize, args.type, workers=[workers[i]], pure=False
+                )
             )
-    wait(
+    wait(futures)

     df_meta = create_df(0, args.type)
-    divs = [None] * (len(
-    ret =
+    divs = [None] * (len(futures) + 1)
+    ret = dask.dataframe.from_delayed(
+        futures,
+        meta=df_meta,
+        divisions=divs,
+    ).persist()
     wait(ret)

     data_processed = args.in_parts * args.partition_size
@@ -254,7 +258,9 @@ def parse_args():
     ]

     return parse_benchmark_args(
-        description="Distributed shuffle (dask/cudf) benchmark",
+        description="Distributed shuffle (dask/cudf) benchmark",
+        args_list=special_args,
+        check_explicit_comms=False,
     )

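
shuffle_dask now goes through the public DataFrame.shuffle method instead of the removed dask.dataframe.shuffle.shuffle helper. A minimal sketch of the same call on a toy pandas-backed collection:

    import pandas as pd

    import dask.dataframe as dd

    df = pd.DataFrame({"data": [3, 1, 2, 1, 3, 2], "x": range(6)})
    ddf = dd.from_pandas(df, npartitions=3)

    # Rows with equal values in "data" land in the same output partition.
    shuffled = ddf.shuffle("data", shuffle_method="tasks", ignore_index=True)
    print(shuffled.compute())
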
dask_cuda/benchmarks/utils.py
CHANGED
@@ -11,6 +11,7 @@ from typing import Any, Callable, Mapping, NamedTuple, Optional, Tuple
 import numpy as np
 import pandas as pd

+from dask import config
 from dask.distributed import Client, SSHCluster
 from dask.utils import format_bytes, format_time, parse_bytes
 from distributed.comm.addressing import get_address_host
@@ -47,7 +48,11 @@ def as_noop(dsk):
     raise RuntimeError("Requested noop computation but dask-noop not installed.")


-def parse_benchmark_args(description="Generic dask-cuda Benchmark", args_list=[]):
+def parse_benchmark_args(
+    description="Generic dask-cuda Benchmark",
+    args_list=[],
+    check_explicit_comms=True,
+):
     parser = argparse.ArgumentParser(description=description)
     worker_args = parser.add_argument_group(description="Worker configuration")
     worker_args.add_argument(
@@ -317,6 +322,24 @@ def parse_benchmark_args(description="Generic dask-cuda Benchmark", args_list=[]
     if args.multi_node and len(args.hosts.split(",")) < 2:
         raise ValueError("--multi-node requires at least 2 hosts")

+    # Raise error early if "explicit-comms" is not allowed
+    if (
+        check_explicit_comms
+        and args.backend == "explicit-comms"
+        and config.get(
+            "dataframe.query-planning",
+            None,
+        )
+        is not False
+    ):
+        raise NotImplementedError(
+            "The 'explicit-comms' config is not yet supported when "
+            "query-planning is enabled in dask. Please use the legacy "
+            "dask-dataframe API by setting the following environment "
+            "variable before executing:",
+            " DASK_DATAFRAME__QUERY_PLANNING=False",
+        )
+
     return args

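
The suggested DASK_DATAFRAME__QUERY_PLANNING=False workaround relies on dask mapping environment variables of the form DASK_<SECTION>__<KEY> onto dotted config keys. A sketch of checking that mapping in-process (the assert is illustrative):

    import os

    # Must be set before dask is imported; dask reads the environment
    # into its config at import time.
    os.environ["DASK_DATAFRAME__QUERY_PLANNING"] = "False"

    from dask import config  # noqa: E402

    assert config.get("dataframe.query-planning", None) is False
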
dask_cuda/explicit_comms/dataframe/shuffle.py
CHANGED
@@ -11,10 +11,12 @@ from typing import Any, Callable, Dict, List, Optional, Set, TypeVar
 import dask
 import dask.config
 import dask.dataframe
+import dask.dataframe as dd
 import dask.utils
 import distributed.worker
 from dask.base import tokenize
-from dask.dataframe
+from dask.dataframe import DataFrame, Series
+from dask.dataframe.core import _concat as dd_concat
 from dask.dataframe.shuffle import group_split_dispatch, hash_object_dispatch
 from distributed import wait
 from distributed.protocol import nested_deserialize, to_serialize
@@ -468,18 +470,19 @@ def shuffle(
     npartitions = df.npartitions

     # Step (a):
-    df = df.persist()  # Make sure optimizations are
+    df = df.persist()  # Make sure optimizations are applied on the existing graph
     wait([df])  # Make sure all keys has been materialized on workers
+    persisted_keys = [f.key for f in c.client.futures_of(df)]
     name = (
         "explicit-comms-shuffle-"
-        f"{tokenize(df, column_names, npartitions, ignore_index)}"
+        f"{tokenize(df, column_names, npartitions, ignore_index, batchsize)}"
     )
     df_meta: DataFrame = df._meta

     # Stage all keys of `df` on the workers and cancel them, which makes it possible
     # for the shuffle to free memory as the partitions of `df` are consumed.
     # See CommsContext.stage_keys() for a description of staging.
-    rank_to_inkeys = c.stage_keys(name=name, keys=
+    rank_to_inkeys = c.stage_keys(name=name, keys=persisted_keys)
     c.client.cancel(df)

     # Get batchsize
@@ -526,23 +529,26 @@ def shuffle(
     # TODO: can we do this without using `submit()` to avoid the overhead
     # of creating a Future for each dataframe partition?

-
+    futures = []
     for rank in ranks:
         for part_id in rank_to_out_part_ids[rank]:
-
-
-
-
+            futures.append(
+                c.client.submit(
+                    getitem,
+                    shuffle_result[rank],
+                    part_id,
+                    workers=[c.worker_addresses[rank]],
+                )
             )

     # Create a distributed Dataframe from all the pieces
-    divs = [None] * (len(
-
+    divs = [None] * (len(futures) + 1)
+    kwargs = {"meta": df_meta, "divisions": divs, "prefix": "explicit-comms-shuffle"}
+    ret = dd.from_delayed(futures, **kwargs).persist()
     wait([ret])

     # Release all temporary dataframes
-    for fut in [*shuffle_result.values(), *
+    for fut in [*shuffle_result.values(), *futures]:
         fut.release()
     return ret

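
The rewritten shuffle collects plain futures and stitches them back into a collection with dd.from_delayed, while workers=[...] pins each c.client.submit call to the rank holding the data. A standalone sketch of that pinning (the two-worker local cluster is illustrative):

    from operator import getitem

    from distributed import Client, LocalCluster

    if __name__ == "__main__":
        with LocalCluster(n_workers=2) as cluster, Client(cluster) as client:
            # Pick a concrete worker address, as the shuffle does with
            # c.worker_addresses[rank].
            addr = sorted(client.scheduler_info()["workers"])[0]

            # workers=[addr] restricts the task to that worker.
            fut = client.submit(getitem, {"a": 1, "b": 2}, "a", workers=[addr])
            assert fut.result() == 1
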
dask_cuda/tests/test_dgx.py
CHANGED
@@ -15,6 +15,10 @@ mp = mp.get_context("spawn")  # type: ignore
 psutil = pytest.importorskip("psutil")


+def _is_ucx_116(ucp):
+    return ucp.get_ucx_version()[:2] == (1, 16)
+
+
 class DGXVersion(Enum):
     DGX_1 = auto()
     DGX_2 = auto()
@@ -102,9 +106,11 @@ def _test_tcp_over_ucx(protocol):
 )
 def test_tcp_over_ucx(protocol):
     if protocol == "ucx":
-        pytest.importorskip("ucp")
+        ucp = pytest.importorskip("ucp")
     elif protocol == "ucxx":
-        pytest.importorskip("ucxx")
+        ucp = pytest.importorskip("ucxx")
+    if _is_ucx_116(ucp):
+        pytest.skip("https://github.com/rapidsai/ucx-py/issues/1037")

     p = mp.Process(target=_test_tcp_over_ucx, args=(protocol,))
     p.start()
@@ -217,9 +223,11 @@ def _test_ucx_infiniband_nvlink(
 )
 def test_ucx_infiniband_nvlink(protocol, params):
     if protocol == "ucx":
-        pytest.importorskip("ucp")
+        ucp = pytest.importorskip("ucp")
     elif protocol == "ucxx":
-        pytest.importorskip("ucxx")
+        ucp = pytest.importorskip("ucxx")
+    if _is_ucx_116(ucp) and params["enable_infiniband"] is False:
+        pytest.skip("https://github.com/rapidsai/ucx-py/issues/1037")

     skip_queue = mp.Queue()

dask_cuda/tests/test_explicit_comms.py
CHANGED
@@ -25,6 +25,22 @@ from dask_cuda.utils_test import IncreasedCloseTimeoutNanny
 mp = mp.get_context("spawn")  # type: ignore
 ucp = pytest.importorskip("ucp")

+QUERY_PLANNING_ON = dask.config.get("dataframe.query-planning", None) is not False
+
+# Skip these tests when dask-expr is active (for now)
+query_planning_skip = pytest.mark.skipif(
+    QUERY_PLANNING_ON,
+    reason=(
+        "The 'explicit-comms' config is not supported "
+        "when query planning is enabled."
+    ),
+)
+
+# Set default shuffle method to "tasks"
+if dask.config.get("dataframe.shuffle.method", None) is None:
+    dask.config.set({"dataframe.shuffle.method": "tasks"})
+
+
 # Notice, all of the following tests is executed in a new process such
 # that UCX options of the different tests doesn't conflict.

@@ -82,6 +98,7 @@ def _test_dataframe_merge_empty_partitions(nrows, npartitions):
     pd.testing.assert_frame_equal(got, expected)


+@query_planning_skip
 def test_dataframe_merge_empty_partitions():
     # Notice, we use more partitions than rows
     p = mp.Process(target=_test_dataframe_merge_empty_partitions, args=(2, 4))
@@ -220,6 +237,7 @@ def _test_dask_use_explicit_comms(in_cluster):
     check_shuffle()


+@query_planning_skip
 @pytest.mark.parametrize("in_cluster", [True, False])
 def test_dask_use_explicit_comms(in_cluster):
     def _timeout(process, function, timeout):
@@ -282,6 +300,7 @@ def _test_dataframe_shuffle_merge(backend, protocol, n_workers):
     assert_eq(got, expected)


+@query_planning_skip
 @pytest.mark.parametrize("nworkers", [1, 2, 4])
 @pytest.mark.parametrize("backend", ["pandas", "cudf"])
 @pytest.mark.parametrize("protocol", ["tcp", "ucx", "ucxx"])
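
query_planning_skip above is a module-level pytest.mark.skipif marker built once and reused as a decorator. The same pattern in isolation (condition and names are illustrative):

    import pytest

    UNSUPPORTED = True  # illustrative condition, evaluated at import time

    unsupported_skip = pytest.mark.skipif(
        UNSUPPORTED, reason="not supported in this configuration"
    )

    @unsupported_skip
    def test_example():
        assert 1 + 1 == 2
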
dask_cuda/tests/test_proxy.py
CHANGED
@@ -537,10 +537,10 @@ def test_from_cudf_of_proxy_object():
     assert has_parallel_type(df)

     ddf = dask_cudf.from_cudf(df, npartitions=1)
-    assert has_parallel_type(ddf)
+    assert has_parallel_type(ddf._meta)

     # Notice, the output is a dask-cudf dataframe and not a proxy object
-    assert type(ddf) is
+    assert type(ddf._meta) is cudf.DataFrame


 def test_proxy_object_parquet(tmp_path):

{dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: dask-cuda
-Version: 24.4.0
+Version: 24.6.0
 Summary: Utilities for Dask and CUDA interactions
 Author: NVIDIA Corporation
-License: Apache
+License: Apache 2.0
 Project-URL: Homepage, https://github.com/rapidsai/dask-cuda
 Project-URL: Documentation, https://docs.rapids.ai/api/dask-cuda/stable/
 Project-URL: Source, https://github.com/rapidsai/dask-cuda
@@ -23,7 +23,7 @@ Requires-Dist: numba >=0.57
 Requires-Dist: numpy <2.0a0,>=1.23
 Requires-Dist: pandas >=1.3
 Requires-Dist: pynvml <11.5,>=11.0.0
-Requires-Dist: rapids-dask-dependency ==24.4.*
+Requires-Dist: rapids-dask-dependency ==24.6.*
 Requires-Dist: zict >=2.0.0
 Provides-Extra: docs
 Requires-Dist: numpydoc >=1.1.0 ; extra == 'docs'
@@ -31,12 +31,12 @@ Requires-Dist: sphinx ; extra == 'docs'
 Requires-Dist: sphinx-click >=2.7.1 ; extra == 'docs'
 Requires-Dist: sphinx-rtd-theme >=0.5.1 ; extra == 'docs'
 Provides-Extra: test
-Requires-Dist: cudf ==24.4.* ; extra == 'test'
-Requires-Dist: dask-cudf ==24.4.* ; extra == 'test'
-Requires-Dist: kvikio ==24.4.* ; extra == 'test'
+Requires-Dist: cudf ==24.6.* ; extra == 'test'
+Requires-Dist: dask-cudf ==24.6.* ; extra == 'test'
+Requires-Dist: kvikio ==24.6.* ; extra == 'test'
 Requires-Dist: pytest ; extra == 'test'
 Requires-Dist: pytest-cov ; extra == 'test'
-Requires-Dist: ucx-py ==0.37.* ; extra == 'test'
+Requires-Dist: ucx-py ==0.38.* ; extra == 'test'

 Dask CUDA
 =========

{dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
-dask_cuda/VERSION,sha256=
-dask_cuda/__init__.py,sha256=
-dask_cuda/_version.py,sha256=
+dask_cuda/VERSION,sha256=dIWV5q3UAaQInFeBt7NGhhmqTBqP_0Y540pyLeZ8mkc,9
+dask_cuda/__init__.py,sha256=JLDWev7vI_dPusLgRdOwXBz-xfhlX_hc-DzmLtrEYO0,1918
+dask_cuda/_version.py,sha256=U6CHD0Kkafws8nJSbEwZcu-ZKReghzbciFgluwauXtg,778
 dask_cuda/cli.py,sha256=XNRH0bu-6jzRoyWJB5qSWuzePJSh3z_5Ng6rDCnz7lg,15970
 dask_cuda/cuda_worker.py,sha256=bIu-ESeIpJG_WaTYrv0z9z5juJ1qR5i_5Ng3CN1WK8s,8579
 dask_cuda/device_host_file.py,sha256=yS31LGtt9VFAG78uBBlTDr7HGIng2XymV1OxXIuEMtM,10272
@@ -20,34 +20,34 @@ dask_cuda/worker_spec.py,sha256=7-Uq_e5q2SkTlsmctMcYLCa9_3RiiVHZLIN7ctfaFmE,4376
 dask_cuda/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dask_cuda/benchmarks/common.py,sha256=sEIFnRZS6wbyKCQyB4fDclYLc2YqC0PolurR5qzuRxw,6393
 dask_cuda/benchmarks/local_cudf_groupby.py,sha256=T9lA9nb4Wzu46AH--SJEVCeCm3650J7slapdNR_08FU,8904
-dask_cuda/benchmarks/local_cudf_merge.py,sha256=
-dask_cuda/benchmarks/local_cudf_shuffle.py,sha256=
+dask_cuda/benchmarks/local_cudf_merge.py,sha256=AsuVnMA3H93sJwjjgi4KaIdYKnnX1OeRMPiXizrwHGk,12577
+dask_cuda/benchmarks/local_cudf_shuffle.py,sha256=2xWJZf3gwDNimXKZN2ivtU3OE_qec1KNOhgL4_AGQZU,8655
 dask_cuda/benchmarks/local_cupy.py,sha256=aUKIYfeR7c77K4kKk697Rxo8tG8kFabQ9jQEVGr-oTs,10762
 dask_cuda/benchmarks/local_cupy_map_overlap.py,sha256=_texYmam1K_XbzIvURltui5KRsISGFNylXiGUtgRIz0,6442
-dask_cuda/benchmarks/utils.py,sha256=
+dask_cuda/benchmarks/utils.py,sha256=mrQAGbZCqx4N8AC-ASlw-vhDxz060D4i_oSksKZkl2c,27580
 dask_cuda/explicit_comms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dask_cuda/explicit_comms/comms.py,sha256=Su6PuNo68IyS-AwoqU4S9TmqWsLvUdNa0jot2hx8jQQ,10400
 dask_cuda/explicit_comms/dataframe/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dask_cuda/explicit_comms/dataframe/shuffle.py,sha256=
+dask_cuda/explicit_comms/dataframe/shuffle.py,sha256=qJP6WxY0EkuafGrpZDCxeVGuQIoAacYc1SchcpmK0WM,20368
 dask_cuda/tests/test_cudf_builtin_spilling.py,sha256=u3kW91YRLdHFycvpGfSQKrEucu5khMJ1k4sjmddO490,4910
 dask_cuda/tests/test_dask_cuda_worker.py,sha256=gViHaMCSfB6ip125OEi9D0nfKC-qBXRoHz6BRodEdb4,17729
 dask_cuda/tests/test_device_host_file.py,sha256=79ssUISo1YhsW_7HdwqPfsH2LRzS2bi5BjPym1Sdgqw,5882
-dask_cuda/tests/test_dgx.py,sha256=
-dask_cuda/tests/test_explicit_comms.py,sha256=
+dask_cuda/tests/test_dgx.py,sha256=BPCF4ZvhrVKkT43OOFHdijuo-M34vW3V18C8rRH1HXg,7489
+dask_cuda/tests/test_explicit_comms.py,sha256=l__DAIHx_DmV71LUEyvDNsLsHYYzafzvy0z_loFwQDo,13686
 dask_cuda/tests/test_from_array.py,sha256=okT1B6UqHmLxoy0uER0Ylm3UyOmi5BAXwJpTuTAw44I,601
 dask_cuda/tests/test_gds.py,sha256=6jf0HPTHAIG8Mp_FC4Ai4zpn-U1K7yk0fSXg8He8-r8,1513
 dask_cuda/tests/test_initialize.py,sha256=Rba59ZbljEm1yyN94_sWZPEE_f7hWln95aiBVc49pmY,6960
 dask_cuda/tests/test_local_cuda_cluster.py,sha256=G3kR-4o-vCqWWfSuQLFKVEK0F243FaDSgRlDTUll5aU,18376
 dask_cuda/tests/test_proxify_host_file.py,sha256=Yiv0sDcUoWw0d2oiPeHGoHqqSSM4lfQ4rChCiaxb6EU,18994
-dask_cuda/tests/test_proxy.py,sha256=
+dask_cuda/tests/test_proxy.py,sha256=OnGnPkl5ksCb-3hpEKG2z1OfPK9DbnOCtBHOjcUUjhg,23809
 dask_cuda/tests/test_spill.py,sha256=xN9PbVERBYMuZxvscSO0mAM22loq9WT3ltZVBFxlmM4,10239
 dask_cuda/tests/test_utils.py,sha256=JRIwXfemc3lWSzLJX0VcvR1_0wB4yeoOTsw7kB6z6pU,9176
 dask_cuda/tests/test_worker_spec.py,sha256=Bvu85vkqm6ZDAYPXKMJlI2pm9Uc5tiYKNtO4goXSw-I,2399
 examples/ucx/client_initialize.py,sha256=YN3AXHF8btcMd6NicKKhKR9SXouAsK1foJhFspbOn70,1262
 examples/ucx/local_cuda_cluster.py,sha256=7xVY3EhwhkY2L4VZin_BiMCbrjhirDNChoC86KiETNc,1983
-dask_cuda-24.4.0.dist-info/LICENSE,sha256=
-dask_cuda-24.4.0.dist-info/METADATA,sha256=
-dask_cuda-24.4.0.dist-info/WHEEL,sha256=
-dask_cuda-24.4.0.dist-info/entry_points.txt,sha256=
-dask_cuda-24.4.0.dist-info/top_level.txt,sha256=
-dask_cuda-24.4.0.dist-info/RECORD,,
+dask_cuda-24.6.0.dist-info/LICENSE,sha256=MjI3I-EgxfEvZlgjk82rgiFsZqSDXHFETd2QJ89UwDA,11348
+dask_cuda-24.6.0.dist-info/METADATA,sha256=eHHrrmTxKYk6JuFexzLAz8ybdummYxVAbqadz8fZGro,2570
+dask_cuda-24.6.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+dask_cuda-24.6.0.dist-info/entry_points.txt,sha256=UcRaKVEpywtxc6pF1VnfMB0UK4sJg7a8_NdZF67laPM,136
+dask_cuda-24.6.0.dist-info/top_level.txt,sha256=3kKxJxeM108fuYc_lwwlklP7YBU9IEmdmRAouzi397o,33
+dask_cuda-24.6.0.dist-info/RECORD,,

{dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/LICENSE
File without changes

{dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/WHEEL
File without changes

{dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/entry_points.txt
File without changes

{dask_cuda-24.4.0.dist-info → dask_cuda-24.6.0.dist-info}/top_level.txt
File without changes