cudf-polars-cu13 25.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cudf_polars/GIT_COMMIT +1 -0
- cudf_polars/VERSION +1 -0
- cudf_polars/__init__.py +28 -0
- cudf_polars/_version.py +21 -0
- cudf_polars/callback.py +318 -0
- cudf_polars/containers/__init__.py +13 -0
- cudf_polars/containers/column.py +495 -0
- cudf_polars/containers/dataframe.py +361 -0
- cudf_polars/containers/datatype.py +137 -0
- cudf_polars/dsl/__init__.py +8 -0
- cudf_polars/dsl/expr.py +66 -0
- cudf_polars/dsl/expressions/__init__.py +8 -0
- cudf_polars/dsl/expressions/aggregation.py +226 -0
- cudf_polars/dsl/expressions/base.py +272 -0
- cudf_polars/dsl/expressions/binaryop.py +120 -0
- cudf_polars/dsl/expressions/boolean.py +326 -0
- cudf_polars/dsl/expressions/datetime.py +271 -0
- cudf_polars/dsl/expressions/literal.py +97 -0
- cudf_polars/dsl/expressions/rolling.py +643 -0
- cudf_polars/dsl/expressions/selection.py +74 -0
- cudf_polars/dsl/expressions/slicing.py +46 -0
- cudf_polars/dsl/expressions/sorting.py +85 -0
- cudf_polars/dsl/expressions/string.py +1002 -0
- cudf_polars/dsl/expressions/struct.py +137 -0
- cudf_polars/dsl/expressions/ternary.py +49 -0
- cudf_polars/dsl/expressions/unary.py +517 -0
- cudf_polars/dsl/ir.py +2607 -0
- cudf_polars/dsl/nodebase.py +164 -0
- cudf_polars/dsl/to_ast.py +359 -0
- cudf_polars/dsl/tracing.py +16 -0
- cudf_polars/dsl/translate.py +939 -0
- cudf_polars/dsl/traversal.py +224 -0
- cudf_polars/dsl/utils/__init__.py +8 -0
- cudf_polars/dsl/utils/aggregations.py +481 -0
- cudf_polars/dsl/utils/groupby.py +98 -0
- cudf_polars/dsl/utils/naming.py +34 -0
- cudf_polars/dsl/utils/replace.py +61 -0
- cudf_polars/dsl/utils/reshape.py +74 -0
- cudf_polars/dsl/utils/rolling.py +121 -0
- cudf_polars/dsl/utils/windows.py +192 -0
- cudf_polars/experimental/__init__.py +8 -0
- cudf_polars/experimental/base.py +386 -0
- cudf_polars/experimental/benchmarks/__init__.py +4 -0
- cudf_polars/experimental/benchmarks/pdsds.py +220 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/__init__.py +4 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q1.py +88 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q10.py +225 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q2.py +244 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q3.py +65 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q4.py +359 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q5.py +462 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q6.py +92 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q7.py +79 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q8.py +524 -0
- cudf_polars/experimental/benchmarks/pdsds_queries/q9.py +137 -0
- cudf_polars/experimental/benchmarks/pdsh.py +814 -0
- cudf_polars/experimental/benchmarks/utils.py +832 -0
- cudf_polars/experimental/dask_registers.py +200 -0
- cudf_polars/experimental/dispatch.py +156 -0
- cudf_polars/experimental/distinct.py +197 -0
- cudf_polars/experimental/explain.py +157 -0
- cudf_polars/experimental/expressions.py +590 -0
- cudf_polars/experimental/groupby.py +327 -0
- cudf_polars/experimental/io.py +943 -0
- cudf_polars/experimental/join.py +391 -0
- cudf_polars/experimental/parallel.py +423 -0
- cudf_polars/experimental/repartition.py +69 -0
- cudf_polars/experimental/scheduler.py +155 -0
- cudf_polars/experimental/select.py +188 -0
- cudf_polars/experimental/shuffle.py +354 -0
- cudf_polars/experimental/sort.py +609 -0
- cudf_polars/experimental/spilling.py +151 -0
- cudf_polars/experimental/statistics.py +795 -0
- cudf_polars/experimental/utils.py +169 -0
- cudf_polars/py.typed +0 -0
- cudf_polars/testing/__init__.py +8 -0
- cudf_polars/testing/asserts.py +448 -0
- cudf_polars/testing/io.py +122 -0
- cudf_polars/testing/plugin.py +236 -0
- cudf_polars/typing/__init__.py +219 -0
- cudf_polars/utils/__init__.py +8 -0
- cudf_polars/utils/config.py +741 -0
- cudf_polars/utils/conversion.py +40 -0
- cudf_polars/utils/dtypes.py +118 -0
- cudf_polars/utils/sorting.py +53 -0
- cudf_polars/utils/timer.py +39 -0
- cudf_polars/utils/versions.py +27 -0
- cudf_polars_cu13-25.10.0.dist-info/METADATA +136 -0
- cudf_polars_cu13-25.10.0.dist-info/RECORD +92 -0
- cudf_polars_cu13-25.10.0.dist-info/WHEEL +5 -0
- cudf_polars_cu13-25.10.0.dist-info/licenses/LICENSE +201 -0
- cudf_polars_cu13-25.10.0.dist-info/top_level.txt +1 -0
cudf_polars/GIT_COMMIT
ADDED

@@ -0,0 +1 @@
+f4e35ca02118eada383e7417273c6cb1857ec66e
cudf_polars/VERSION
ADDED

@@ -0,0 +1 @@
+25.10.00
cudf_polars/__init__.py
ADDED

@@ -0,0 +1,28 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+An executor for polars logical plans.
+
+This package implements an executor for polars logical plans using
+pylibcudf to execute the plans on device.
+"""
+
+from __future__ import annotations
+
+from cudf_polars._version import __git_commit__, __version__
+from cudf_polars.callback import execute_with_cudf
+from cudf_polars.dsl.translate import Translator
+
+# Check we have a supported polars version
+from cudf_polars.utils.versions import _ensure_polars_version
+
+_ensure_polars_version()
+del _ensure_polars_version
+
+__all__: list[str] = [
+    "Translator",
+    "__git_commit__",
+    "__version__",
+    "execute_with_cudf",
+]
cudf_polars/_version.py
ADDED

@@ -0,0 +1,21 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+import importlib.resources
+
+__version__ = (
+    importlib.resources.files(__package__).joinpath("VERSION").read_text().strip()
+)
+try:
+    __git_commit__ = (
+        importlib.resources.files(__package__)
+        .joinpath("GIT_COMMIT")
+        .read_text()
+        .strip()
+    )
+except FileNotFoundError:
+    __git_commit__ = ""
+
+__all__ = ["__git_commit__", "__version__"]
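
For reference, a quick interactive check of the metadata this module exposes; the expected values come from the GIT_COMMIT and VERSION data files shown above:

import cudf_polars

print(cudf_polars.__version__)     # 25.10.00
print(cudf_polars.__git_commit__)  # f4e35ca02118eada383e7417273c6cb1857ec66e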
cudf_polars/callback.py
ADDED

@@ -0,0 +1,318 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES.
+# SPDX-License-Identifier: Apache-2.0
+
+"""Callback for the polars collect function to execute on device."""
+
+from __future__ import annotations
+
+import contextlib
+import os
+import textwrap
+import time
+import warnings
+from functools import cache, partial
+from typing import TYPE_CHECKING, Literal, overload
+
+import nvtx
+from typing_extensions import assert_never
+
+from polars.exceptions import ComputeError, PerformanceWarning
+
+import pylibcudf
+import rmm
+from rmm._cuda import gpu
+
+from cudf_polars.dsl.tracing import CUDF_POLARS_NVTX_DOMAIN
+from cudf_polars.dsl.translate import Translator
+from cudf_polars.utils.config import _env_get_int, get_total_device_memory
+from cudf_polars.utils.timer import Timer
+
+if TYPE_CHECKING:
+    from collections.abc import Generator
+
+    import polars as pl
+    from polars import GPUEngine
+
+    from cudf_polars.dsl.ir import IR
+    from cudf_polars.typing import NodeTraverser
+    from cudf_polars.utils.config import ConfigOptions
+
+__all__: list[str] = ["execute_with_cudf"]
+
+
+@cache
+def default_memory_resource(
+    device: int,
+    cuda_managed_memory: bool,  # noqa: FBT001
+) -> rmm.mr.DeviceMemoryResource:
+    """
+    Return the default memory resource for cudf-polars.
+
+    Parameters
+    ----------
+    device
+        Disambiguating device id when selecting the device. Must be
+        the active device when this function is called.
+    cuda_managed_memory
+        Whether to use managed memory or not.
+
+    Returns
+    -------
+    rmm.mr.DeviceMemoryResource
+        The default memory resource that cudf-polars uses. Currently
+        a managed memory resource, if `cuda_managed_memory` is `True`;
+        else, an async pool resource is returned.
+    """
+    try:
+        if (
+            cuda_managed_memory
+            and pylibcudf.utils._is_concurrent_managed_access_supported()
+        ):
+            # Allocating 80% of the available memory for the pool.
+            # Leaving a 20% headroom to avoid OOM errors.
+            free_memory, _ = rmm.mr.available_device_memory()
+            free_memory = int(round(float(free_memory) * 0.80 / 256) * 256)
+            pylibcudf.prefetch.enable()
+            mr = rmm.mr.PrefetchResourceAdaptor(
+                rmm.mr.PoolMemoryResource(
+                    rmm.mr.ManagedMemoryResource(),
+                    initial_pool_size=free_memory,
+                )
+            )
+        else:
+            mr = rmm.mr.CudaAsyncMemoryResource()
+    except RuntimeError as e:  # pragma: no cover
+        msg, *_ = e.args
+        if (
+            msg.startswith("RMM failure")
+            and msg.find("not supported with this CUDA driver/runtime version") > -1
+        ):
+            raise ComputeError(
+                "GPU engine requested, but incorrect cudf-polars package installed. "
+                "cudf-polars requires CUDA 12.0+ to be installed."
+            ) from None
+        else:
+            raise
+    else:
+        return mr
+
+
+@contextlib.contextmanager
+def set_memory_resource(
+    mr: rmm.mr.DeviceMemoryResource | None,
+) -> Generator[rmm.mr.DeviceMemoryResource, None, None]:
+    """
+    Set the current memory resource for an execution block.
+
+    Parameters
+    ----------
+    mr
+        Memory resource to use. If `None`, calls :func:`default_memory_resource`
+        to obtain an mr on the currently active device.
+
+    Returns
+    -------
+    Memory resource used.
+
+    Notes
+    -----
+    At exit, the memory resource is restored to whatever was current
+    at entry. If a memory resource is provided, it must be valid to
+    use with the currently active device.
+    """
+    previous = rmm.mr.get_current_device_resource()
+    if mr is None:
+        device: int = gpu.getDevice()
+        mr = default_memory_resource(
+            device=device,
+            cuda_managed_memory=bool(
+                _env_get_int(
+                    "POLARS_GPU_ENABLE_CUDA_MANAGED_MEMORY",
+                    default=1 if get_total_device_memory() is not None else 0,
+                )
+                != 0
+            ),
+        )
+    rmm.mr.set_current_device_resource(mr)
+    try:
+        yield mr
+    finally:
+        rmm.mr.set_current_device_resource(previous)
+
+
+@contextlib.contextmanager
+def set_device(device: int | None) -> Generator[int, None, None]:
+    """
+    Set the device the query is executed on.
+
+    Parameters
+    ----------
+    device
+        Device to use. If `None`, uses the current device.
+
+    Returns
+    -------
+    Device active for the execution of the block.
+
+    Notes
+    -----
+    At exit, the device is restored to whatever was current at entry.
+    """
+    previous: int = gpu.getDevice()
+    if device is not None:
+        gpu.setDevice(device)
+    try:
+        yield previous
+    finally:
+        gpu.setDevice(previous)
+
+
+@overload
+def _callback(
+    ir: IR,
+    with_columns: list[str] | None,
+    pyarrow_predicate: str | None,
+    n_rows: int | None,
+    should_time: Literal[False],
+    *,
+    memory_resource: rmm.mr.DeviceMemoryResource | None,
+    config_options: ConfigOptions,
+    timer: Timer | None,
+) -> pl.DataFrame: ...
+
+
+@overload
+def _callback(
+    ir: IR,
+    with_columns: list[str] | None,
+    pyarrow_predicate: str | None,
+    n_rows: int | None,
+    should_time: Literal[True],
+    *,
+    memory_resource: rmm.mr.DeviceMemoryResource | None,
+    config_options: ConfigOptions,
+    timer: Timer | None,
+) -> tuple[pl.DataFrame, list[tuple[int, int, str]]]: ...
+
+
+def _callback(
+    ir: IR,
+    with_columns: list[str] | None,
+    pyarrow_predicate: str | None,
+    n_rows: int | None,
+    should_time: bool,  # noqa: FBT001
+    *,
+    memory_resource: rmm.mr.DeviceMemoryResource | None,
+    config_options: ConfigOptions,
+    timer: Timer | None,
+) -> pl.DataFrame | tuple[pl.DataFrame, list[tuple[int, int, str]]]:
+    assert with_columns is None
+    assert pyarrow_predicate is None
+    assert n_rows is None
+    if timer is not None:
+        assert should_time
+    with (
+        nvtx.annotate(message="ExecuteIR", domain=CUDF_POLARS_NVTX_DOMAIN),
+        # Device must be set before memory resource is obtained.
+        set_device(config_options.device),
+        set_memory_resource(memory_resource),
+    ):
+        if config_options.executor.name == "in-memory":
+            df = ir.evaluate(cache={}, timer=timer).to_polars()
+            if timer is None:
+                return df
+            else:
+                return df, timer.timings
+        elif config_options.executor.name == "streaming":
+            from cudf_polars.experimental.parallel import evaluate_streaming
+
+            if timer is not None:
+                msg = textwrap.dedent("""\
+                    LazyFrame.profile() is not supported with the streaming executor.
+                    To profile execution with the streaming executor, use:
+
+                    - NVIDIA Nsight Systems with the 'streaming' scheduler.
+                    - Dask's built-in profiling tools with the 'distributed' scheduler.
+                """)
+                raise NotImplementedError(msg)
+
+            return evaluate_streaming(ir, config_options).to_polars()
+        assert_never(f"Unknown executor '{config_options.executor}'")
+
+
+def execute_with_cudf(
+    nt: NodeTraverser, duration_since_start: int | None, *, config: GPUEngine
+) -> None:
+    """
+    A post-optimization callback that attempts to execute the plan with cudf.
+
+    Parameters
+    ----------
+    nt
+        NodeTraverser
+
+    duration_since_start
+        Time since the user started executing the query (or None if no
+        profiling should occur).
+
+    config
+        GPUEngine object. Configuration is available as ``engine.config``.
+
+    Raises
+    ------
+    ValueError
+        If the config contains unsupported keys.
+    NotImplementedError
+        If translation of the plan is unsupported.
+
+    Notes
+    -----
+    The NodeTraverser is mutated if the libcudf executor can handle the plan.
+    """
+    if duration_since_start is None:
+        timer = None
+    else:
+        start = time.monotonic_ns()
+        timer = Timer(start - duration_since_start)
+
+    memory_resource = config.memory_resource
+
+    with nvtx.annotate(message="ConvertIR", domain=CUDF_POLARS_NVTX_DOMAIN):
+        translator = Translator(nt, config)
+        ir = translator.translate_ir()
+        ir_translation_errors = translator.errors
+        if timer is not None:
+            timer.store(start, time.monotonic_ns(), "gpu-ir-translation")
+
+        if (
+            memory_resource is None
+            and translator.config_options.executor.name == "streaming"
+            and translator.config_options.executor.scheduler == "distributed"
+        ):  # pragma: no cover; Requires distributed cluster
+            memory_resource = rmm.mr.get_current_device_resource()
+        if len(ir_translation_errors):
+            # TODO: Display these errors in a user-friendly way.
+            # tracked in https://github.com/rapidsai/cudf/issues/17051
+            unique_errors = sorted(set(ir_translation_errors), key=str)
+            formatted_errors = "\n".join(
+                f"- {e.__class__.__name__}: {e}" for e in unique_errors
+            )
+            error_message = (
+                "Query execution with GPU not possible: unsupported operations."
+                f"\nThe errors were:\n{formatted_errors}"
+            )
+            exception = NotImplementedError(error_message, unique_errors)
+            if bool(int(os.environ.get("POLARS_VERBOSE", 0))):
+                warnings.warn(error_message, PerformanceWarning, stacklevel=2)
+            if translator.config_options.raise_on_fail:
+                raise exception
+        else:
+            nt.set_udf(
+                partial(
+                    _callback,
+                    ir,
+                    memory_resource=memory_resource,
+                    config_options=translator.config_options,
+                    timer=timer,
+                )
+            )
cudf_polars/containers/__init__.py
ADDED

@@ -0,0 +1,13 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES.
+# SPDX-License-Identifier: Apache-2.0
+
+"""Containers of concrete data."""
+
+from __future__ import annotations
+
+__all__: list[str] = ["Column", "DataFrame", "DataType"]
+
+# dataframe.py & column.py import DataType, so import in this order to avoid a circular import
+from cudf_polars.containers.datatype import DataType  # noqa: I001
+from cudf_polars.containers.column import Column
+from cudf_polars.containers.dataframe import DataFrame