dora-rs 0.3.7rc2-cp37-abi3-win_amd64.whl → 0.3.8rc0-cp37-abi3-win_amd64.whl


dora/cuda.py ADDED
@@ -0,0 +1,106 @@
+ import pyarrow as pa
+
+ # To install pyarrow.cuda, run `conda install pyarrow "arrow-cpp-proc=*=cuda" -c conda-forge`
+ import pyarrow.cuda as cuda
+
+ # Make sure to install torch with cuda
+ import torch
+
+ # Make sure to install numba with cuda
+ from numba.cuda.cudadrv.devicearray import DeviceNDArray
+ from numba.cuda import to_device
+
+
+ def torch_to_ipc_buffer(tensor: torch.Tensor) -> tuple[pa.Array, dict]:
+     """
+     Converts a PyTorch tensor into a pyarrow buffer containing the IPC handle and its metadata.
+
+     Example use:
+     ```python
+     torch_tensor = torch.tensor(random_data, dtype=torch.int64, device="cuda")
+     ipc_buffer, metadata = torch_to_ipc_buffer(torch_tensor)
+     node.send_output("latency", ipc_buffer, metadata)
+     ```
+     """
+     # Wrap the tensor as a numba device array and export its memory as a CUDA IPC handle.
+     device_arr = to_device(tensor)
+     cuda_buf = pa.cuda.CudaBuffer.from_numba(device_arr.gpu_data)
+     handle_buffer = cuda_buf.export_for_ipc().serialize()
+     # Shape, strides, and dtype let the receiver rebuild the array around the shared memory.
+     metadata = {
+         "shape": device_arr.shape,
+         "strides": device_arr.strides,
+         "dtype": device_arr.dtype.str,
+     }
+     return pa.array(handle_buffer, type=pa.uint8()), metadata
+
+
+ def ipc_buffer_to_ipc_handle(handle_buffer: pa.Array) -> cuda.IpcMemHandle:
+     """
+     Converts a buffer containing a serialized handle into a CUDA IpcMemHandle.
+
+     Example use:
+     ```python
+     import pyarrow as pa
+     from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_torch
+
+     ctx = pa.cuda.context()
+     event = node.next()
+
+     ipc_handle = ipc_buffer_to_ipc_handle(event["value"])
+     cudabuffer = ctx.open_ipc_buffer(ipc_handle)
+     torch_tensor = cudabuffer_to_torch(cudabuffer, event["metadata"])  # on cuda
+     ```
+     """
+     # The serialized handle is stored in the uint8 array's data buffer (index 1).
+     handle_buffer = handle_buffer.buffers()[1]
+     ipc_handle = pa.cuda.IpcMemHandle.from_buffer(handle_buffer)
+     return ipc_handle
+
+
+ def cudabuffer_to_numba(buffer: cuda.CudaBuffer, metadata: dict) -> DeviceNDArray:
+     """
+     Converts a pyarrow CUDA buffer to a numba device array.
+
+     Example use:
+     ```python
+     import pyarrow as pa
+     from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_numba
+
+     ctx = pa.cuda.context()
+     event = node.next()
+
+     ipc_handle = ipc_buffer_to_ipc_handle(event["value"])
+     cudabuffer = ctx.open_ipc_buffer(ipc_handle)
+     numba_tensor = cudabuffer_to_numba(cudabuffer, event["metadata"])
+     ```
+     """
+     # Rebuild the device array from the shared buffer using the metadata sent alongside it.
+     shape = metadata["shape"]
+     strides = metadata["strides"]
+     dtype = metadata["dtype"]
+     device_arr = DeviceNDArray(shape, strides, dtype, gpu_data=buffer.to_numba())
+     return device_arr
+
+
+ def cudabuffer_to_torch(buffer: cuda.CudaBuffer, metadata: dict) -> torch.Tensor:
+     """
+     Converts a pyarrow CUDA buffer to a torch tensor.
+
+     Example use:
+     ```python
+     import pyarrow as pa
+     from dora.cuda import ipc_buffer_to_ipc_handle, cudabuffer_to_torch
+
+     ctx = pa.cuda.context()
+     event = node.next()
+
+     ipc_handle = ipc_buffer_to_ipc_handle(event["value"])
+     cudabuffer = ctx.open_ipc_buffer(ipc_handle)
+     torch_tensor = cudabuffer_to_torch(cudabuffer, event["metadata"])  # on cuda
+     ```
+     """
+     # Go through numba so torch can adopt the memory via __cuda_array_interface__.
+     device_arr = cudabuffer_to_numba(buffer, metadata)
+     torch_tensor = torch.as_tensor(device_arr, device="cuda")
+     return torch_tensor
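
Taken together, these helpers let one dora node hand a CUDA tensor to another without copying it through host memory: the sender serializes a CUDA IPC handle into an ordinary pyarrow uint8 array, and the receiver reopens that handle inside its own CUDA context. The sketch below stitches the docstring snippets into a minimal sender/receiver pair. It is a sketch only: it assumes the usual `from dora import Node` entry point, a dataflow that wires the sender's `latency` output to the receiver's input, and CUDA-enabled builds of torch, numba, and pyarrow; none of that is part of this diff.

```python
"""Minimal sketch of a zero-copy GPU sender/receiver pair built on dora.cuda.

Assumptions (not part of this diff): the standard `from dora import Node` API,
a dataflow connecting the sender's `latency` output to the receiver's input,
and CUDA-enabled builds of torch, numba, and pyarrow.
"""
import pyarrow as pa
import torch

from dora import Node
from dora.cuda import (
    cudabuffer_to_torch,
    ipc_buffer_to_ipc_handle,
    torch_to_ipc_buffer,
)


def sender() -> None:
    """Runs in the sending node: only the serialized IPC handle leaves this process."""
    node = Node()
    tensor = torch.rand(1024, 1024, device="cuda")      # data already on the GPU
    ipc_buffer, metadata = torch_to_ipc_buffer(tensor)  # uint8 handle + shape/strides/dtype
    node.send_output("latency", ipc_buffer, metadata)


def receiver() -> None:
    """Runs in the receiving node: reopens the handle and views it as a torch tensor."""
    node = Node()
    ctx = pa.cuda.context()
    event = node.next()
    if event is not None and event["type"] == "INPUT":
        ipc_handle = ipc_buffer_to_ipc_handle(event["value"])
        cudabuffer = ctx.open_ipc_buffer(ipc_handle)
        torch_tensor = cudabuffer_to_torch(cudabuffer, event["metadata"])  # still on cuda
        print(torch_tensor.shape, torch_tensor.device)
```

Only the serialized handle plus the shape/strides/dtype metadata travel over dora's regular Arrow channel; the tensor's memory stays on the GPU and is mapped directly into the receiving process. Note that CUDA IPC only works between processes on the same machine and device, so this pattern targets local dataflows.
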
dora/dora.pyd CHANGED
Binary file
dora_rs-0.3.7rc2.dist-info/METADATA → dora_rs-0.3.8rc0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
- Metadata-Version: 2.3
+ Metadata-Version: 2.4
  Name: dora-rs
- Version: 0.3.7rc2
+ Version: 0.3.8rc0
  Requires-Dist: pyarrow
  Summary: `dora` goal is to be a low latency, composable, and distributed data flow.
  License: Apache-2.0
dora_rs-0.3.8rc0.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+ dora_rs-0.3.8rc0.dist-info/METADATA,sha256=MR3NL31ebeRXrWBmkpQJWpynhB68ag5s7CiYclRcbbg,670
+ dora_rs-0.3.8rc0.dist-info/WHEEL,sha256=8_UzpQVLjhi_DxWEhc5q5kLz0DLADIO7XbXdIeTYx_s,94
+ dora/cuda.py,sha256=naTHXaSKwRADxVv9J_K2g4MQRZSnMVw-joZitJjwxKA,3292
+ dora/__init__.py,sha256=wRvKTqLYFROzvt95XWXPmCSd2OQeBW4zCn37H8VhLBs,751
+ dora/__init__.pyi,sha256=KrLt3v0CXiURVMyukfq_hqNhZaOqmmGzrkYNFcmFXIE,8652
+ dora/dora.pyd,sha256=wh5uaQQ-AqWMB_cCyQtk6zW-4TFjUN5_YYkd1CdMoiU,17591296
+ dora_rs-0.3.8rc0.dist-info/RECORD,,
dora_rs-0.3.7rc2.dist-info/WHEEL → dora_rs-0.3.8rc0.dist-info/WHEEL CHANGED
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: maturin (1.7.4)
+ Generator: maturin (1.7.8)
  Root-Is-Purelib: false
  Tag: cp37-abi3-win_amd64
dora_rs-0.3.7rc2.dist-info/RECORD DELETED
@@ -1,6 +0,0 @@
- dora_rs-0.3.7rc2.dist-info/METADATA,sha256=_mV0G9T43k6VGdda8BgdVSXIl3v4KgepU3WbDS-pBPA,670
- dora_rs-0.3.7rc2.dist-info/WHEEL,sha256=YApKqCDyi9GqUQokZqOT5I78CS4GNvgM6MM-DsXhn2s,94
- dora/__init__.py,sha256=wRvKTqLYFROzvt95XWXPmCSd2OQeBW4zCn37H8VhLBs,751
- dora/__init__.pyi,sha256=KrLt3v0CXiURVMyukfq_hqNhZaOqmmGzrkYNFcmFXIE,8652
- dora/dora.pyd,sha256=nmahlenNm8g0akPblM5HCbv1PeRXR7QoS3xQgVbow94,17536000
- dora_rs-0.3.7rc2.dist-info/RECORD,,