turbopipe 1.2.1__tar.gz → 1.2.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of turbopipe might be problematic.
- turbopipe-1.2.3/.github/funding.yml +2 -0
- turbopipe-1.2.3/.github/workflows/release.yml +98 -0
- turbopipe-1.2.3/.python-version +1 -0
- {turbopipe-1.2.1 → turbopipe-1.2.3}/PKG-INFO +52 -48
- {turbopipe-1.2.1 → turbopipe-1.2.3}/meson.build +1 -1
- turbopipe-1.2.3/pyproject.toml +26 -0
- turbopipe-1.2.1/Readme.md → turbopipe-1.2.3/readme.md +44 -20
- turbopipe-1.2.3/turbopipe/__init__.py +29 -0
- {turbopipe-1.2.1 → turbopipe-1.2.3}/turbopipe/_turbopipe.cpp +85 -79
- {turbopipe-1.2.1 → turbopipe-1.2.3}/turbopipe/version.py +1 -1
- turbopipe-1.2.1/.github/workflows/release.yaml +0 -106
- turbopipe-1.2.1/pyproject.toml +0 -22
- turbopipe-1.2.1/turbopipe/__init__.py +0 -47
- {turbopipe-1.2.1 → turbopipe-1.2.3}/.gitattributes +0 -0
- {turbopipe-1.2.1 → turbopipe-1.2.3}/.gitignore +0 -0
- {turbopipe-1.2.1 → turbopipe-1.2.3}/examples/basic.py +0 -0
- {turbopipe-1.2.1 → turbopipe-1.2.3}/examples/benchmark.py +0 -0
- /turbopipe-1.2.1/License.md → /turbopipe-1.2.3/license.txt +0 -0
- {turbopipe-1.2.1 → turbopipe-1.2.3}/turbopipe/resources/images/turbopipe.png +0 -0
- {turbopipe-1.2.1 → turbopipe-1.2.3}/turbopipe/resources/images/turbopipe.svg +0 -0
turbopipe-1.2.3/.github/workflows/release.yml
@@ -0,0 +1,98 @@
+name: release
+
+on:
+  workflow_dispatch:
+
+jobs:
+  sdist:
+    name: Make sdist
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: astral-sh/setup-uv@v6
+
+      - name: Make sdist
+        run: uv build --sdist
+
+      - name: upload
+        uses: actions/upload-artifact@v4
+        with:
+          name: package-sdist
+          path: dist/*.tar.gz
+
+  wheels:
+    name: Make wheels for ${{matrix.os}}
+    runs-on: ${{matrix.os}}
+    strategy:
+      matrix:
+        os: [
+          ubuntu-latest,
+          windows-latest,
+          macos-latest
+        ]
+    env:
+      CIBW_BUILD: cp39-* cp310-* cp311-* cp312-* cp313-* cp313t-*
+      CIBW_ENABLE: cpython-freethreading
+      CIBW_BUILD_FRONTEND: "build[uv]"
+      CIBW_ARCHS_LINUX: x86_64 aarch64
+      CIBW_ARCHS_MACOS: x86_64 arm64
+      CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux*"
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+      - uses: astral-sh/setup-uv@v6
+
+      - name: Install MSVC
+        if: matrix.os == 'windows-latest'
+        uses: bus1/cabuild/action/msdevshell@v1
+
+      - name: Install QEMU
+        if: runner.os == 'linux'
+        uses: docker/setup-qemu-action@v3
+        with:
+          platforms: all
+
+      - name: Make wheels
+        run: uvx cibuildwheel==2.23.3 --output-dir dist
+
+      - name: upload
+        uses: actions/upload-artifact@v4
+        with:
+          name: package-wheels-${{matrix.os}}
+          path: dist/*.whl
+
+  publish:
+    needs: [sdist, wheels]
+    name: Publish to PyPI
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write
+    steps:
+      - uses: astral-sh/setup-uv@v6
+
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: 'package-*'
+          merge-multiple: true
+          path: dist
+
+      - name: Publish
+        run: uv publish dist/*
+
+  tag:
+    needs: publish
+    name: Create Release Tag
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get version
+        run: echo VERSION=$(uv run turbopipe/version.py) >> $GITHUB_ENV
+
+      - name: Create Release Tag
+        run: |
+          git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com"
+          git config --local user.name "github-actions[bot]"
+          git tag -a v$VERSION -m "Release v$VERSION"
+          git push origin v$VERSION
turbopipe-1.2.3/.python-version
@@ -0,0 +1 @@
+3.13
{turbopipe-1.2.1 → turbopipe-1.2.3}/PKG-INFO
@@ -1,41 +1,18 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: turbopipe
-Version: 1.2.1
-Summary: 🌀 Faster MemoryView inter-process data transfers for subprocesses
-Home-page: https://brokensrc.dev
+Version: 1.2.3
+Summary: 🌀 Faster ModernGL Buffers inter-process data transfers for subprocesses
 Author-Email: Tremeschin <29046864+Tremeschin@users.noreply.github.com>
-License: MIT
-
-
-
-
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-Project-URL: Issues, https://github.com/BrokenSource/TurboPipe/issues
-Project-URL: Repository, https://github.com/BrokenSource/TurboPipe
-Project-URL: Documentation, https://github.com/BrokenSource/TurboPipe
+License-Expression: MIT
+Project-URL: GitHub, https://github.com/BrokenSource/TurboPipe
+Project-URL: Changelog, https://brokensrc.dev/about/changelog
+Project-URL: Funding, https://brokensrc.dev/about/sponsors
+Project-URL: Contact, https://brokensrc.dev/about/contact
 Project-URL: Homepage, https://brokensrc.dev
 Requires-Python: >=3.7
 Requires-Dist: moderngl
 Description-Content-Type: text/markdown
 
-> [!IMPORTANT]
-> <sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
-<!-- PyPI -->
 <div align="center">
 <a href="https://brokensrc.dev/"><img src="https://raw.githubusercontent.com/BrokenSource/TurboPipe/main/turbopipe/resources/images/turbopipe.png" width="200"></a>
 <h1>TurboPipe</h1>
@@ -57,15 +34,17 @@ Description-Content-Type: text/markdown
 
 The **optimizations** involved are:
 
-- **Zero-copy**: Avoid unnecessary memory copies or allocation (intermediate `buffer.read
+- **Zero-copy**: Avoid unnecessary memory copies or allocation (intermediate `buffer.read`)
 - **C++**: The core of TurboPipe is written in C++ for speed, efficiency and low-level control
-- **Chunks**: Write in chunks of 4096 bytes (RAM page size), so the hardware is happy (Unix)
 - **Threaded**:
     - Doesn't block Python code execution, allows to render next frame
     - Decouples the main thread from the I/O thread for performance
+- **Chunks**: Write in chunks of 4096 bytes (RAM page size), so the hardware is happy (Unix)
 
 ✅ Don't worry, there's proper **safety** in place. TurboPipe will block Python if a memory address is already queued for writing, and guarantees order of writes per file-descriptor. Just call `.sync()` when done 😉
 
+<sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
+
 <br>
 
 # 📦 Installation
@@ -90,7 +69,7 @@ rye add turbopipe
 
 # 🚀 Usage
 
-See also the [**Examples**](https://github.com/BrokenSource/TurboPipe/tree/main/examples) folder for comparisons, and [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/
+See also the [**Examples**](https://github.com/BrokenSource/TurboPipe/tree/main/examples) folder for comparisons, and [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/Exporting.py)'s usage of it!
 
 ```python
 import subprocess
@@ -98,27 +77,53 @@ import subprocess
 import moderngl
 import turbopipe
 
-# Create ModernGL objects
+# Create ModernGL objects and proxy buffers
 ctx = moderngl.create_standalone_context()
-
+width, height, duration, fps = (1920, 1080, 10, 60)
+buffers = [
+    ctx.buffer(reserve=(width*height*3))
+    for _ in range(nbuffers := 2)
+]
+
+# Create your FBO, Textures, Shaders, etc.
 
 # Make sure resolution, pixel format matches!
-ffmpeg = subprocess.Popen(
-
-
-
-
-
-
-
+ffmpeg = subprocess.Popen((
+    "ffmpeg",
+    "-f", "rawvideo",
+    "-pix_fmt", "rgb24",
+    "-r", str(fps),
+    "-s", f"{width}x{height}",
+    "-i", "-",
+    "-f", "null",
+    "output.mp4"
+), stdin=subprocess.PIPE)
+
+# Rendering loop of yours
+for frame in range(duration*fps):
+    buffer = buffers[frame % nbuffers]
+
+    # Wait queued writes before copying
     turbopipe.sync(buffer)
     fbo.read_into(buffer)
+
+    # Doesn't lock the GIL, writes in parallel
    turbopipe.pipe(buffer, ffmpeg.stdin.fileno())
 
-#
-
+# Wait for queued writes, clean memory
+for buffer in buffers:
+    turbopipe.sync(buffer)
+    buffer.release()
+
+# Signal stdin stream is done
 ffmpeg.stdin.close()
+
+# wait for encoding to finish
 ffmpeg.wait()
+
+# Warn: Albeit rare, only call close when no other data
+# write is pending, as it might skip a frame or halt
+turbopipe.close()
 ```
 
 <br>
@@ -370,6 +375,5 @@ On realistically loads, like [**ShaderFlow**](https://github.com/BrokenSource/Sh
 # 📚 Future work
 
 - Disable/investigate performance degradation on Windows iGPUs
-- Improve the thread synchronization and/or use a ThreadPool
 - Maybe use `mmap` instead of chunks writing on Linux
--
+- Split the code into a libturbopipe? Not sure where it would be useful 😅
turbopipe-1.2.3/pyproject.toml
@@ -0,0 +1,26 @@
+[project.urls]
+GitHub = "https://github.com/BrokenSource/TurboPipe"
+Changelog = "https://brokensrc.dev/about/changelog"
+Funding = "https://brokensrc.dev/about/sponsors"
+Contact = "https://brokensrc.dev/about/contact"
+Homepage = "https://brokensrc.dev"
+
+[project]
+name = "turbopipe"
+description = "🌀 Faster ModernGL Buffers inter-process data transfers for subprocesses"
+authors = [{name="Tremeschin", email="29046864+Tremeschin@users.noreply.github.com"}]
+dynamic = ["version"]
+readme = "readme.md"
+license = "MIT"
+dependencies = ["moderngl"]
+requires-python = ">=3.7"
+
+[build-system]
+requires = ["meson-python", "ninja"]
+build-backend = "mesonpy"
+
+[tool.uv]
+managed = false
+
+[tool.ruff.format]
+exclude = ["*"]
turbopipe-1.2.1/Readme.md → turbopipe-1.2.3/readme.md
@@ -1,6 +1,3 @@
-> [!IMPORTANT]
-> <sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
-<!-- PyPI -->
 <div align="center">
 <a href="https://brokensrc.dev/"><img src="https://raw.githubusercontent.com/BrokenSource/TurboPipe/main/turbopipe/resources/images/turbopipe.png" width="200"></a>
 <h1>TurboPipe</h1>
@@ -22,15 +19,17 @@
 
 The **optimizations** involved are:
 
-- **Zero-copy**: Avoid unnecessary memory copies or allocation (intermediate `buffer.read
+- **Zero-copy**: Avoid unnecessary memory copies or allocation (intermediate `buffer.read`)
 - **C++**: The core of TurboPipe is written in C++ for speed, efficiency and low-level control
-- **Chunks**: Write in chunks of 4096 bytes (RAM page size), so the hardware is happy (Unix)
 - **Threaded**:
     - Doesn't block Python code execution, allows to render next frame
    - Decouples the main thread from the I/O thread for performance
+- **Chunks**: Write in chunks of 4096 bytes (RAM page size), so the hardware is happy (Unix)
 
 ✅ Don't worry, there's proper **safety** in place. TurboPipe will block Python if a memory address is already queued for writing, and guarantees order of writes per file-descriptor. Just call `.sync()` when done 😉
 
+<sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
+
 <br>
 
 # 📦 Installation
@@ -55,7 +54,7 @@ rye add turbopipe
 
 # 🚀 Usage
 
-See also the [**Examples**](https://github.com/BrokenSource/TurboPipe/tree/main/examples) folder for comparisons, and [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/
+See also the [**Examples**](https://github.com/BrokenSource/TurboPipe/tree/main/examples) folder for comparisons, and [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/Exporting.py)'s usage of it!
 
 ```python
 import subprocess
@@ -63,27 +62,53 @@ import subprocess
 import moderngl
 import turbopipe
 
-# Create ModernGL objects
+# Create ModernGL objects and proxy buffers
 ctx = moderngl.create_standalone_context()
-
+width, height, duration, fps = (1920, 1080, 10, 60)
+buffers = [
+    ctx.buffer(reserve=(width*height*3))
+    for _ in range(nbuffers := 2)
+]
+
+# Create your FBO, Textures, Shaders, etc.
 
 # Make sure resolution, pixel format matches!
-ffmpeg = subprocess.Popen(
-
-
-
-
-
-
-
+ffmpeg = subprocess.Popen((
+    "ffmpeg",
+    "-f", "rawvideo",
+    "-pix_fmt", "rgb24",
+    "-r", str(fps),
+    "-s", f"{width}x{height}",
+    "-i", "-",
+    "-f", "null",
+    "output.mp4"
+), stdin=subprocess.PIPE)
+
+# Rendering loop of yours
+for frame in range(duration*fps):
+    buffer = buffers[frame % nbuffers]
+
+    # Wait queued writes before copying
     turbopipe.sync(buffer)
     fbo.read_into(buffer)
+
+    # Doesn't lock the GIL, writes in parallel
    turbopipe.pipe(buffer, ffmpeg.stdin.fileno())
 
-#
-
+# Wait for queued writes, clean memory
+for buffer in buffers:
+    turbopipe.sync(buffer)
+    buffer.release()
+
+# Signal stdin stream is done
 ffmpeg.stdin.close()
+
+# wait for encoding to finish
 ffmpeg.wait()
+
+# Warn: Albeit rare, only call close when no other data
+# write is pending, as it might skip a frame or halt
+turbopipe.close()
 ```
 
 <br>
@@ -335,6 +360,5 @@ On realistically loads, like [**ShaderFlow**](https://github.com/BrokenSource/Sh
 # 📚 Future work
 
 - Disable/investigate performance degradation on Windows iGPUs
-- Improve the thread synchronization and/or use a ThreadPool
 - Maybe use `mmap` instead of chunks writing on Linux
--
+- Split the code into a libturbopipe? Not sure where it would be useful 😅
turbopipe-1.2.3/turbopipe/__init__.py
@@ -0,0 +1,29 @@
+from typing import Optional, Union
+
+from moderngl import Buffer
+
+from turbopipe import _turbopipe
+
+__all__ = [
+    "pipe",
+    "sync",
+    "close"
+]
+
+def pipe(buffer: Union[Buffer, memoryview], fileno: int) -> None:
+    """Pipe a buffer contents to a file descriptor, fast and threaded"""
+    if isinstance(buffer, Buffer):
+        buffer = memoryview(buffer.mglo)
+    _turbopipe.pipe(buffer, fileno)
+    del buffer
+
+def sync(buffer: Optional[Union[Buffer, memoryview]]=None) -> None:
+    """Wait for pending operations on a buffer to finish"""
+    if isinstance(buffer, Buffer):
+        buffer = memoryview(buffer.mglo)
+    _turbopipe.sync(buffer)
+    del buffer
+
+def close() -> None:
+    """Syncs and deletes objects"""
+    _turbopipe.close()
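Note: the three functions above are the whole public surface of the package. A minimal sketch of how they are meant to be called, distilled from the readme example earlier in this diff; the standalone context, the arbitrary buffer size, and the `cat` consumer are illustrative stand-ins rather than anything shipped by the package:

import subprocess
import moderngl
import turbopipe

# Illustrative setup: a headless context and one proxy buffer (size is arbitrary)
ctx = moderngl.create_standalone_context()
buffer = ctx.buffer(reserve=1920*1080*3)

# Illustrative consumer: any process reading raw bytes from stdin works (ffmpeg in the readme)
child = subprocess.Popen(("cat",), stdin=subprocess.PIPE, stdout=subprocess.DEVNULL)

# Per frame: wait for any queued write on this buffer, refill it, then queue a threaded write
turbopipe.sync(buffer)                        # blocks only if this buffer is still being written
# fbo.read_into(buffer)                       # fill the buffer from your framebuffer here
turbopipe.pipe(buffer, child.stdin.fileno())  # returns immediately, the write happens off-thread

# Shutdown: drain pending writes before closing the stream and the module
turbopipe.sync(buffer)
child.stdin.close()
child.wait()
turbopipe.close()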
{turbopipe-1.2.1 → turbopipe-1.2.3}/turbopipe/_turbopipe.cpp
@@ -1,9 +1,6 @@
 // ------------------------------------------------------------------------------------------------|
-//
 // TurboPipe - Faster ModernGL Buffers inter-process data transfers for subprocesses
-//
-// (c) 2024, Tremeschin, MIT License
-//
+// (c) MIT License 2024-2025, Tremeschin
 // ------------------------------------------------------------------------------------------------|
 
 #define PY_SSIZE_T_CLEAN
@@ -29,9 +26,9 @@ using namespace std;
 // TurboPipe internals
 
 struct Work {
-    void*
-    int file;
+    void* data;
     size_t size;
+    int file;
 };
 
 class TurboPipe {
@@ -39,108 +36,110 @@ public:
     TurboPipe(): running(true) {}
     ~TurboPipe() {close();}
 
-    void pipe(PyObject*
-        Py_buffer
-        this->_pipe(
+    void pipe(PyObject* view, int file) {
+        Py_buffer data = *PyMemoryView_GET_BUFFER(view);
+        this->_pipe(data.buf, (size_t) data.len, file);
     }
 
-    void sync(PyObject*
-
-
-
-
-            data = view.buf;
-        }
-
-        // Wait for some or all queues to be empty, as they are erased when
-        // each thread's writing loop is done, guaranteeing finish
-        for (auto& values: queue) {
-            while (true) {
-                {
-                    // Prevent segfault on iteration on changing data
-                    lock_guard<mutex> lock(mutexes[values.first]);
-
-                    // Either all empty or some memory not queued (None or specific)
-                    if (data != nullptr && values.second.find(data) == values.second.end())
-                        break;
-                    if (data == nullptr && values.second.empty())
-                        break;
-                }
-                this_thread::sleep_for(chrono::microseconds(200));
-            }
-        }
+    void sync(PyObject* view=nullptr) {
+        if (view != nullptr)
+            this->_sync((*PyMemoryView_GET_BUFFER(view)).buf);
+        else
+            this->_sync(nullptr);
     }
 
     void close() {
-
-        running = false;
-        signal
-
+        this->_sync();
+        this->running = false;
+        for (auto& pair: this->signal)
+            pair.second.notify_all();
+        for (auto& pair: this->threads)
             pair.second.join();
-        threads.clear();
+        this->threads.clear();
     }
 
 private:
-    unordered_map<int,
+    unordered_map<int, condition_variable> pending;
+    unordered_map<int, condition_variable> signal;
     unordered_map<int, unordered_set<void*>> queue;
     unordered_map<int, deque<Work>> stream;
     unordered_map<int, thread> threads;
     unordered_map<int, mutex> mutexes;
-    condition_variable signal;
     bool running;
 
     void _pipe(void* data, size_t size, int file) {
-
-        unique_lock<mutex> lock(mutexes[file]);
+        unique_lock<mutex> lock(this->mutexes[file]);
 
         /* Notify this memory is queued, wait if pending */ {
-            if (!queue[file].insert(data).second) {
-                pending[file]
-                    return queue[file].find(data) == queue[file].end();
+            if (!this->queue[file].insert(data).second) {
+                this->pending[file].wait(lock, [this, file, data] {
+                    return this->queue[file].find(data) == this->queue[file].end();
                 });
             }
         }
 
         /* Add another job to the queue */ {
-            stream[file].push_back(
-            queue[file].insert(data);
+            this->stream[file].push_back(Work{data, size, file});
+            this->queue[file].insert(data);
            this->running = true;
            lock.unlock();
        }
 
         // Each file descriptor has its own thread
-        if (threads.find(file) == threads.end())
-            threads[file] = thread(&TurboPipe::worker, this, file);
+        if (this->threads.find(file) == this->threads.end())
+            this->threads[file] = thread(&TurboPipe::worker, this, file);
 
-
+        // Trigger the worker to write the data
+        this->signal[file].notify_all();
+    }
+
+    void _sync(void* data=nullptr) {
+        for (auto& values: this->queue) {
+            while (true) {
+                {
+                    // Prevent segfault on iteration on changing data
+                    lock_guard<mutex> lock(this->mutexes[values.first]);
+
+                    // Continue if specific data is not in queue
+                    if (data != nullptr)
+                        if (values.second.find(data) == values.second.end())
+                            break;
+
+                    // Continue if all queues are empty
+                    if (data == nullptr)
+                        if (values.second.empty())
+                            break;
+                }
+                this_thread::sleep_for(chrono::microseconds(200));
+            }
+        }
     }
 
     void worker(int file) {
         while (this->running) {
-            unique_lock<mutex> lock(mutexes[file]);
+            unique_lock<mutex> lock(this->mutexes[file]);
 
-            signal.wait(lock, [this, file] {
-                return (!stream[file].empty() || !this->running);
+            this->signal[file].wait(lock, [this, file] {
+                return (!this->stream[file].empty() || !this->running);
             });
 
             // Skip on false positives, exit condition
-            if (stream[file].empty()) continue;
+            if (this->stream[file].empty()) continue;
             if (!this->running) break;
 
             // Get the next work item
-            Work work = stream[file].front();
-            stream[file].pop_front();
+            Work work = this->stream[file].front();
+            this->stream[file].pop_front();
             lock.unlock();
 
             #ifdef _WIN32
-                // Windows doesn't like chunked writes
-                write(work.file, (char*) work.data, work.size);
+                // Fixme: Windows doesn't like chunked writes?
+                write(work.file, (char*) work.data, static_cast<unsigned int>(work.size));
             #else
-                // Optimization: Write in chunks of 4096 (RAM page size)
                 size_t tell = 0;
                 while (tell < work.size) {
                     size_t chunk = min(work.size - tell, static_cast<size_t>(4096));
-
+                    int written = write(work.file, (char*) work.data + tell, chunk);
                     if (written == -1) break;
                     tell += written;
                 }
@@ -149,9 +148,9 @@ private:
             lock.lock();
 
             /* Signal work is done */ {
-                pending[file]
-                queue[file].erase(work.data);
-                signal.notify_all();
+                this->pending[file].notify_all();
+                this->queue[file].erase(work.data);
+                this->signal[file].notify_all();
             }
         }
     }
@@ -167,15 +166,15 @@ static PyObject* turbopipe_pipe(
     PyObject* Py_UNUSED(self),
     PyObject* args
 ) {
-    PyObject*
+    PyObject* view;
     PyObject* file;
-    if (!PyArg_ParseTuple(args, "OO", &
+    if (!PyArg_ParseTuple(args, "OO", &view, &file))
         return NULL;
-    if (!PyMemoryView_Check(
+    if (!PyMemoryView_Check(view)) {
         PyErr_SetString(PyExc_TypeError, "Expected a memoryview object");
         return NULL;
     }
-    turbopipe->pipe(
+    turbopipe->pipe(view, PyLong_AsLong(file));
     Py_RETURN_NONE;
 }
 
@@ -183,14 +182,14 @@ static PyObject* turbopipe_sync(
     PyObject* Py_UNUSED(self),
     PyObject* args
 ) {
-    PyObject*
-    if (!PyArg_ParseTuple(args, "|O", &
+    PyObject* view;
+    if (!PyArg_ParseTuple(args, "|O", &view))
         return NULL;
-    if (
+    if (view != nullptr && !PyMemoryView_Check(view)) {
         PyErr_SetString(PyExc_TypeError, "Expected a memoryview object or None");
         return NULL;
     }
-    turbopipe->sync(
+    turbopipe->sync(view);
     Py_RETURN_NONE;
 }
 
@@ -216,18 +215,25 @@ static PyMethodDef TurboPipeMethods[] = {
     {NULL, NULL, 0, NULL}
 };
 
-static struct PyModuleDef
-    PyModuleDef_HEAD_INIT,
-    "_turbopipe",
-    NULL,
-
-
+static struct PyModuleDef TurboPipeModule = {
+    .m_base = PyModuleDef_HEAD_INIT,
+    .m_name = "_turbopipe",
+    .m_doc = NULL,
+    .m_size = -1,
+    .m_methods = TurboPipeMethods,
+    .m_slots = NULL,
+    .m_traverse = NULL,
+    .m_clear = NULL,
+    .m_free = NULL
 };
 
 PyMODINIT_FUNC PyInit__turbopipe(void) {
-    PyObject* module = PyModule_Create(&
+    PyObject* module = PyModule_Create(&TurboPipeModule);
     if (module == NULL)
         return NULL;
+    #ifdef Py_GIL_DISABLED
+        PyUnstable_Module_SetGIL(module, Py_MOD_GIL_NOT_USED);
+    #endif
     turbopipe = new TurboPipe();
     Py_AtExit(turbopipe_exit);
     return module;
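Note: the Unix branch of `worker()` above writes each queued buffer in 4096-byte chunks and advances by however many bytes the kernel accepted. A rough Python stand-in for just that loop, for illustration only (turbopipe does this in C++ on a dedicated thread per file descriptor; unlike the C `write()`, Python's `os.write` raises `OSError` instead of returning -1):

import os

def chunked_write(fd: int, data: bytes, chunk: int = 4096) -> None:
    # Write at most one RAM page per syscall; os.write may accept fewer bytes than asked
    view = memoryview(data)
    tell = 0
    while tell < len(view):
        tell += os.write(fd, view[tell:tell + chunk])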
turbopipe-1.2.1/.github/workflows/release.yaml
DELETED
@@ -1,106 +0,0 @@
-name: release
-
-on:
-  workflow_dispatch:
-  push:
-    paths:
-      - 'turbopipe/version.py'
-
-jobs:
-  sdist:
-    name: Package source
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
-
-      - name: deps
-        run: python -m pip install -U pip wheel build
-
-      - name: sdist
-        run: python -m build --sdist -o package
-
-      - name: upload
-        uses: actions/upload-artifact@v3
-        with:
-          name: package
-          path: package/*.tar.gz
-
-  wheels:
-    name: Build Python wheels on ${{matrix.os}}
-    runs-on: ${{matrix.os}}
-    strategy:
-      matrix:
-        os: [ubuntu-latest, windows-latest, macos-14]
-
-    env:
-      CIBW_BUILD: cp37-* cp38-* cp39-* cp310-* cp311-* cp312-*
-      CIBW_ARCHS_LINUX: auto
-      CIBW_ARCHS_MACOS: arm64
-      CIBW_ARCHS_WINDOWS: auto
-      CIBW_SKIP: '*musllinux* *i686* *-win32'
-
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
-
-      - name: Install MSVC
-        if: matrix.os == 'windows-latest'
-        uses: bus1/cabuild/action/msdevshell@v1
-
-      - name: deps
-        run: python -m pip install cibuildwheel==2.19.2
-
-      - name: wheels
-        run: python -m cibuildwheel --output-dir package
-
-      - name: upload
-        uses: actions/upload-artifact@v3
-        with:
-          name: package
-          path: package/*.whl
-
-  publish:
-    needs: [sdist, wheels]
-    name: Publish to PyPI
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
-
-      - name: Download artifacts
-        uses: actions/download-artifact@v3
-        with:
-          name: package
-          path: package
-
-      - name: deps
-        run: python -m pip install -U twine
-
-      - name: publish
-        env:
-          TWINE_USERNAME: __token__
-          TWINE_PASSWORD: ${{secrets.PYPI_TOKEN}}
-        run: twine upload package/*
-
-  tag:
-    needs: publish
-    name: Create Release Tag
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
-
-      - name: Get version
-        run: echo VERSION=$(python turbopipe/version.py) >> $GITHUB_ENV
-        shell: bash
-
-      - name: Create Release Tag
-        run: |
-          git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com"
-          git config --local user.name "github-actions[bot]"
-          git tag -a v$VERSION -m "Release v$VERSION"
-          git push origin v$VERSION
turbopipe-1.2.1/pyproject.toml
DELETED
@@ -1,22 +0,0 @@
-[project.urls]
-issues = "https://github.com/BrokenSource/TurboPipe/issues"
-repository = "https://github.com/BrokenSource/TurboPipe"
-documentation = "https://github.com/BrokenSource/TurboPipe"
-homepage = "https://brokensrc.dev"
-
-[project]
-name = "turbopipe"
-dynamic = ["version"]
-description = "🌀 Faster MemoryView inter-process data transfers for subprocesses"
-authors = [{name="Tremeschin", email="29046864+Tremeschin@users.noreply.github.com"}]
-readme = "Readme.md"
-license = {file="License.md"}
-dependencies = ["moderngl"]
-requires-python = ">=3.7"
-
-[build-system]
-requires = ["meson-python", "ninja"]
-build-backend = "mesonpy"
-
-[tool.ruff.format]
-exclude = ["*"]
turbopipe-1.2.1/turbopipe/__init__.py
DELETED
@@ -1,47 +0,0 @@
-from typing import Optional, Union
-
-from moderngl import Buffer
-
-from turbopipe import _turbopipe
-
-
-def pipe(buffer: Union[Buffer, memoryview], fileno: int) -> None:
-    """
-    Pipe the content of a moderngl.Buffer or memoryview to a file descriptor, fast, threaded and
-    blocking when needed. Call `sync(buffer)` before this, and `sync()` when done for
-
-    Usage:
-        ```python
-        # Assuming `buffer = ctx.buffer(...)`
-        # Note: Use as `fbo.read_into(buffer)`
-
-        # As a open() file
-        with open("file.bin", "wb") as file:
-            turbopipe.pipe(buffer, file)
-
-        # As a subprocess
-        child = subprocess.Popen(..., stdin=subprocess.PIPE)
-        turbopipe.pipe(buffer, child.stdin.fileno())
-        ```
-    """
-    if isinstance(buffer, Buffer):
-        buffer = memoryview(buffer.mglo)
-    _turbopipe.pipe(buffer, fileno)
-    del buffer
-
-def sync(buffer: Optional[Union[Buffer, memoryview]]=None) -> None:
-    """Waits for any pending write operation on a buffer, or 'all buffers' if None, to finish"""
-    if isinstance(buffer, Buffer):
-        buffer = memoryview(buffer.mglo)
-    _turbopipe.sync(buffer)
-    del buffer
-
-def close() -> None:
-    """Syncs and deletes objects"""
-    _turbopipe.close()
-
-__all__ = [
-    "pipe",
-    "sync",
-    "close"
-]