turbopipe 0.0.0__tar.gz → 1.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,2 @@
+ turbopipe/include/*.hpp linguist-vendored
+ turbopipe/include/*.h linguist-vendored
@@ -0,0 +1,83 @@
+ name: release
+
+ on:
+   workflow_dispatch:
+   push:
+     paths:
+       - 'turbopipe/version.py'
+
+ jobs:
+   sdist:
+     name: Package source
+     runs-on: ubuntu-latest
+
+     steps:
+       - uses: actions/checkout@v4
+       - uses: actions/setup-python@v5
+
+       - name: deps
+         run: python -m pip install -U pip wheel build
+
+       - name: sdist
+         run: python -m build --sdist -o package
+
+       - name: upload
+         uses: actions/upload-artifact@v3
+         with:
+           name: package
+           path: package/*.tar.gz
+
+   wheels:
+     name: Build ${{matrix.pyver}} wheels on ${{matrix.os}}
+     runs-on: ${{matrix.os}}
+     strategy:
+       matrix:
+         os: [ubuntu-latest, windows-latest, macos-13, macos-14]
+         pyver: [cp39, cp310, cp311, cp312]
+
+     env:
+       CIBW_BUILD: ${{matrix.pyver}}-*
+       CIBW_ARCHS_LINUX: auto
+       CIBW_ARCHS_MACOS: arm64
+       CIBW_ARCHS_WINDOWS: auto
+       CIBW_SKIP: '*musllinux* *i686* *-win32'
+
+     steps:
+       - uses: actions/checkout@v4
+       - uses: actions/setup-python@v5
+
+       - name: deps
+         run: python -m pip install cibuildwheel==2.19.2
+
+       - name: wheels
+         run: python -m cibuildwheel --output-dir package
+
+       - name: upload
+         uses: actions/upload-artifact@v3
+         with:
+           name: package
+           path: package/*.whl
+
+   publish:
+     needs: [sdist, wheels]
+     name: Publish to PyPI
+     runs-on: ubuntu-latest
+
+     steps:
+       - uses: actions/checkout@v4
+       - uses: actions/setup-python@v5
+
+       - name: Download artifacts
+         uses: actions/download-artifact@v3
+         with:
+           name: package
+           path: package
+
+       - name: deps
+         run: python -m pip install -U twine
+
+       - name: publish
+         env:
+           TWINE_USERNAME: __token__
+           TWINE_PASSWORD: ${{secrets.PYPI_TOKEN}}
+         run: twine upload package/*
@@ -0,0 +1,4 @@
+ turbopipe.egg*
+ build*
+ .venv
+ *.so
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Gabriel Tremeschin
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,223 @@
+ Metadata-Version: 2.1
+ Name: turbopipe
+ Version: 1.0.1
+ Summary: 🌀 Faster ModernGL Buffer inter-process data transfers
+ Home-page: https://brokensrc.dev
+ Author-Email: Tremeschin <29046864+Tremeschin@users.noreply.github.com>
+ License: MIT License
+
+ Copyright (c) 2024 Gabriel Tremeschin
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ Project-URL: Issues, https://github.com/BrokenSource/TurboPipe/issues
+ Project-URL: Repository, https://github.com/BrokenSource/TurboPipe
+ Project-URL: Documentation, https://github.com/BrokenSource/TurboPipe
+ Project-URL: Homepage, https://brokensrc.dev
+ Requires-Python: >=3.7
+ Requires-Dist: moderngl
+ Description-Content-Type: text/markdown
+
+ > [!IMPORTANT]
+ > <sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
+
+ <div align="center">
+ <a href="https://brokensrc.dev/"><img src="https://raw.githubusercontent.com/BrokenSource/TurboPipe/main/turbopipe/resources/images/turbopipe.png" width="200"></a>
+ <h1>TurboPipe</h1>
+ <br>
+ Faster <a href="https://github.com/moderngl/moderngl"><b>ModernGL</b></a> inter-process data transfers
+ </div>
+
+ <br>
+
+ # 🔥 Description
+
+ > TurboPipe speeds up sending raw bytes from `moderngl.Buffer` objects, primarily to an `FFmpeg` subprocess
+
+ The **optimizations** involved are:
+
+ - **Zero-copy**: Avoids unnecessary memory copies or allocations (no intermediate `buffer.read()`)
+ - **C++**: The core of TurboPipe is written in C++ for speed, efficiency and low-level control
+ - **Chunks**: Writes in chunks of 4096 bytes (the RAM page size), so the hardware is happy
+ - **Threaded**:
+     - Doesn't block Python code execution, allowing the next frame to be rendered
+     - Decouples the main thread from the I/O thread for performance
+
+ ✅ Don't worry, there's proper **safety** in place. TurboPipe will block Python if a memory address is already queued for writing, and guarantees the order of writes per file descriptor. Just call `.sync()` when done 😉
+
+ <br>
+
+ # 📦 Installation
+
+ It couldn't be easier! Just install it with your package manager:
+
+ ```bash
+ pip install turbopipe
+ poetry add turbopipe
+ pdm add turbopipe
+ rye add turbopipe
+ ```
+
+ <br>
+
+ # 🚀 Usage
+
+ See also the [**Examples**](examples) folder for more controlled usage, and how [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/Scene.py) uses it!
+
+ ```python
+ import subprocess
+ import moderngl
+ import turbopipe
+
+ # Create ModernGL objects
+ ctx = moderngl.create_standalone_context()
+ buffer = ctx.buffer(reserve=1920*1080*3)
+
+ # Make sure the resolution and pixel format match!
+ ffmpeg = subprocess.Popen(
+     'ffmpeg -f rawvideo -pix_fmt rgb24 -s 1920x1080 -i - -f null -'.split(),
+     stdin=subprocess.PIPE
+ )
+
+ # Your rendering loop
+ for _ in range(100):
+     turbopipe.pipe(buffer, ffmpeg.stdin.fileno())
+
+ # Finalize writing
+ turbopipe.sync()
+ ffmpeg.stdin.close()
+ ffmpeg.wait()
+ ```
+
+ <br>
+
+ # ⭐️ Benchmarks
+
+ > [!NOTE]
+ > **The test conditions are as follows**:
+ > - The tests are the average of 3 runs to ensure consistency, with 3 GB of the same data being piped
+ > - The data is random per-buffer noise with values between 128-135, so multi-buffer runs are a noise video
+ > - All resolutions are wide-screen (16:9) and have 3 components (RGB) with 3 bytes per pixel (SDR)
+ > - Multi-buffer runs cycle through a list of buffers (e.g. 1, 2, 3, 1, 2, 3... for 3 buffers)
+ > - All FFmpeg outputs are discarded with `-f null -` to avoid any disk I/O bottlenecks
+ > - The `gain` column is the percentage increase over the standard method
+ > - When `x264` is Null, no encoding took place (passthrough)
+ > - The test case emojis signify:
+ >   - 🐢: Standard `ffmpeg.stdin.write(buffer.read())` on just the main thread, pure Python
+ >   - 🚀: Threaded `ffmpeg.stdin.write(buffer.read())` with a queue (similar to turbopipe)
+ >   - 🌀: The magic of `turbopipe.pipe(buffer, ffmpeg.stdin.fileno())`
+ >
+ > Also see [`benchmark.py`](examples/benchmark.py) for the implementation
+
+ ✅ Check out the benchmarks on a couple of systems below:
+
+ <details>
+ <summary><b>Desktop</b> • (AMD Ryzen 9 5900x) • (NVIDIA RTX 3060 12 GB) • (DDR4 2x32 GB 3200 MT/s) • (Arch Linux)</summary>
+ <br>
+
+ | 720p | x264 | Buffers | Framerate | Bandwidth | Gain |
+ |:----:|:----------|:---------:|----------:|------------:|---------:|
+ | 🐢 | Null | 1 | 882 fps | 2.44 GB/s | |
+ | 🚀 | Null | 1 | 793 fps | 2.19 GB/s | -10.04% |
+ | 🌀 | Null | 1 | 1911 fps | 5.28 GB/s | 116.70% |
+ | 🐢 | Null | 4 | 818 fps | 2.26 GB/s | |
+ | 🚀 | Null | 4 | 684 fps | 1.89 GB/s | -16.35% |
+ | 🌀 | Null | 4 | 1494 fps | 4.13 GB/s | 82.73% |
+ | 🐢 | ultrafast | 4 | 664 fps | 1.84 GB/s | |
+ | 🚀 | ultrafast | 4 | 635 fps | 1.76 GB/s | -4.33% |
+ | 🌀 | ultrafast | 4 | 869 fps | 2.40 GB/s | 31.00% |
+ | 🐢 | slow | 4 | 204 fps | 0.57 GB/s | |
+ | 🚀 | slow | 4 | 205 fps | 0.57 GB/s | 0.58% |
+ | 🌀 | slow | 4 | 208 fps | 0.58 GB/s | 2.22% |
+
+ | 1080p | x264 | Buffers | Framerate | Bandwidth | Gain |
+ |:-----:|:----------|:---------:|----------:|------------:|--------:|
+ | 🐢 | Null | 1 | 385 fps | 2.40 GB/s | |
+ | 🚀 | Null | 1 | 369 fps | 2.30 GB/s | -3.91% |
+ | 🌀 | Null | 1 | 641 fps | 3.99 GB/s | 66.54% |
+ | 🐢 | Null | 4 | 387 fps | 2.41 GB/s | |
+ | 🚀 | Null | 4 | 359 fps | 2.23 GB/s | -7.21% |
+ | 🌀 | Null | 4 | 632 fps | 3.93 GB/s | 63.40% |
+ | 🐢 | ultrafast | 4 | 272 fps | 1.70 GB/s | |
+ | 🚀 | ultrafast | 4 | 266 fps | 1.66 GB/s | -2.14% |
+ | 🌀 | ultrafast | 4 | 405 fps | 2.53 GB/s | 49.24% |
+ | 🐢 | slow | 4 | 117 fps | 0.73 GB/s | |
+ | 🚀 | slow | 4 | 122 fps | 0.76 GB/s | 4.43% |
+ | 🌀 | slow | 4 | 124 fps | 0.77 GB/s | 6.48% |
+
+ | 1440p | x264 | Buffers | Framerate | Bandwidth | Gain |
+ |:-----:|:----------|:---------:|----------:|------------:|--------:|
+ | 🐢 | Null | 1 | 204 fps | 2.26 GB/s | |
+ | 🚀 | Null | 1 | 241 fps | 2.67 GB/s | 18.49% |
+ | 🌀 | Null | 1 | 297 fps | 3.29 GB/s | 45.67% |
+ | 🐢 | Null | 4 | 230 fps | 2.54 GB/s | |
+ | 🚀 | Null | 4 | 235 fps | 2.61 GB/s | 2.52% |
+ | 🌀 | Null | 4 | 411 fps | 4.55 GB/s | 78.97% |
+ | 🐢 | ultrafast | 4 | 146 fps | 1.62 GB/s | |
+ | 🚀 | ultrafast | 4 | 153 fps | 1.70 GB/s | 5.21% |
+ | 🌀 | ultrafast | 4 | 216 fps | 2.39 GB/s | 47.96% |
+ | 🐢 | slow | 4 | 73 fps | 0.82 GB/s | |
+ | 🚀 | slow | 4 | 78 fps | 0.86 GB/s | 7.06% |
+ | 🌀 | slow | 4 | 79 fps | 0.88 GB/s | 9.27% |
+
+ | 2160p | x264 | Buffers | Framerate | Bandwidth | Gain |
+ |:-----:|:----------|:---------:|----------:|------------:|---------:|
+ | 🐢 | Null | 1 | 81 fps | 2.03 GB/s | |
+ | 🚀 | Null | 1 | 107 fps | 2.67 GB/s | 32.26% |
+ | 🌀 | Null | 1 | 213 fps | 5.31 GB/s | 163.47% |
+ | 🐢 | Null | 4 | 87 fps | 2.18 GB/s | |
+ | 🚀 | Null | 4 | 109 fps | 2.72 GB/s | 25.43% |
+ | 🌀 | Null | 4 | 212 fps | 5.28 GB/s | 143.72% |
+ | 🐢 | ultrafast | 4 | 59 fps | 1.48 GB/s | |
+ | 🚀 | ultrafast | 4 | 67 fps | 1.68 GB/s | 14.46% |
+ | 🌀 | ultrafast | 4 | 95 fps | 2.39 GB/s | 62.66% |
+ | 🐢 | slow | 4 | 37 fps | 0.94 GB/s | |
+ | 🚀 | slow | 4 | 43 fps | 1.07 GB/s | 16.22% |
+ | 🌀 | slow | 4 | 44 fps | 1.11 GB/s | 20.65% |
+
+ </details>
+
+ <details>
+ <summary><b>Desktop</b> • (AMD Ryzen 9 5900x) • (NVIDIA RTX 3060 12 GB) • (DDR4 2x32 GB 3200 MT/s) • (Windows 11)</summary>
+ <br>
+ </details>
+
+ <br>
+
+ <div align="justify">
+
+ # 🌀 Conclusion
+
+ TurboPipe significantly increases the speed at which FFmpeg is fed data, especially at higher resolutions. However, if little CPU compute is available, or the video is too hard to encode (slow preset), the gains over the other methods are insignificant (the encoder is the bottleneck). Multi-buffering didn't prove to have an advantage; debugging shows that the TurboPipe C++ side is often starved of data to write (most likely because the file stream is buffered by the OS), and context switching or cache misses might be the cause of the slowdown.
+
+ Interestingly, due either to Linux's scheduler on AMD Ryzen CPUs or to their operating philosophy, it was experimentally seen that Ryzen's frenetic thread switching slightly degrades single-thread performance, which can be _"fixed"_ by prepending the command with `taskset --cpu 0,2` (not recommended at all), compared to Windows performance on the same system (Linux 🚀 = Windows 🐢). This can also be due to the topology of the tested CPUs having more than one Core Complex Die (CCD). Intel CPUs seem to stick to the same thread for longer, which makes the Python threaded method unnecessary overhead.
+
+ ### Personal experience
+
+ On realistic loads, like [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow)'s default lightweight shader export, TurboPipe increases rendering speed from 1080p260 to 1080p330 on my system, with CPU usage in the mid 80%s rather than the low 60%s. For [**DepthFlow**](https://github.com/BrokenSource/ShaderFlow)'s default depth video export, no gains are seen, as the CPU is almost saturated encoding at 1080p130.
+
+ </div>
+
+ <br>
+
+ # 📚 Future work
+
+ - Add support for NumPy arrays, memoryviews, and bytes-like objects
+ - Improve the thread synchronization and/or use a ThreadPool
+ - Maybe use `mmap` instead of chunked writes
+ - Test on macOS 🙈
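
For readers skimming the README above: the safety guarantees it describes (writes block while the same buffer is still queued, and writes to one file descriptor stay ordered) are what make a simple double-buffering loop work. The sketch below is purely illustrative and not part of the package; the framebuffer setup and the `fbo.read_into(buffer)` call are assumptions about one way to feed TurboPipe from a render target.

```python
import subprocess
import moderngl
import turbopipe

ctx = moderngl.create_standalone_context()
fbo = ctx.simple_framebuffer((1920, 1080), components=3)
fbo.use()

# Two buffers: while TurboPipe's thread pipes one, the next frame is read into the other
buffers = [ctx.buffer(reserve=1920*1080*3) for _ in range(2)]

ffmpeg = subprocess.Popen(
    'ffmpeg -f rawvideo -pix_fmt rgb24 -s 1920x1080 -i - -f null -'.split(),
    stdin=subprocess.PIPE
)

for frame in range(100):
    buffer = buffers[frame % 2]            # alternate buffers every frame
    fbo.clear(color=(frame / 100, 0, 0))   # "render" something into the framebuffer
    fbo.read_into(buffer)                  # copy pixels into the ModernGL buffer
    turbopipe.pipe(buffer, ffmpeg.stdin.fileno())  # only blocks if this buffer is still queued

turbopipe.sync()          # wait for all pending writes before closing the pipe
ffmpeg.stdin.close()
ffmpeg.wait()
```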
@@ -0,0 +1,188 @@
+ > [!IMPORTANT]
+ > <sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
+
+ <div align="center">
+ <a href="https://brokensrc.dev/"><img src="https://raw.githubusercontent.com/BrokenSource/TurboPipe/main/turbopipe/resources/images/turbopipe.png" width="200"></a>
+ <h1>TurboPipe</h1>
+ <br>
+ Faster <a href="https://github.com/moderngl/moderngl"><b>ModernGL</b></a> inter-process data transfers
+ </div>
+
+ <br>
+
+ # 🔥 Description
+
+ > TurboPipe speeds up sending raw bytes from `moderngl.Buffer` objects, primarily to an `FFmpeg` subprocess
+
+ The **optimizations** involved are:
+
+ - **Zero-copy**: Avoids unnecessary memory copies or allocations (no intermediate `buffer.read()`)
+ - **C++**: The core of TurboPipe is written in C++ for speed, efficiency and low-level control
+ - **Chunks**: Writes in chunks of 4096 bytes (the RAM page size), so the hardware is happy
+ - **Threaded**:
+     - Doesn't block Python code execution, allowing the next frame to be rendered
+     - Decouples the main thread from the I/O thread for performance
+
+ ✅ Don't worry, there's proper **safety** in place. TurboPipe will block Python if a memory address is already queued for writing, and guarantees the order of writes per file descriptor. Just call `.sync()` when done 😉
+
+ <br>
+
+ # 📦 Installation
+
+ It couldn't be easier! Just install it with your package manager:
+
+ ```bash
+ pip install turbopipe
+ poetry add turbopipe
+ pdm add turbopipe
+ rye add turbopipe
+ ```
+
+ <br>
+
+ # 🚀 Usage
+
+ See also the [**Examples**](examples) folder for more controlled usage, and how [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/Scene.py) uses it!
+
+ ```python
+ import subprocess
+ import moderngl
+ import turbopipe
+
+ # Create ModernGL objects
+ ctx = moderngl.create_standalone_context()
+ buffer = ctx.buffer(reserve=1920*1080*3)
+
+ # Make sure the resolution and pixel format match!
+ ffmpeg = subprocess.Popen(
+     'ffmpeg -f rawvideo -pix_fmt rgb24 -s 1920x1080 -i - -f null -'.split(),
+     stdin=subprocess.PIPE
+ )
+
+ # Your rendering loop
+ for _ in range(100):
+     turbopipe.pipe(buffer, ffmpeg.stdin.fileno())
+
+ # Finalize writing
+ turbopipe.sync()
+ ffmpeg.stdin.close()
+ ffmpeg.wait()
+ ```
+
+ <br>
+
+ # ⭐️ Benchmarks
+
+ > [!NOTE]
+ > **The test conditions are as follows**:
+ > - The tests are the average of 3 runs to ensure consistency, with 3 GB of the same data being piped
+ > - The data is random per-buffer noise with values between 128-135, so multi-buffer runs are a noise video
+ > - All resolutions are wide-screen (16:9) and have 3 components (RGB) with 3 bytes per pixel (SDR)
+ > - Multi-buffer runs cycle through a list of buffers (e.g. 1, 2, 3, 1, 2, 3... for 3 buffers)
+ > - All FFmpeg outputs are discarded with `-f null -` to avoid any disk I/O bottlenecks
+ > - The `gain` column is the percentage increase over the standard method
+ > - When `x264` is Null, no encoding took place (passthrough)
+ > - The test case emojis signify:
+ >   - 🐢: Standard `ffmpeg.stdin.write(buffer.read())` on just the main thread, pure Python
+ >   - 🚀: Threaded `ffmpeg.stdin.write(buffer.read())` with a queue (similar to turbopipe)
+ >   - 🌀: The magic of `turbopipe.pipe(buffer, ffmpeg.stdin.fileno())`
+ >
+ > Also see [`benchmark.py`](examples/benchmark.py) for the implementation
+
+ ✅ Check out the benchmarks on a couple of systems below:
+
+ <details>
+ <summary><b>Desktop</b> • (AMD Ryzen 9 5900x) • (NVIDIA RTX 3060 12 GB) • (DDR4 2x32 GB 3200 MT/s) • (Arch Linux)</summary>
+ <br>
+
+ | 720p | x264 | Buffers | Framerate | Bandwidth | Gain |
+ |:----:|:----------|:---------:|----------:|------------:|---------:|
+ | 🐢 | Null | 1 | 882 fps | 2.44 GB/s | |
+ | 🚀 | Null | 1 | 793 fps | 2.19 GB/s | -10.04% |
+ | 🌀 | Null | 1 | 1911 fps | 5.28 GB/s | 116.70% |
+ | 🐢 | Null | 4 | 818 fps | 2.26 GB/s | |
+ | 🚀 | Null | 4 | 684 fps | 1.89 GB/s | -16.35% |
+ | 🌀 | Null | 4 | 1494 fps | 4.13 GB/s | 82.73% |
+ | 🐢 | ultrafast | 4 | 664 fps | 1.84 GB/s | |
+ | 🚀 | ultrafast | 4 | 635 fps | 1.76 GB/s | -4.33% |
+ | 🌀 | ultrafast | 4 | 869 fps | 2.40 GB/s | 31.00% |
+ | 🐢 | slow | 4 | 204 fps | 0.57 GB/s | |
+ | 🚀 | slow | 4 | 205 fps | 0.57 GB/s | 0.58% |
+ | 🌀 | slow | 4 | 208 fps | 0.58 GB/s | 2.22% |
+
+ | 1080p | x264 | Buffers | Framerate | Bandwidth | Gain |
+ |:-----:|:----------|:---------:|----------:|------------:|--------:|
+ | 🐢 | Null | 1 | 385 fps | 2.40 GB/s | |
+ | 🚀 | Null | 1 | 369 fps | 2.30 GB/s | -3.91% |
+ | 🌀 | Null | 1 | 641 fps | 3.99 GB/s | 66.54% |
+ | 🐢 | Null | 4 | 387 fps | 2.41 GB/s | |
+ | 🚀 | Null | 4 | 359 fps | 2.23 GB/s | -7.21% |
+ | 🌀 | Null | 4 | 632 fps | 3.93 GB/s | 63.40% |
+ | 🐢 | ultrafast | 4 | 272 fps | 1.70 GB/s | |
+ | 🚀 | ultrafast | 4 | 266 fps | 1.66 GB/s | -2.14% |
+ | 🌀 | ultrafast | 4 | 405 fps | 2.53 GB/s | 49.24% |
+ | 🐢 | slow | 4 | 117 fps | 0.73 GB/s | |
+ | 🚀 | slow | 4 | 122 fps | 0.76 GB/s | 4.43% |
+ | 🌀 | slow | 4 | 124 fps | 0.77 GB/s | 6.48% |
+
+ | 1440p | x264 | Buffers | Framerate | Bandwidth | Gain |
+ |:-----:|:----------|:---------:|----------:|------------:|--------:|
+ | 🐢 | Null | 1 | 204 fps | 2.26 GB/s | |
+ | 🚀 | Null | 1 | 241 fps | 2.67 GB/s | 18.49% |
+ | 🌀 | Null | 1 | 297 fps | 3.29 GB/s | 45.67% |
+ | 🐢 | Null | 4 | 230 fps | 2.54 GB/s | |
+ | 🚀 | Null | 4 | 235 fps | 2.61 GB/s | 2.52% |
+ | 🌀 | Null | 4 | 411 fps | 4.55 GB/s | 78.97% |
+ | 🐢 | ultrafast | 4 | 146 fps | 1.62 GB/s | |
+ | 🚀 | ultrafast | 4 | 153 fps | 1.70 GB/s | 5.21% |
+ | 🌀 | ultrafast | 4 | 216 fps | 2.39 GB/s | 47.96% |
+ | 🐢 | slow | 4 | 73 fps | 0.82 GB/s | |
+ | 🚀 | slow | 4 | 78 fps | 0.86 GB/s | 7.06% |
+ | 🌀 | slow | 4 | 79 fps | 0.88 GB/s | 9.27% |
+
+ | 2160p | x264 | Buffers | Framerate | Bandwidth | Gain |
+ |:-----:|:----------|:---------:|----------:|------------:|---------:|
+ | 🐢 | Null | 1 | 81 fps | 2.03 GB/s | |
+ | 🚀 | Null | 1 | 107 fps | 2.67 GB/s | 32.26% |
+ | 🌀 | Null | 1 | 213 fps | 5.31 GB/s | 163.47% |
+ | 🐢 | Null | 4 | 87 fps | 2.18 GB/s | |
+ | 🚀 | Null | 4 | 109 fps | 2.72 GB/s | 25.43% |
+ | 🌀 | Null | 4 | 212 fps | 5.28 GB/s | 143.72% |
+ | 🐢 | ultrafast | 4 | 59 fps | 1.48 GB/s | |
+ | 🚀 | ultrafast | 4 | 67 fps | 1.68 GB/s | 14.46% |
+ | 🌀 | ultrafast | 4 | 95 fps | 2.39 GB/s | 62.66% |
+ | 🐢 | slow | 4 | 37 fps | 0.94 GB/s | |
+ | 🚀 | slow | 4 | 43 fps | 1.07 GB/s | 16.22% |
+ | 🌀 | slow | 4 | 44 fps | 1.11 GB/s | 20.65% |
+
+ </details>
+
+ <details>
+ <summary><b>Desktop</b> • (AMD Ryzen 9 5900x) • (NVIDIA RTX 3060 12 GB) • (DDR4 2x32 GB 3200 MT/s) • (Windows 11)</summary>
+ <br>
+ </details>
+
+ <br>
+
+ <div align="justify">
+
+ # 🌀 Conclusion
+
+ TurboPipe significantly increases the speed at which FFmpeg is fed data, especially at higher resolutions. However, if little CPU compute is available, or the video is too hard to encode (slow preset), the gains over the other methods are insignificant (the encoder is the bottleneck). Multi-buffering didn't prove to have an advantage; debugging shows that the TurboPipe C++ side is often starved of data to write (most likely because the file stream is buffered by the OS), and context switching or cache misses might be the cause of the slowdown.
+
+ Interestingly, due either to Linux's scheduler on AMD Ryzen CPUs or to their operating philosophy, it was experimentally seen that Ryzen's frenetic thread switching slightly degrades single-thread performance, which can be _"fixed"_ by prepending the command with `taskset --cpu 0,2` (not recommended at all), compared to Windows performance on the same system (Linux 🚀 = Windows 🐢). This can also be due to the topology of the tested CPUs having more than one Core Complex Die (CCD). Intel CPUs seem to stick to the same thread for longer, which makes the Python threaded method unnecessary overhead.
+
+ ### Personal experience
+
+ On realistic loads, like [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow)'s default lightweight shader export, TurboPipe increases rendering speed from 1080p260 to 1080p330 on my system, with CPU usage in the mid 80%s rather than the low 60%s. For [**DepthFlow**](https://github.com/BrokenSource/ShaderFlow)'s default depth video export, no gains are seen, as the CPU is almost saturated encoding at 1080p130.
+
+ </div>
+
+ <br>
+
+ # 📚 Future work
+
+ - Add support for NumPy arrays, memoryviews, and bytes-like objects
+ - Improve the thread synchronization and/or use a ThreadPool
+ - Maybe use `mmap` instead of chunked writes
+ - Test on macOS 🙈
@@ -0,0 +1,82 @@
+ import contextlib
+ import random
+ import subprocess
+ from typing import Generator
+
+ import moderngl
+ import tqdm
+
+ import turbopipe
+
+ # User constants
+ WIDTH, HEIGHT = (1920, 1080)
+ FRAMERATE = 60
+ DURATION = 30
+
+ # Calculate constants
+ BYTES_PER_FRAME = (WIDTH * HEIGHT * 3)
+ TOTAL_FRAMES = (DURATION * FRAMERATE)
+ TOTAL_BYTES = (BYTES_PER_FRAME * TOTAL_FRAMES)
+
+ # Create ModernGL objects
+ ctx = moderngl.create_standalone_context()
+ buffer = ctx.buffer(reserve=BYTES_PER_FRAME)
+
+ # Let's play fair and avoid any OS/Python/Hardware optimizations
+ buffer.write(bytearray(random.getrandbits(8) for _ in range(BYTES_PER_FRAME)))
+
+ # -------------------------------------------------------------------------------------------------|
+
+ @contextlib.contextmanager
+ def FFmpeg() -> Generator[subprocess.Popen, None, None]:
+     try:
+         ffmpeg = subprocess.Popen([
+             "ffmpeg",
+             "-hide_banner",
+             "-loglevel", "error",
+             "-f", "rawvideo",
+             "-pix_fmt", "rgb24",
+             "-s", f"{WIDTH}x{HEIGHT}",
+             "-r", str(FRAMERATE),
+             "-i", "-",
+             "-f", "null",
+             "-", "-y"
+         ], stdin=subprocess.PIPE)
+
+         yield ffmpeg
+     finally:
+         ffmpeg.stdin.close()
+         ffmpeg.wait()
+
+ # -------------------------------------------------------------------------------------------------|
+
+ @contextlib.contextmanager
+ def Progress():
+     with tqdm.tqdm(total=TOTAL_FRAMES, unit="Frame", smoothing=0) as frame_bar, \
+          tqdm.tqdm(total=TOTAL_BYTES, unit="B", smoothing=0, unit_scale=True) as byte_bar:
+         def next():
+             byte_bar.update(BYTES_PER_FRAME)
+             frame_bar.update(1)
+         yield next
+
+ # -------------------------------------------------------------------------------------------------|
+
+ print("\n:: Traditional method\n")
+
+ with Progress() as progress, FFmpeg() as ffmpeg:
+     for frame in range(TOTAL_FRAMES):
+         ffmpeg.stdin.write(buffer.read())
+         progress()
+
+ print("\n:: TurboPipe method\n")
+
+ with Progress() as progress, FFmpeg() as ffmpeg:
+     fileno = ffmpeg.stdin.fileno()
+
+     for frame in range(TOTAL_FRAMES):
+         turbopipe.pipe(buffer, fileno)
+         progress()
+
+     turbopipe.sync()
+
+ turbopipe.close()
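
For completeness, the `benchmark.py` above only implements the 🐢 (plain write) and 🌀 (turbopipe) paths from the README's benchmark notes. A minimal sketch of the 🚀 threaded-queue baseline could look like the following; it is illustrative only, and the `threaded_writer` helper, the queue size, and the sentinel scheme are assumptions, not code shipped in the package.

```python
import queue
import threading

def threaded_writer(buffer, ffmpeg, frames):
    # A single worker thread drains frame bytes from a small queue and writes them to FFmpeg,
    # so the main thread is free to keep producing frames (here: re-reading the same buffer)
    todo = queue.Queue(maxsize=2)

    def worker():
        while True:
            data = todo.get()
            if data is None:   # sentinel: no more frames
                break
            ffmpeg.stdin.write(data)

    thread = threading.Thread(target=worker, daemon=True)
    thread.start()

    for _ in range(frames):
        todo.put(buffer.read())  # still pays for the buffer.read() copy on the main thread

    todo.put(None)
    thread.join()

# Usage with the objects defined in benchmark.py:
#   with Progress() as progress, FFmpeg() as ffmpeg:
#       threaded_writer(buffer, ffmpeg, TOTAL_FRAMES)
```

Compared to `turbopipe.pipe`, this variant still copies every frame into Python bytes via `buffer.read()`, which is why the 🚀 rows often trail the 🌀 rows in the tables above.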