turbopipe 1.2.2.tar.gz → 1.2.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of turbopipe might be problematic. See the package registry's advisory page for more details.

@@ -2,102 +2,93 @@ name: release
2
2
 
3
3
  on:
4
4
  workflow_dispatch:
5
- push:
6
- paths:
7
- - 'turbopipe/version.py'
8
5
 
9
6
  jobs:
10
7
  sdist:
11
- name: Package source
8
+ name: Make sdist
12
9
  runs-on: ubuntu-latest
13
-
14
10
  steps:
15
11
  - uses: actions/checkout@v4
16
- - uses: actions/setup-python@v5
17
-
18
- - name: deps
19
- run: python -m pip install -U pip wheel build
12
+ - uses: astral-sh/setup-uv@v6
20
13
 
21
- - name: sdist
22
- run: python -m build --sdist -o package
14
+ - name: Make sdist
15
+ run: uv build --sdist
23
16
 
24
17
  - name: upload
25
18
  uses: actions/upload-artifact@v4
26
19
  with:
27
20
  name: package-sdist
28
- path: package/*.tar.gz
21
+ path: dist/*.tar.gz
29
22
 
30
23
  wheels:
31
- name: Build Python wheels on ${{matrix.os}}
24
+ name: Make wheels for ${{matrix.os}}
32
25
  runs-on: ${{matrix.os}}
33
26
  strategy:
34
27
  matrix:
35
- os: [ubuntu-latest, windows-latest, macos-14]
36
-
28
+ os: [
29
+ ubuntu-latest,
30
+ windows-latest,
31
+ macos-latest
32
+ ]
37
33
  env:
38
- CIBW_BUILD: cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*
39
- CIBW_ARCHS_LINUX: auto
40
- CIBW_ARCHS_MACOS: arm64 x86_64
41
- CIBW_ARCHS_WINDOWS: auto
42
- CIBW_SKIP: '*musllinux* *i686* *-win32'
43
-
34
+ CIBW_BUILD: cp39-* cp310-* cp311-* cp312-* cp313-* cp313t-*
35
+ CIBW_ENABLE: cpython-freethreading
36
+ CIBW_BUILD_FRONTEND: "build[uv]"
37
+ CIBW_ARCHS_LINUX: x86_64 aarch64
38
+ CIBW_ARCHS_MACOS: x86_64 arm64
39
+ CIBW_SKIP: "*-win32 *-manylinux_i686 *-musllinux*"
44
40
  steps:
45
41
  - uses: actions/checkout@v4
46
42
  - uses: actions/setup-python@v5
43
+ - uses: astral-sh/setup-uv@v6
47
44
 
48
45
  - name: Install MSVC
49
46
  if: matrix.os == 'windows-latest'
50
47
  uses: bus1/cabuild/action/msdevshell@v1
51
48
 
52
- - name: deps
53
- run: python -m pip install cibuildwheel==2.21.3
49
+ - name: Install QEMU
50
+ if: runner.os == 'linux'
51
+ uses: docker/setup-qemu-action@v3
52
+ with:
53
+ platforms: all
54
54
 
55
- - name: wheels
56
- run: python -m cibuildwheel --output-dir package
55
+ - name: Make wheels
56
+ run: uvx cibuildwheel==2.23.3 --output-dir dist
57
57
 
58
58
  - name: upload
59
59
  uses: actions/upload-artifact@v4
60
60
  with:
61
- name: package-wheels-${{ matrix.os }}
62
- path: package/*.whl
61
+ name: package-wheels-${{matrix.os}}
62
+ path: dist/*.whl
63
63
 
64
64
  publish:
65
65
  needs: [sdist, wheels]
66
66
  name: Publish to PyPI
67
67
  runs-on: ubuntu-latest
68
-
68
+ permissions:
69
+ id-token: write
69
70
  steps:
70
- - uses: actions/checkout@v4
71
- - uses: actions/setup-python@v5
71
+ - uses: astral-sh/setup-uv@v6
72
72
 
73
73
  - name: Download artifacts
74
74
  uses: actions/download-artifact@v4
75
75
  with:
76
76
  pattern: 'package-*'
77
77
  merge-multiple: true
78
- path: package
78
+ path: dist
79
79
 
80
- - name: deps
81
- run: python -m pip install -U twine
82
-
83
- - name: publish
84
- env:
85
- TWINE_USERNAME: __token__
86
- TWINE_PASSWORD: ${{secrets.PYPI_TOKEN}}
87
- run: twine upload package/*
80
+ - name: Publish
81
+ run: uv publish dist/*
88
82
 
89
83
  tag:
90
84
  needs: publish
91
85
  name: Create Release Tag
92
86
  runs-on: ubuntu-latest
93
-
94
87
  steps:
95
88
  - uses: actions/checkout@v4
96
- - uses: actions/setup-python@v5
97
89
 
98
90
  - name: Get version
99
- run: echo VERSION=$(python turbopipe/version.py) >> $GITHUB_ENV
100
- shell: bash
91
+ run: echo VERSION=$(uv run turbopipe/version.py) >> $GITHUB_ENV
101
92
 
102
93
  - name: Create Release Tag
103
94
  run: |
@@ -0,0 +1 @@
1
+ 3.13
@@ -1,41 +1,18 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.4
2
2
  Name: turbopipe
3
- Version: 1.2.2
3
+ Version: 1.2.3
4
4
  Summary: 🌀 Faster ModernGL Buffers inter-process data transfers for subprocesses
5
5
  Author-Email: Tremeschin <29046864+Tremeschin@users.noreply.github.com>
6
- License: MIT License
7
-
8
- Copyright (c) 2024 Gabriel Tremeschin
9
-
10
- Permission is hereby granted, free of charge, to any person obtaining a copy
11
- of this software and associated documentation files (the "Software"), to deal
12
- in the Software without restriction, including without limitation the rights
13
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
- copies of the Software, and to permit persons to whom the Software is
15
- furnished to do so, subject to the following conditions:
16
-
17
- The above copyright notice and this permission notice shall be included in all
18
- copies or substantial portions of the Software.
19
-
20
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
- SOFTWARE.
27
-
28
- Project-URL: issues, https://github.com/BrokenSource/TurboPipe/issues
29
- Project-URL: repository, https://github.com/BrokenSource/TurboPipe
30
- Project-URL: documentation, https://github.com/BrokenSource/TurboPipe
31
- Project-URL: homepage, https://brokensrc.dev
6
+ License-Expression: MIT
7
+ Project-URL: GitHub, https://github.com/BrokenSource/TurboPipe
8
+ Project-URL: Changelog, https://brokensrc.dev/about/changelog
9
+ Project-URL: Funding, https://brokensrc.dev/about/sponsors
10
+ Project-URL: Contact, https://brokensrc.dev/about/contact
11
+ Project-URL: Homepage, https://brokensrc.dev
32
12
  Requires-Python: >=3.7
33
13
  Requires-Dist: moderngl
34
14
  Description-Content-Type: text/markdown
35
15
 
36
- > [!IMPORTANT]
37
- > <sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
38
- <!-- PyPI -->
39
16
  <div align="center">
40
17
  <a href="https://brokensrc.dev/"><img src="https://raw.githubusercontent.com/BrokenSource/TurboPipe/main/turbopipe/resources/images/turbopipe.png" width="200"></a>
41
18
  <h1>TurboPipe</h1>
@@ -57,15 +34,17 @@ Description-Content-Type: text/markdown
57
34
 
58
35
  The **optimizations** involved are:
59
36
 
60
- - **Zero-copy**: Avoid unnecessary memory copies or allocation (intermediate `buffer.read()`)
37
+ - **Zero-copy**: Avoid unnecessary memory copies or allocation (intermediate `buffer.read`)
61
38
  - **C++**: The core of TurboPipe is written in C++ for speed, efficiency and low-level control
62
- - **Chunks**: Write in chunks of 4096 bytes (RAM page size), so the hardware is happy (Unix)
63
39
  - **Threaded**:
64
40
  - Doesn't block Python code execution, allows to render next frame
65
41
  - Decouples the main thread from the I/O thread for performance
42
+ - **Chunks**: Write in chunks of 4096 bytes (RAM page size), so the hardware is happy (Unix)
66
43
 
67
44
  ✅ Don't worry, there's proper **safety** in place. TurboPipe will block Python if a memory address is already queued for writing, and guarantees order of writes per file-descriptor. Just call `.sync()` when done 😉
68
45
 
46
+ <sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
47
+
69
48
  <br>
70
49
 
71
50
  # 📦 Installation
@@ -90,7 +69,7 @@ rye add turbopipe
90
69
 
91
70
  # 🚀 Usage
92
71
 
93
- See also the [**Examples**](https://github.com/BrokenSource/TurboPipe/tree/main/examples) folder for comparisons, and [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/Scene.py) usage of it!
72
+ See also the [**Examples**](https://github.com/BrokenSource/TurboPipe/tree/main/examples) folder for comparisons, and [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/Exporting.py)'s usage of it!
94
73
 
95
74
  ```python
96
75
  import subprocess
@@ -98,27 +77,53 @@ import subprocess
98
77
  import moderngl
99
78
  import turbopipe
100
79
 
101
- # Create ModernGL objects
80
+ # Create ModernGL objects and proxy buffers
102
81
  ctx = moderngl.create_standalone_context()
103
- buffers = [ctx.buffer(reserve=1920*1080*3) for _ in range(2)]
82
+ width, height, duration, fps = (1920, 1080, 10, 60)
83
+ buffers = [
84
+ ctx.buffer(reserve=(width*height*3))
85
+ for _ in range(nbuffers := 2)
86
+ ]
87
+
88
+ # Create your FBO, Textures, Shaders, etc.
104
89
 
105
90
  # Make sure resolution, pixel format matches!
106
- ffmpeg = subprocess.Popen(
107
- 'ffmpeg -f rawvideo -pix_fmt rgb24 -r 60 -s 1920x1080 -i - -f null -'.split(),
108
- stdin=subprocess.PIPE
109
- )
110
-
111
- # Rendering loop of yours (eg. 1m footage)
112
- for frame in range(60 * 60):
113
- buffer = buffers[frame % len(buffer)]
91
+ ffmpeg = subprocess.Popen((
92
+ "ffmpeg",
93
+ "-f", "rawvideo",
94
+ "-pix_fmt", "rgb24",
95
+ "-r", str(fps),
96
+ "-s", f"{width}x{height}",
97
+ "-i", "-",
98
+ "-f", "null",
99
+ "output.mp4"
100
+ ), stdin=subprocess.PIPE)
101
+
102
+ # Rendering loop of yours
103
+ for frame in range(duration*fps):
104
+ buffer = buffers[frame % nbuffers]
105
+
106
+ # Wait queued writes before copying
114
107
  turbopipe.sync(buffer)
115
108
  fbo.read_into(buffer)
109
+
110
+ # Doesn't lock the GIL, writes in parallel
116
111
  turbopipe.pipe(buffer, ffmpeg.stdin.fileno())
117
112
 
118
- # Finalize writing, encoding
113
+ # Wait for queued writes, clean memory
114
+ for buffer in buffers:
115
+ turbopipe.sync(buffer)
116
+ buffer.release()
117
+
118
+ # Signal stdin stream is done
119
119
  ffmpeg.stdin.close()
120
- turbopipe.close()
120
+
121
+ # wait for encoding to finish
121
122
  ffmpeg.wait()
123
+
124
+ # Warn: Albeit rare, only call close when no other data
125
+ # write is pending, as it might skip a frame or halt
126
+ turbopipe.close()
122
127
  ```
123
128
 
124
129
  <br>
@@ -370,5 +375,5 @@ On realistically loads, like [**ShaderFlow**](https://github.com/BrokenSource/Sh
370
375
  # 📚 Future work
371
376
 
372
377
  - Disable/investigate performance degradation on Windows iGPUs
373
- - Improve the thread synchronization and/or use a ThreadPool
374
378
  - Maybe use `mmap` instead of chunks writing on Linux
379
+ - Split the code into a libturbopipe? Not sure where it would be useful 😅
@@ -8,7 +8,7 @@ project('turbopipe', 'cpp',
8
8
 
9
9
  default_options: [
10
10
  'warning_level=3',
11
- 'cpp_std=c++14',
11
+ 'cpp_std=c++20',
12
12
  'buildtype=release',
13
13
  'optimization=3'
14
14
  ]
@@ -1,16 +1,17 @@
1
1
  [project.urls]
2
- issues = "https://github.com/BrokenSource/TurboPipe/issues"
3
- repository = "https://github.com/BrokenSource/TurboPipe"
4
- documentation = "https://github.com/BrokenSource/TurboPipe"
5
- homepage = "https://brokensrc.dev"
2
+ GitHub = "https://github.com/BrokenSource/TurboPipe"
3
+ Changelog = "https://brokensrc.dev/about/changelog"
4
+ Funding = "https://brokensrc.dev/about/sponsors"
5
+ Contact = "https://brokensrc.dev/about/contact"
6
+ Homepage = "https://brokensrc.dev"
6
7
 
7
8
  [project]
8
9
  name = "turbopipe"
9
- dynamic = ["version"]
10
10
  description = "🌀 Faster ModernGL Buffers inter-process data transfers for subprocesses"
11
11
  authors = [{name="Tremeschin", email="29046864+Tremeschin@users.noreply.github.com"}]
12
- readme = "Readme.md"
13
- license = {file="License.md"}
12
+ dynamic = ["version"]
13
+ readme = "readme.md"
14
+ license = "MIT"
14
15
  dependencies = ["moderngl"]
15
16
  requires-python = ">=3.7"
16
17
 
@@ -18,5 +19,8 @@ requires-python = ">=3.7"
18
19
  requires = ["meson-python", "ninja"]
19
20
  build-backend = "mesonpy"
20
21
 
22
+ [tool.uv]
23
+ managed = false
24
+
21
25
  [tool.ruff.format]
22
26
  exclude = ["*"]
@@ -1,6 +1,3 @@
1
- > [!IMPORTANT]
2
- > <sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
3
- <!-- PyPI -->
4
1
  <div align="center">
5
2
  <a href="https://brokensrc.dev/"><img src="https://raw.githubusercontent.com/BrokenSource/TurboPipe/main/turbopipe/resources/images/turbopipe.png" width="200"></a>
6
3
  <h1>TurboPipe</h1>
@@ -22,15 +19,17 @@
22
19
 
23
20
  The **optimizations** involved are:
24
21
 
25
- - **Zero-copy**: Avoid unnecessary memory copies or allocation (intermediate `buffer.read()`)
22
+ - **Zero-copy**: Avoid unnecessary memory copies or allocation (intermediate `buffer.read`)
26
23
  - **C++**: The core of TurboPipe is written in C++ for speed, efficiency and low-level control
27
- - **Chunks**: Write in chunks of 4096 bytes (RAM page size), so the hardware is happy (Unix)
28
24
  - **Threaded**:
29
25
  - Doesn't block Python code execution, allows to render next frame
30
26
  - Decouples the main thread from the I/O thread for performance
27
+ - **Chunks**: Write in chunks of 4096 bytes (RAM page size), so the hardware is happy (Unix)
31
28
 
32
29
  ✅ Don't worry, there's proper **safety** in place. TurboPipe will block Python if a memory address is already queued for writing, and guarantees order of writes per file-descriptor. Just call `.sync()` when done 😉
33
30
 
31
+ <sub>Also check out [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow), where **TurboPipe** shines! 😉</sub>
32
+
34
33
  <br>
35
34
 
36
35
  # 📦 Installation
@@ -55,7 +54,7 @@ rye add turbopipe
55
54
 
56
55
  # 🚀 Usage
57
56
 
58
- See also the [**Examples**](https://github.com/BrokenSource/TurboPipe/tree/main/examples) folder for comparisons, and [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/Scene.py) usage of it!
57
+ See also the [**Examples**](https://github.com/BrokenSource/TurboPipe/tree/main/examples) folder for comparisons, and [**ShaderFlow**](https://github.com/BrokenSource/ShaderFlow/blob/main/ShaderFlow/Exporting.py)'s usage of it!
59
58
 
60
59
  ```python
61
60
  import subprocess
@@ -63,27 +62,53 @@ import subprocess
63
62
  import moderngl
64
63
  import turbopipe
65
64
 
66
- # Create ModernGL objects
65
+ # Create ModernGL objects and proxy buffers
67
66
  ctx = moderngl.create_standalone_context()
68
- buffers = [ctx.buffer(reserve=1920*1080*3) for _ in range(2)]
67
+ width, height, duration, fps = (1920, 1080, 10, 60)
68
+ buffers = [
69
+ ctx.buffer(reserve=(width*height*3))
70
+ for _ in range(nbuffers := 2)
71
+ ]
72
+
73
+ # Create your FBO, Textures, Shaders, etc.
69
74
 
70
75
  # Make sure resolution, pixel format matches!
71
- ffmpeg = subprocess.Popen(
72
- 'ffmpeg -f rawvideo -pix_fmt rgb24 -r 60 -s 1920x1080 -i - -f null -'.split(),
73
- stdin=subprocess.PIPE
74
- )
75
-
76
- # Rendering loop of yours (eg. 1m footage)
77
- for frame in range(60 * 60):
78
- buffer = buffers[frame % len(buffer)]
76
+ ffmpeg = subprocess.Popen((
77
+ "ffmpeg",
78
+ "-f", "rawvideo",
79
+ "-pix_fmt", "rgb24",
80
+ "-r", str(fps),
81
+ "-s", f"{width}x{height}",
82
+ "-i", "-",
83
+ "-f", "null",
84
+ "output.mp4"
85
+ ), stdin=subprocess.PIPE)
86
+
87
+ # Rendering loop of yours
88
+ for frame in range(duration*fps):
89
+ buffer = buffers[frame % nbuffers]
90
+
91
+ # Wait queued writes before copying
79
92
  turbopipe.sync(buffer)
80
93
  fbo.read_into(buffer)
94
+
95
+ # Doesn't lock the GIL, writes in parallel
81
96
  turbopipe.pipe(buffer, ffmpeg.stdin.fileno())
82
97
 
83
- # Finalize writing, encoding
98
+ # Wait for queued writes, clean memory
99
+ for buffer in buffers:
100
+ turbopipe.sync(buffer)
101
+ buffer.release()
102
+
103
+ # Signal stdin stream is done
84
104
  ffmpeg.stdin.close()
85
- turbopipe.close()
105
+
106
+ # wait for encoding to finish
86
107
  ffmpeg.wait()
108
+
109
+ # Warn: Albeit rare, only call close when no other data
110
+ # write is pending, as it might skip a frame or halt
111
+ turbopipe.close()
87
112
  ```
88
113
 
89
114
  <br>
@@ -335,5 +360,5 @@ On realistically loads, like [**ShaderFlow**](https://github.com/BrokenSource/Sh
335
360
  # 📚 Future work
336
361
 
337
362
  - Disable/investigate performance degradation on Windows iGPUs
338
- - Improve the thread synchronization and/or use a ThreadPool
339
363
  - Maybe use `mmap` instead of chunks writing on Linux
364
+ - Split the code into a libturbopipe? Not sure where it would be useful 😅
@@ -0,0 +1,29 @@
1
+ from typing import Optional, Union
2
+
3
+ from moderngl import Buffer
4
+
5
+ from turbopipe import _turbopipe
6
+
7
+ __all__ = [
8
+ "pipe",
9
+ "sync",
10
+ "close"
11
+ ]
12
+
13
+ def pipe(buffer: Union[Buffer, memoryview], fileno: int) -> None:
14
+ """Pipe a buffer contents to a file descriptor, fast and threaded"""
15
+ if isinstance(buffer, Buffer):
16
+ buffer = memoryview(buffer.mglo)
17
+ _turbopipe.pipe(buffer, fileno)
18
+ del buffer
19
+
20
+ def sync(buffer: Optional[Union[Buffer, memoryview]]=None) -> None:
21
+ """Wait for pending operations on a buffer to finish"""
22
+ if isinstance(buffer, Buffer):
23
+ buffer = memoryview(buffer.mglo)
24
+ _turbopipe.sync(buffer)
25
+ del buffer
26
+
27
+ def close() -> None:
28
+ """Syncs and deletes objects"""
29
+ _turbopipe.close()
@@ -1,9 +1,6 @@
1
1
  // ------------------------------------------------------------------------------------------------|
2
- //
3
2
  // TurboPipe - Faster ModernGL Buffers inter-process data transfers for subprocesses
4
- //
5
- // (c) 2024, Tremeschin, MIT License
6
- //
3
+ // (c) MIT License 2024-2025, Tremeschin
7
4
  // ------------------------------------------------------------------------------------------------|
8
5
 
9
6
  #define PY_SSIZE_T_CLEAN
@@ -29,9 +26,9 @@ using namespace std;
29
26
  // TurboPipe internals
30
27
 
31
28
  struct Work {
32
- void* data;
33
- int file;
29
+ void* data;
34
30
  size_t size;
31
+ int file;
35
32
  };
36
33
 
37
34
  class TurboPipe {
@@ -39,108 +36,110 @@ public:
39
36
  TurboPipe(): running(true) {}
40
37
  ~TurboPipe() {close();}
41
38
 
42
- void pipe(PyObject* memoryview, int file) {
43
- Py_buffer view = *PyMemoryView_GET_BUFFER(memoryview);
44
- this->_pipe(view.buf, view.len, file);
39
+ void pipe(PyObject* view, int file) {
40
+ Py_buffer data = *PyMemoryView_GET_BUFFER(view);
41
+ this->_pipe(data.buf, (size_t) data.len, file);
45
42
  }
46
43
 
47
- void sync(PyObject* memoryview=nullptr) {
48
- void* data = nullptr;
49
-
50
- if (memoryview != nullptr) {
51
- Py_buffer view = *PyMemoryView_GET_BUFFER(memoryview);
52
- data = view.buf;
53
- }
54
-
55
- // Wait for some or all queues to be empty, as they are erased when
56
- // each thread's writing loop is done, guaranteeing finish
57
- for (auto& values: queue) {
58
- while (true) {
59
- {
60
- // Prevent segfault on iteration on changing data
61
- lock_guard<mutex> lock(mutexes[values.first]);
62
-
63
- // Either all empty or some memory not queued (None or specific)
64
- if (data != nullptr && values.second.find(data) == values.second.end())
65
- break;
66
- if (data == nullptr && values.second.empty())
67
- break;
68
- }
69
- this_thread::sleep_for(chrono::microseconds(200));
70
- }
71
- }
44
+ void sync(PyObject* view=nullptr) {
45
+ if (view != nullptr)
46
+ this->_sync((*PyMemoryView_GET_BUFFER(view)).buf);
47
+ else
48
+ this->_sync(nullptr);
72
49
  }
73
50
 
74
51
  void close() {
75
- sync();
76
- running = false;
77
- signal.notify_all();
78
- for (auto& pair: threads)
52
+ this->_sync();
53
+ this->running = false;
54
+ for (auto& pair: this->signal)
55
+ pair.second.notify_all();
56
+ for (auto& pair: this->threads)
79
57
  pair.second.join();
80
- threads.clear();
58
+ this->threads.clear();
81
59
  }
82
60
 
83
61
  private:
84
- unordered_map<int, unordered_map<void*, condition_variable>> pending;
62
+ unordered_map<int, condition_variable> pending;
63
+ unordered_map<int, condition_variable> signal;
85
64
  unordered_map<int, unordered_set<void*>> queue;
86
65
  unordered_map<int, deque<Work>> stream;
87
66
  unordered_map<int, thread> threads;
88
67
  unordered_map<int, mutex> mutexes;
89
- condition_variable signal;
90
68
  bool running;
91
69
 
92
70
  void _pipe(void* data, size_t size, int file) {
93
- Work work = {data, file, size};
94
- unique_lock<mutex> lock(mutexes[file]);
71
+ unique_lock<mutex> lock(this->mutexes[file]);
95
72
 
96
73
  /* Notify this memory is queued, wait if pending */ {
97
- if (!queue[file].insert(data).second) {
98
- pending[file][data].wait(lock, [this, file, data] {
99
- return queue[file].find(data) == queue[file].end();
74
+ if (!this->queue[file].insert(data).second) {
75
+ this->pending[file].wait(lock, [this, file, data] {
76
+ return this->queue[file].find(data) == this->queue[file].end();
100
77
  });
101
78
  }
102
79
  }
103
80
 
104
81
  /* Add another job to the queue */ {
105
- stream[file].push_back(work);
106
- queue[file].insert(data);
82
+ this->stream[file].push_back(Work{data, size, file});
83
+ this->queue[file].insert(data);
107
84
  this->running = true;
108
85
  lock.unlock();
109
86
  }
110
87
 
111
88
  // Each file descriptor has its own thread
112
- if (threads.find(file) == threads.end())
113
- threads[file] = thread(&TurboPipe::worker, this, file);
89
+ if (this->threads.find(file) == this->threads.end())
90
+ this->threads[file] = thread(&TurboPipe::worker, this, file);
114
91
 
115
- signal.notify_all();
92
+ // Trigger the worker to write the data
93
+ this->signal[file].notify_all();
94
+ }
95
+
96
+ void _sync(void* data=nullptr) {
97
+ for (auto& values: this->queue) {
98
+ while (true) {
99
+ {
100
+ // Prevent segfault on iteration on changing data
101
+ lock_guard<mutex> lock(this->mutexes[values.first]);
102
+
103
+ // Continue if specific data is not in queue
104
+ if (data != nullptr)
105
+ if (values.second.find(data) == values.second.end())
106
+ break;
107
+
108
+ // Continue if all queues are empty
109
+ if (data == nullptr)
110
+ if (values.second.empty())
111
+ break;
112
+ }
113
+ this_thread::sleep_for(chrono::microseconds(200));
114
+ }
115
+ }
116
116
  }
117
117
 
118
118
  void worker(int file) {
119
119
  while (this->running) {
120
- unique_lock<mutex> lock(mutexes[file]);
120
+ unique_lock<mutex> lock(this->mutexes[file]);
121
121
 
122
- signal.wait(lock, [this, file] {
123
- return (!stream[file].empty() || !this->running);
122
+ this->signal[file].wait(lock, [this, file] {
123
+ return (!this->stream[file].empty() || !this->running);
124
124
  });
125
125
 
126
126
  // Skip on false positives, exit condition
127
- if (stream[file].empty()) continue;
127
+ if ( this->stream[file].empty()) continue;
128
128
  if (!this->running) break;
129
129
 
130
130
  // Get the next work item
131
- Work work = stream[file].front();
132
- stream[file].pop_front();
131
+ Work work = this->stream[file].front();
132
+ this->stream[file].pop_front();
133
133
  lock.unlock();
134
134
 
135
135
  #ifdef _WIN32
136
- // Windows doesn't like chunked writes ??
137
- write(work.file, (char*) work.data, work.size);
136
+ // Fixme: Windows doesn't like chunked writes?
137
+ write(work.file, (char*) work.data, static_cast<unsigned int>(work.size));
138
138
  #else
139
- // Optimization: Write in chunks of 4096 (RAM page size)
140
139
  size_t tell = 0;
141
140
  while (tell < work.size) {
142
141
  size_t chunk = min(work.size - tell, static_cast<size_t>(4096));
143
- size_t written = write(work.file, (char*) work.data + tell, chunk);
142
+ int written = write(work.file, (char*) work.data + tell, chunk);
144
143
  if (written == -1) break;
145
144
  tell += written;
146
145
  }
@@ -149,9 +148,9 @@ private:
149
148
  lock.lock();
150
149
 
151
150
  /* Signal work is done */ {
152
- pending[file][work.data].notify_all();
153
- queue[file].erase(work.data);
154
- signal.notify_all();
151
+ this->pending[file].notify_all();
152
+ this->queue[file].erase(work.data);
153
+ this->signal[file].notify_all();
155
154
  }
156
155
  }
157
156
  }
@@ -167,15 +166,15 @@ static PyObject* turbopipe_pipe(
167
166
  PyObject* Py_UNUSED(self),
168
167
  PyObject* args
169
168
  ) {
170
- PyObject* memoryview;
169
+ PyObject* view;
171
170
  PyObject* file;
172
- if (!PyArg_ParseTuple(args, "OO", &memoryview, &file))
171
+ if (!PyArg_ParseTuple(args, "OO", &view, &file))
173
172
  return NULL;
174
- if (!PyMemoryView_Check(memoryview)) {
173
+ if (!PyMemoryView_Check(view)) {
175
174
  PyErr_SetString(PyExc_TypeError, "Expected a memoryview object");
176
175
  return NULL;
177
176
  }
178
- turbopipe->pipe(memoryview, PyLong_AsLong(file));
177
+ turbopipe->pipe(view, PyLong_AsLong(file));
179
178
  Py_RETURN_NONE;
180
179
  }
181
180
 
@@ -183,14 +182,14 @@ static PyObject* turbopipe_sync(
183
182
  PyObject* Py_UNUSED(self),
184
183
  PyObject* args
185
184
  ) {
186
- PyObject* memoryview;
187
- if (!PyArg_ParseTuple(args, "|O", &memoryview))
185
+ PyObject* view;
186
+ if (!PyArg_ParseTuple(args, "|O", &view))
188
187
  return NULL;
189
- if (memoryview != nullptr && !PyMemoryView_Check(memoryview)) {
188
+ if (view != nullptr && !PyMemoryView_Check(view)) {
190
189
  PyErr_SetString(PyExc_TypeError, "Expected a memoryview object or None");
191
190
  return NULL;
192
191
  }
193
- turbopipe->sync(memoryview);
192
+ turbopipe->sync(view);
194
193
  Py_RETURN_NONE;
195
194
  }
196
195
 
@@ -216,18 +215,25 @@ static PyMethodDef TurboPipeMethods[] = {
216
215
  {NULL, NULL, 0, NULL}
217
216
  };
218
217
 
219
- static struct PyModuleDef turbopipe_module = {
220
- PyModuleDef_HEAD_INIT,
221
- "_turbopipe",
222
- NULL, -1,
223
- TurboPipeMethods,
224
- NULL, NULL, NULL, NULL
218
+ static struct PyModuleDef TurboPipeModule = {
219
+ .m_base = PyModuleDef_HEAD_INIT,
220
+ .m_name = "_turbopipe",
221
+ .m_doc = NULL,
222
+ .m_size = -1,
223
+ .m_methods = TurboPipeMethods,
224
+ .m_slots = NULL,
225
+ .m_traverse = NULL,
226
+ .m_clear = NULL,
227
+ .m_free = NULL
225
228
  };
226
229
 
227
230
  PyMODINIT_FUNC PyInit__turbopipe(void) {
228
- PyObject* module = PyModule_Create(&turbopipe_module);
231
+ PyObject* module = PyModule_Create(&TurboPipeModule);
229
232
  if (module == NULL)
230
233
  return NULL;
234
+ #ifdef Py_GIL_DISABLED
235
+ PyUnstable_Module_SetGIL(module, Py_MOD_GIL_NOT_USED);
236
+ #endif
231
237
  turbopipe = new TurboPipe();
232
238
  Py_AtExit(turbopipe_exit);
233
239
  return module;
@@ -1,5 +1,5 @@
1
1
  #!/usr/bin/env python3
2
- __version__ = "1.2.2"
2
+ __version__ = "1.2.3"
3
3
 
4
4
  if __name__ == "__main__":
5
5
  print(__version__)
@@ -1,47 +0,0 @@
1
- from typing import Optional, Union
2
-
3
- from moderngl import Buffer
4
-
5
- from turbopipe import _turbopipe
6
-
7
-
8
- def pipe(buffer: Union[Buffer, memoryview], fileno: int) -> None:
9
- """
10
- Pipe the content of a moderngl.Buffer or memoryview to a file descriptor, fast, threaded and
11
- blocking when needed. Call `sync(buffer)` before this, and `sync()` when done for
12
-
13
- Usage:
14
- ```python
15
- # Assuming `buffer = ctx.buffer(...)`
16
- # Note: Use as `fbo.read_into(buffer)`
17
-
18
- # As a open() file
19
- with open("file.bin", "wb") as file:
20
- turbopipe.pipe(buffer, file)
21
-
22
- # As a subprocess
23
- child = subprocess.Popen(..., stdin=subprocess.PIPE)
24
- turbopipe.pipe(buffer, child.stdin.fileno())
25
- ```
26
- """
27
- if isinstance(buffer, Buffer):
28
- buffer = memoryview(buffer.mglo)
29
- _turbopipe.pipe(buffer, fileno)
30
- del buffer
31
-
32
- def sync(buffer: Optional[Union[Buffer, memoryview]]=None) -> None:
33
- """Waits for any pending write operation on a buffer, or 'all buffers' if None, to finish"""
34
- if isinstance(buffer, Buffer):
35
- buffer = memoryview(buffer.mglo)
36
- _turbopipe.sync(buffer)
37
- del buffer
38
-
39
- def close() -> None:
40
- """Syncs and deletes objects"""
41
- _turbopipe.close()
42
-
43
- __all__ = [
44
- "pipe",
45
- "sync",
46
- "close"
47
- ]
File without changes
File without changes
File without changes
File without changes