turbopipe 1.1.0.tar.gz → 1.2.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of turbopipe might be problematic. Click here for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: turbopipe
3
- Version: 1.1.0
3
+ Version: 1.2.1
4
4
  Summary: 🌀 Faster MemoryView inter-process data transfers for subprocesses
5
5
  Home-page: https://brokensrc.dev
6
6
  Author-Email: Tremeschin <29046864+Tremeschin@users.noreply.github.com>
@@ -100,7 +100,7 @@ import turbopipe
100
100
 
101
101
  # Create ModernGL objects
102
102
  ctx = moderngl.create_standalone_context()
103
- buffer = ctx.buffer(reserve=1920*1080*3)
103
+ buffers = [ctx.buffer(reserve=1920*1080*3) for _ in range(2)]
104
104
 
105
105
  # Make sure resolution, pixel format matches!
106
106
  ffmpeg = subprocess.Popen(
@@ -109,7 +109,10 @@ ffmpeg = subprocess.Popen(
109
109
  )
110
110
 
111
111
  # Rendering loop of yours (eg. 1m footage)
112
- for _ in range(60 * 60):
112
+ for frame in range(60 * 60):
113
+ buffer = buffers[frame % len(buffer)]
114
+ turbopipe.sync(buffer)
115
+ fbo.read_into(buffer)
113
116
  turbopipe.pipe(buffer, ffmpeg.stdin.fileno())
114
117
 
115
118
  # Finalize writing, encoding
@@ -65,7 +65,7 @@ import turbopipe
65
65
 
66
66
  # Create ModernGL objects
67
67
  ctx = moderngl.create_standalone_context()
68
- buffer = ctx.buffer(reserve=1920*1080*3)
68
+ buffers = [ctx.buffer(reserve=1920*1080*3) for _ in range(2)]
69
69
 
70
70
  # Make sure resolution, pixel format matches!
71
71
  ffmpeg = subprocess.Popen(
@@ -74,7 +74,10 @@ ffmpeg = subprocess.Popen(
74
74
  )
75
75
 
76
76
  # Rendering loop of yours (eg. 1m footage)
77
- for _ in range(60 * 60):
77
+ for frame in range(60 * 60):
78
+ buffer = buffers[frame % len(buffer)]
79
+ turbopipe.sync(buffer)
80
+ fbo.read_into(buffer)
78
81
  turbopipe.pipe(buffer, ffmpeg.stdin.fileno())
79
82
 
80
83
  # Finalize writing, encoding
@@ -15,15 +15,8 @@ dependencies = ["moderngl"]
15
15
  requires-python = ">=3.7"
16
16
 
17
17
  [build-system]
18
- requires = ["meson-python", "ninja", "hatch-fancy-pypi-readme"]
18
+ requires = ["meson-python", "ninja"]
19
19
  build-backend = "mesonpy"
20
20
 
21
- [tool.hatch.metadata.hooks.fancy-pypi-readme]
22
- content-type = "text/markdown"
23
-
24
- [[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
25
- start-after = "<!-- PyPI -->"
26
- path = "Readme.md"
27
-
28
21
  [tool.ruff.format]
29
22
  exclude = ["*"]
@@ -1,4 +1,4 @@
1
- from typing import Union
1
+ from typing import Optional, Union
2
2
 
3
3
  from moderngl import Buffer
4
4
 
@@ -7,8 +7,8 @@ from turbopipe import _turbopipe
7
7
 
8
8
  def pipe(buffer: Union[Buffer, memoryview], fileno: int) -> None:
9
9
  """
10
- Pipe the content of a moderngl.Buffer or memoryview to a file descriptor,
11
- Fast, threaded and non-blocking. Call `sync()` when done!
10
+ Pipe the content of a moderngl.Buffer or memoryview to a file descriptor, fast, threaded and
11
+ blocking when needed. Call `sync(buffer)` before this, and `sync()` when done for
12
12
 
13
13
  Usage:
14
14
  ```python
@@ -29,9 +29,12 @@ def pipe(buffer: Union[Buffer, memoryview], fileno: int) -> None:
29
29
  _turbopipe.pipe(buffer, fileno)
30
30
  del buffer
31
31
 
32
- def sync() -> None:
33
- """Waits for all jobs to finish"""
34
- _turbopipe.sync()
32
+ def sync(buffer: Optional[Union[Buffer, memoryview]]=None) -> None:
33
+ """Waits for any pending write operation on a buffer, or 'all buffers' if None, to finish"""
34
+ if isinstance(buffer, Buffer):
35
+ buffer = memoryview(buffer.mglo)
36
+ _turbopipe.sync(buffer)
37
+ del buffer
35
38
 
36
39
  def close() -> None:
37
40
  """Syncs and deletes objects"""
@@ -23,20 +23,15 @@
23
23
  #include <unordered_map>
24
24
  #include <deque>
25
25
 
26
- #define dict std::unordered_map
27
26
  using namespace std;
28
27
 
29
28
  // ------------------------------------------------------------------------------------------------|
30
29
  // TurboPipe internals
31
30
 
32
31
  struct Work {
33
- void* map;
32
+ void* data;
34
33
  int file;
35
34
  size_t size;
36
-
37
- int hash() {
38
- return std::hash<int>()(file) ^ std::hash<void*>()(map);
39
- }
40
35
  };
41
36
 
42
37
  class TurboPipe {
@@ -49,12 +44,29 @@ public:
49
44
  this->_pipe(view.buf, view.len, file);
50
45
  }
51
46
 
52
- void sync() {
53
- // Wait for all queues to be empty, as they are erased when
47
+ void sync(PyObject* memoryview=nullptr) {
48
+ void* data = nullptr;
49
+
50
+ if (memoryview != nullptr) {
51
+ Py_buffer view = *PyMemoryView_GET_BUFFER(memoryview);
52
+ data = view.buf;
53
+ }
54
+
55
+ // Wait for some or all queues to be empty, as they are erased when
54
56
  // each thread's writing loop is done, guaranteeing finish
55
57
  for (auto& values: queue) {
56
- while (!values.second.empty()) {
57
- this_thread::sleep_for(chrono::milliseconds(1));
58
+ while (true) {
59
+ {
60
+ // Prevent segfault on iteration on changing data
61
+ lock_guard<mutex> lock(mutexes[values.first]);
62
+
63
+ // Either all empty or some memory not queued (None or specific)
64
+ if (data != nullptr && values.second.find(data) == values.second.end())
65
+ break;
66
+ if (data == nullptr && values.second.empty())
67
+ break;
68
+ }
69
+ this_thread::sleep_for(chrono::microseconds(200));
58
70
  }
59
71
  }
60
72
  }
@@ -69,32 +81,32 @@ public:
69
81
  }
70
82
 
71
83
  private:
72
- dict<int, dict<int, condition_variable>> pending;
73
- dict<int, unordered_set<int>> queue;
74
- dict<int, deque<Work>> stream;
75
- dict<int, thread> threads;
76
- dict<int, mutex> mutexes;
84
+ unordered_map<int, unordered_map<void*, condition_variable>> pending;
85
+ unordered_map<int, unordered_set<void*>> queue;
86
+ unordered_map<int, deque<Work>> stream;
87
+ unordered_map<int, thread> threads;
88
+ unordered_map<int, mutex> mutexes;
77
89
  condition_variable signal;
78
90
  bool running;
79
91
 
80
92
  void _pipe(void* data, size_t size, int file) {
81
93
  Work work = {data, file, size};
82
- int hash = work.hash();
83
-
84
94
  unique_lock<mutex> lock(mutexes[file]);
85
95
 
86
- // Notify this hash is queued, wait if pending
87
- if (!queue[file].insert(hash).second) {
88
- pending[file][hash].wait(lock, [this, file, hash] {
89
- return queue[file].find(hash) == queue[file].end();
90
- });
96
+ /* Notify this memory is queued, wait if pending */ {
97
+ if (!queue[file].insert(data).second) {
98
+ pending[file][data].wait(lock, [this, file, data] {
99
+ return queue[file].find(data) == queue[file].end();
100
+ });
101
+ }
91
102
  }
92
103
 
93
- // Add another job to the queue
94
- stream[file].push_back(work);
95
- queue[file].insert(hash);
96
- this->running = true;
97
- lock.unlock();
104
+ /* Add another job to the queue */ {
105
+ stream[file].push_back(work);
106
+ queue[file].insert(data);
107
+ this->running = true;
108
+ lock.unlock();
109
+ }
98
110
 
99
111
  // Each file descriptor has its own thread
100
112
  if (threads.find(file) == threads.end())
@@ -122,24 +134,25 @@ private:
122
134
 
123
135
  #ifdef _WIN32
124
136
  // Windows doesn't like chunked writes ??
125
- write(work.file, (char*) work.map, work.size);
137
+ write(work.file, (char*) work.data, work.size);
126
138
  #else
127
139
  // Optimization: Write in chunks of 4096 (RAM page size)
128
140
  size_t tell = 0;
129
141
  while (tell < work.size) {
130
142
  size_t chunk = min(work.size - tell, static_cast<size_t>(4096));
131
- size_t written = write(work.file, (char*) work.map + tell, chunk);
143
+ size_t written = write(work.file, (char*) work.data + tell, chunk);
132
144
  if (written == -1) break;
133
145
  tell += written;
134
146
  }
135
147
  #endif
136
148
 
137
- // Signal work is done
138
149
  lock.lock();
139
- int hash = work.hash();
140
- pending[file][hash].notify_all();
141
- queue[file].erase(hash);
142
- signal.notify_all();
150
+
151
+ /* Signal work is done */ {
152
+ pending[file][work.data].notify_all();
153
+ queue[file].erase(work.data);
154
+ signal.notify_all();
155
+ }
143
156
  }
144
157
  }
145
158
  };
@@ -168,9 +181,16 @@ static PyObject* turbopipe_pipe(
168
181
 
169
182
  static PyObject* turbopipe_sync(
170
183
  PyObject* Py_UNUSED(self),
171
- PyObject* Py_UNUSED(args)
184
+ PyObject* args
172
185
  ) {
173
- turbopipe->sync();
186
+ PyObject* memoryview;
187
+ if (!PyArg_ParseTuple(args, "|O", &memoryview))
188
+ return NULL;
189
+ if (memoryview != nullptr && !PyMemoryView_Check(memoryview)) {
190
+ PyErr_SetString(PyExc_TypeError, "Expected a memoryview object or None");
191
+ return NULL;
192
+ }
193
+ turbopipe->sync(memoryview);
174
194
  Py_RETURN_NONE;
175
195
  }
176
196
 
@@ -191,7 +211,7 @@ static void turbopipe_exit() {
191
211
 
192
212
  static PyMethodDef TurboPipeMethods[] = {
193
213
  {"pipe", (PyCFunction) turbopipe_pipe, METH_VARARGS, ""},
194
- {"sync", (PyCFunction) turbopipe_sync, METH_NOARGS, ""},
214
+ {"sync", (PyCFunction) turbopipe_sync, METH_VARARGS, ""},
195
215
  {"close", (PyCFunction) turbopipe_close, METH_NOARGS, ""},
196
216
  {NULL, NULL, 0, NULL}
197
217
  };
@@ -1,5 +1,5 @@
1
1
  #!/usr/bin/env python3
2
- __version__ = "1.1.0"
2
+ __version__ = "1.2.1"
3
3
 
4
4
  if __name__ == "__main__":
5
5
  print(__version__)
File without changes
File without changes
File without changes
File without changes
File without changes