turbopipe 1.1.0.tar.gz → 1.2.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of turbopipe might be problematic. Click here for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: turbopipe
3
- Version: 1.1.0
3
+ Version: 1.2.0
4
4
  Summary: 🌀 Faster MemoryView inter-process data transfers for subprocesses
5
5
  Home-page: https://brokensrc.dev
6
6
  Author-Email: Tremeschin <29046864+Tremeschin@users.noreply.github.com>
@@ -15,15 +15,8 @@ dependencies = ["moderngl"]
15
15
  requires-python = ">=3.7"
16
16
 
17
17
  [build-system]
18
- requires = ["meson-python", "ninja", "hatch-fancy-pypi-readme"]
18
+ requires = ["meson-python", "ninja"]
19
19
  build-backend = "mesonpy"
20
20
 
21
- [tool.hatch.metadata.hooks.fancy-pypi-readme]
22
- content-type = "text/markdown"
23
-
24
- [[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]]
25
- start-after = "<!-- PyPI -->"
26
- path = "Readme.md"
27
-
28
21
  [tool.ruff.format]
29
22
  exclude = ["*"]
@@ -1,4 +1,4 @@
1
- from typing import Union
1
+ from typing import Optional, Union
2
2
 
3
3
  from moderngl import Buffer
4
4
 
@@ -7,8 +7,8 @@ from turbopipe import _turbopipe
7
7
 
8
8
  def pipe(buffer: Union[Buffer, memoryview], fileno: int) -> None:
9
9
  """
10
- Pipe the content of a moderngl.Buffer or memoryview to a file descriptor,
11
- Fast, threaded and non-blocking. Call `sync()` when done!
10
+ Pipe the content of a moderngl.Buffer or memoryview to a file descriptor, fast, threaded and
11
+ blocking when needed. Call `sync(buffer)` before this, and `sync()` when done for
12
12
 
13
13
  Usage:
14
14
  ```python
@@ -29,9 +29,12 @@ def pipe(buffer: Union[Buffer, memoryview], fileno: int) -> None:
29
29
  _turbopipe.pipe(buffer, fileno)
30
30
  del buffer
31
31
 
32
- def sync() -> None:
33
- """Waits for all jobs to finish"""
34
- _turbopipe.sync()
32
+ def sync(buffer: Optional[Union[Buffer, memoryview]]=None) -> None:
33
+ """Waits for any pending write operation on a buffer, or 'all buffers' if None, to finish"""
34
+ if isinstance(buffer, Buffer):
35
+ buffer = memoryview(buffer.mglo)
36
+ _turbopipe.sync(buffer)
37
+ del buffer
35
38
 
36
39
  def close() -> None:
37
40
  """Syncs and deletes objects"""
@@ -30,13 +30,9 @@ using namespace std;
30
30
  // TurboPipe internals
31
31
 
32
32
  struct Work {
33
- void* map;
33
+ void* data;
34
34
  int file;
35
35
  size_t size;
36
-
37
- int hash() {
38
- return std::hash<int>()(file) ^ std::hash<void*>()(map);
39
- }
40
36
  };
41
37
 
42
38
  class TurboPipe {
@@ -49,12 +45,23 @@ public:
49
45
  this->_pipe(view.buf, view.len, file);
50
46
  }
51
47
 
52
- void sync() {
53
- // Wait for all queues to be empty, as they are erased when
48
+ void sync(PyObject* memoryview=nullptr) {
49
+ void* data = nullptr;
50
+
51
+ if (memoryview != nullptr) {
52
+ Py_buffer view = *PyMemoryView_GET_BUFFER(memoryview);
53
+ data = view.buf;
54
+ }
55
+
56
+ // Wait for some or all queues to be empty, as they are erased when
54
57
  // each thread's writing loop is done, guaranteeing finish
55
58
  for (auto& values: queue) {
56
- while (!values.second.empty()) {
57
- this_thread::sleep_for(chrono::milliseconds(1));
59
+ while (true) {
60
+ if (data != nullptr && values.second.find(data) == values.second.end())
61
+ break;
62
+ if (data == nullptr && values.second.empty())
63
+ break;
64
+ this_thread::sleep_for(chrono::microseconds(200));
58
65
  }
59
66
  }
60
67
  }
@@ -69,8 +76,8 @@ public:
69
76
  }
70
77
 
71
78
  private:
72
- dict<int, dict<int, condition_variable>> pending;
73
- dict<int, unordered_set<int>> queue;
79
+ dict<int, dict<void*, condition_variable>> pending;
80
+ dict<int, unordered_set<void*>> queue;
74
81
  dict<int, deque<Work>> stream;
75
82
  dict<int, thread> threads;
76
83
  dict<int, mutex> mutexes;
@@ -79,20 +86,18 @@ private:
79
86
 
80
87
  void _pipe(void* data, size_t size, int file) {
81
88
  Work work = {data, file, size};
82
- int hash = work.hash();
83
-
84
89
  unique_lock<mutex> lock(mutexes[file]);
85
90
 
86
- // Notify this hash is queued, wait if pending
87
- if (!queue[file].insert(hash).second) {
88
- pending[file][hash].wait(lock, [this, file, hash] {
89
- return queue[file].find(hash) == queue[file].end();
91
+ // Notify this memory is queued, wait if pending
92
+ if (!queue[file].insert(data).second) {
93
+ pending[file][data].wait(lock, [this, file, data] {
94
+ return queue[file].find(data) == queue[file].end();
90
95
  });
91
96
  }
92
97
 
93
98
  // Add another job to the queue
94
99
  stream[file].push_back(work);
95
- queue[file].insert(hash);
100
+ queue[file].insert(data);
96
101
  this->running = true;
97
102
  lock.unlock();
98
103
 
@@ -122,13 +127,13 @@ private:
122
127
 
123
128
  #ifdef _WIN32
124
129
  // Windows doesn't like chunked writes ??
125
- write(work.file, (char*) work.map, work.size);
130
+ write(work.file, (char*) work.data, work.size);
126
131
  #else
127
132
  // Optimization: Write in chunks of 4096 (RAM page size)
128
133
  size_t tell = 0;
129
134
  while (tell < work.size) {
130
135
  size_t chunk = min(work.size - tell, static_cast<size_t>(4096));
131
- size_t written = write(work.file, (char*) work.map + tell, chunk);
136
+ size_t written = write(work.file, (char*) work.data + tell, chunk);
132
137
  if (written == -1) break;
133
138
  tell += written;
134
139
  }
@@ -136,9 +141,8 @@ private:
136
141
 
137
142
  // Signal work is done
138
143
  lock.lock();
139
- int hash = work.hash();
140
- pending[file][hash].notify_all();
141
- queue[file].erase(hash);
144
+ pending[file][work.data].notify_all();
145
+ queue[file].erase(work.data);
142
146
  signal.notify_all();
143
147
  }
144
148
  }
@@ -168,9 +172,16 @@ static PyObject* turbopipe_pipe(
168
172
 
169
173
  static PyObject* turbopipe_sync(
170
174
  PyObject* Py_UNUSED(self),
171
- PyObject* Py_UNUSED(args)
175
+ PyObject* args
172
176
  ) {
173
- turbopipe->sync();
177
+ PyObject* memoryview;
178
+ if (!PyArg_ParseTuple(args, "|O", &memoryview))
179
+ return NULL;
180
+ if (memoryview != nullptr && !PyMemoryView_Check(memoryview)) {
181
+ PyErr_SetString(PyExc_TypeError, "Expected a memoryview object or None");
182
+ return NULL;
183
+ }
184
+ turbopipe->sync(memoryview);
174
185
  Py_RETURN_NONE;
175
186
  }
176
187
 
@@ -191,7 +202,7 @@ static void turbopipe_exit() {
191
202
 
192
203
  static PyMethodDef TurboPipeMethods[] = {
193
204
  {"pipe", (PyCFunction) turbopipe_pipe, METH_VARARGS, ""},
194
- {"sync", (PyCFunction) turbopipe_sync, METH_NOARGS, ""},
205
+ {"sync", (PyCFunction) turbopipe_sync, METH_VARARGS, ""},
195
206
  {"close", (PyCFunction) turbopipe_close, METH_NOARGS, ""},
196
207
  {NULL, NULL, 0, NULL}
197
208
  };
@@ -1,5 +1,5 @@
1
1
  #!/usr/bin/env python3
2
- __version__ = "1.1.0"
2
+ __version__ = "1.2.0"
3
3
 
4
4
  if __name__ == "__main__":
5
5
  print(__version__)
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes