jmux 0.0.5.tar.gz → 0.1.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {jmux-0.0.5 → jmux-0.1.0}/.gitignore +8 -1
- {jmux-0.0.5/src/jmux.egg-info → jmux-0.1.0}/PKG-INFO +184 -20
- {jmux-0.0.5 → jmux-0.1.0}/README.md +181 -19
- {jmux-0.0.5 → jmux-0.1.0}/pyproject.toml +7 -2
- jmux-0.1.0/src/jmux/__init__.py +11 -0
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux/awaitable.py +28 -16
- jmux-0.1.0/src/jmux/base.py +9 -0
- jmux-0.1.0/src/jmux/cli.py +62 -0
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux/decoder.py +20 -4
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux/demux.py +99 -57
- jmux-0.1.0/src/jmux/generator.py +381 -0
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux/types.py +5 -0
- {jmux-0.0.5 → jmux-0.1.0/src/jmux.egg-info}/PKG-INFO +184 -20
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux.egg-info/SOURCES.txt +8 -1
- jmux-0.1.0/src/jmux.egg-info/entry_points.txt +2 -0
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux.egg-info/requires.txt +2 -0
- jmux-0.1.0/tests/conftest.py +6 -0
- jmux-0.1.0/tests/test_awaitables.py +469 -0
- jmux-0.1.0/tests/test_cli.py +70 -0
- jmux-0.1.0/tests/test_decoder.py +425 -0
- {jmux-0.0.5 → jmux-0.1.0}/tests/test_demux__parse.py +991 -13
- {jmux-0.0.5 → jmux-0.1.0}/tests/test_demux__stream.py +384 -68
- jmux-0.1.0/tests/test_demux__validate.py +627 -0
- jmux-0.1.0/tests/test_generator.py +338 -0
- jmux-0.1.0/tests/test_helpers.py +226 -0
- jmux-0.1.0/tests/test_pda.py +266 -0
- jmux-0.0.5/src/jmux/__init__.py +0 -0
- jmux-0.0.5/tests/conftest.py +0 -6
- jmux-0.0.5/tests/test_awaitables.py +0 -78
- jmux-0.0.5/tests/test_decoder.py +0 -32
- jmux-0.0.5/tests/test_demux__validate.py +0 -134
- jmux-0.0.5/tests/test_helpers.py +0 -96
- {jmux-0.0.5 → jmux-0.1.0}/.github/workflows/ci.yml +0 -0
- {jmux-0.0.5 → jmux-0.1.0}/LICENSE +0 -0
- {jmux-0.0.5 → jmux-0.1.0}/setup.cfg +0 -0
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux/error.py +0 -0
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux/helpers.py +0 -0
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux/pda.py +0 -0
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux.egg-info/dependency_links.txt +0 -0
- {jmux-0.0.5 → jmux-0.1.0}/src/jmux.egg-info/top_level.txt +0 -0
````diff
--- jmux-0.0.5/src/jmux.egg-info/PKG-INFO
+++ jmux-0.1.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: jmux
-Version: 0.0.5
+Version: 0.1.0
 Summary: JMux: A Python package for demultiplexing a JSON string into multiple awaitable variables.
 Author-email: "Johannes A.I. Unruh" <johannes@unruh.ai>
 License: MIT License
@@ -40,10 +40,12 @@ Requires-Dist: pydantic>=2.0.0
 Provides-Extra: test
 Requires-Dist: pytest; extra == "test"
 Requires-Dist: pytest-anyio; extra == "test"
+Requires-Dist: trio; extra == "test"
 Provides-Extra: dev
 Requires-Dist: ruff; extra == "dev"
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: pytest-anyio; extra == "dev"
+Requires-Dist: trio; extra == "dev"
 Requires-Dist: uv; extra == "dev"
 Requires-Dist: build; extra == "dev"
 Requires-Dist: twine; extra == "dev"
@@ -61,8 +63,10 @@ This package is inspired by `Snapshot Streaming` mentioned in the [`WWDC25: Meet
 
 ## Features
 
-- **Asynchronous by Design**: Built on top of `
+- **Asynchronous by Design**: Built on top of `anyio`, JMux supports both `asyncio` and `trio` backends, making it perfect for modern, high-performance Python applications.
+- **Python 3.10+**: Supports Python 3.10 and newer versions.
 - **Pydantic Integration**: Validate your `JMux` classes against Pydantic models to ensure type safety and consistency.
+- **Code Generation**: Automatically generate JMux classes from `StreamableBaseModel` subclasses using the `jmux generate` CLI command.
 - **Awaitable and Streamable Sinks**: Use `AwaitableValue` for single values and `StreamableValues` for streams of values.
 - **Robust Error Handling**: JMux provides a comprehensive set of exceptions to handle parsing errors and other issues.
 - **Lightweight**: JMux has only a few external dependencies, making it easy to integrate into any project.
@@ -75,21 +79,155 @@ You can install JMux from PyPI using pip:
 pip install jmux
 ```
 
+## Code Generation
+
+JMux provides a CLI tool to automatically generate JMux classes from Pydantic models. Instead of manually writing both a Pydantic model and a corresponding JMux class, you can define your models using `StreamableBaseModel` and let JMux generate the demultiplexer classes for you.
+
+### Defining Models with StreamableBaseModel
+
+Use `StreamableBaseModel` as your base class instead of `pydantic.BaseModel`:
+
+```python
+from typing import Annotated
+from jmux import StreamableBaseModel, Streamed
+
+class LlmResponse(StreamableBaseModel):
+    thought: str
+    tool_code: Annotated[str, Streamed]
+    tags: list[str]
+```
+
+### Type Mappings
+
+The generator converts your model fields to JMux types as follows:
+
+| Model Field Type              | Generated JMux Type               |
+| ----------------------------- | --------------------------------- |
+| `str`, `int`, `float`, `bool` | `AwaitableValue[T]`               |
+| `Enum`                        | `AwaitableValue[EnumType]`        |
+| `T \| None`                   | `AwaitableValue[T \| None]`       |
+| `list[T]`                     | `StreamableValues[T]`             |
+| `Annotated[str, Streamed]`    | `StreamableValues[str]`           |
+| Nested `StreamableBaseModel`  | `AwaitableValue[NestedModelJMux]` |
+
+The `Streamed` marker is useful when you want to stream a string field character-by-character (e.g., for real-time display of LLM output) rather than awaiting the complete value.
+
+### Using the CLI
+
+Run the `jmux generate` command to scan your codebase and generate JMux classes:
+
+```bash
+jmux generate --root <directory>
+```
+
+This will:
+
+1. Recursively scan `<directory>` for Python files containing `StreamableBaseModel` subclasses
+2. Generate corresponding JMux classes with the suffix `JMux` (e.g., `LlmResponse` → `LlmResponseJMux`)
+3. Write the generated code to `src/jmux/generated/__init__.py`
+
+### Example
+
+Given this model:
+
+```python
+from typing import Annotated
+from jmux import StreamableBaseModel, Streamed
+
+class LlmResponse(StreamableBaseModel):
+    thought: str
+    tool_code: Annotated[str, Streamed]
+```
+
+Running `jmux generate` produces:
+
+```python
+from jmux.awaitable import AwaitableValue, StreamableValues
+from jmux.demux import JMux
+
+class LlmResponseJMux(JMux):
+    thought: AwaitableValue[str]
+    tool_code: StreamableValues[str]
+```
+
+You can then import and use the generated class:
+
+```python
+from jmux.generated import LlmResponseJMux
+
+jmux_instance = LlmResponseJMux()
+```
+
 ## Usage with LLMs (e.g., `litellm`)
 
 The primary use case for `jmux` is to process streaming JSON responses from LLMs. This allows you to react to parts of the data as it arrives, rather than waiting for the entire JSON object to be transmitted. While this should be obvious, I should mention, that **the order in which the pydantic model defines the properties, defines which stream is filled first**.
 
-
+### Using Code Generation (Recommended)
+
+The easiest way to use JMux with LLMs is to define your models using `StreamableBaseModel` and generate JMux classes automatically:
+
+```python
+from typing import Annotated
+from jmux import StreamableBaseModel, Streamed
+
+class LlmResponse(StreamableBaseModel): # Use `StreamableBaseModel` so that the CLI can find the `pydantic` models to parse
+    thought: str
+    tool_code: Annotated[str, Streamed]
+```
+
+Then run `jmux generate --root .` to generate the `LlmResponseJMux` class. You can then use it directly:
+
+```python
+import anyio
+from jmux.generated import LlmResponseJMux
+
+async def mock_llm_stream():
+    json_stream = '{"thought": "I need to write some code.", "tool_code": "print(\'Hello, World!\')"}'
+    for char in json_stream:
+        yield char
+        await anyio.sleep(0.01)
+
+async def process_llm_response():
+    jmux_instance = LlmResponseJMux()
+
+    async def feed_stream():
+        async for chunk in mock_llm_stream():
+            await jmux_instance.feed_chunks(chunk)
+
+    async def consume_thought():
+        thought = await jmux_instance.thought
+        print(f"LLM's thought received: '{thought}'")
+
+    async def consume_tool_code():
+        print("Receiving tool code...")
+        full_code = ""
+        async for code_fragment in jmux_instance.tool_code:
+            full_code += code_fragment
+            print(f" -> Received fragment: {code_fragment}")
+        print(f"Full tool code received: {full_code}")
+
+    async with anyio.create_task_group() as tg:
+        tg.start_soon(feed_stream)
+        tg.start_soon(consume_thought)
+        tg.start_soon(consume_tool_code)
+
+if __name__ == "__main__":
+    anyio.run(process_llm_response, backend="asyncio")
+```
+
+### Manual Approach
+
+If you prefer more control, you can manually define both the Pydantic model and the JMux class:
 
 ```python
-import
+import anyio
 from pydantic import BaseModel
 from jmux import JMux, AwaitableValue, StreamableValues
 # litellm is used conceptually here
 # from litellm import acompletion
 
 # 1. Define the Pydantic model for the expected JSON response
-class LlmResponse(BaseModel):
+class LlmResponse(BaseModel): # No need to use `StreamableBaseModel` here, since it is only used for detection purposes
     thought: str # **This property is filled first**
     tool_code: str
 
@@ -106,7 +244,7 @@ async def mock_llm_stream():
     json_stream = '{"thought": "I need to write some code.", "tool_code": "print(\'Hello, World!\')"}'
     for char in json_stream:
         yield char
-        await
+        await anyio.sleep(0.01) # Simulate network latency
 
 # Main function to orchestrate the call and processing
 async def process_llm_response():
@@ -132,15 +270,16 @@ async def process_llm_response():
             print(f" -> Received fragment: {code_fragment}")
         print(f"Full tool code received: {full_code}")
 
-    # Run all tasks concurrently
-
-        feed_stream
-        consume_thought
-        consume_tool_code
-    )
+    # Run all tasks concurrently using anyio task group
+    async with anyio.create_task_group() as tg:
+        tg.start_soon(feed_stream)
+        tg.start_soon(consume_thought)
+        tg.start_soon(consume_tool_code)
 
+# Run with asyncio backend
 if __name__ == "__main__":
-
+    anyio.run(process_llm_response, backend="asyncio")
+    # Or use trio: anyio.run(process_llm_response, backend="trio")
 ```
 
 ## Example Implementation
@@ -212,7 +351,7 @@ You can either `await awaitable_llm_result` if you need the full result, or use
 Here is a simple example of how to use JMux to parse a JSON stream:
 
 ```python
-import
+import anyio
 from enum import Enum
 from types import NoneType
 from pydantic import BaseModel
@@ -297,10 +436,13 @@ async def main():
         nested_key_str = await key_nested.key_str
         print(f"nested_key_str: {nested_key_str}")
 
-
+    async with anyio.create_task_group() as tg:
+        tg.start_soon(produce)
+        tg.start_soon(consume)
 
 if __name__ == "__main__":
-
+    anyio.run(main, backend="asyncio")
+    # Or use trio: anyio.run(main, backend="trio")
 ```
 
 ## API Reference
@@ -349,13 +491,35 @@ Additionally the following type is supported without being wrapped into `list`:
 
 This allows you to fully stream strings directly to a sink.
 
-
+### Class `jmux.StreamableBaseModel`
 
-
+A Pydantic `BaseModel` subclass used for defining models that can be automatically converted to JMux classes via the `jmux generate` CLI command.
+
+```python
+from jmux import StreamableBaseModel
 
-
+class MyModel(StreamableBaseModel):
+    name: str
+    age: int
+```
+
+### Class `jmux.Streamed`
+
+A marker class used with `typing.Annotated` to indicate that a string field should be streamed character-by-character rather than awaited as a complete value.
 
-
+```python
+from typing import Annotated
+from jmux import StreamableBaseModel, Streamed
+
+class MyModel(StreamableBaseModel):
+    content: Annotated[str, Streamed]
+```
+
+When generating JMux classes, fields annotated with `Streamed` will be converted to `StreamableValues[str]` instead of `AwaitableValue[str]`.
+
+## License
+
+This project is licensed under the terms of the MIT license. See the [LICENSE](LICENSE) file for details.
 
 ## Contributions
 
````
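The type-mapping table introduced above implies that the generator has to tell `Annotated[str, Streamed]` apart from a plain `str` and from `list[T]`. A minimal sketch of how that distinction can be made with standard-library introspection (illustrative only, not jmux's actual `generator.py`; the `Streamed` and model classes here are local stand-ins):

```python
# Illustrative sketch: not jmux's actual generator code. `Streamed` is a
# local stand-in for jmux's marker class.
from typing import Annotated, get_args, get_origin, get_type_hints


class Streamed:
    pass


class LlmResponse:
    thought: str
    tool_code: Annotated[str, Streamed]
    tags: list[str]


def sink_type(annotation) -> str:
    # Annotated[T, ...] keeps T in get_args()[0] and its metadata after it.
    if get_origin(annotation) is Annotated:
        base, *metadata = get_args(annotation)
        if any(meta is Streamed or isinstance(meta, Streamed) for meta in metadata):
            return f"StreamableValues[{base.__name__}]"
        annotation = base
    if get_origin(annotation) is list:
        (item,) = get_args(annotation)
        return f"StreamableValues[{item.__name__}]"
    return f"AwaitableValue[{annotation.__name__}]"


# include_extras=True preserves the Annotated metadata.
for name, annotation in get_type_hints(LlmResponse, include_extras=True).items():
    print(f"{name}: {sink_type(annotation)}")
# thought: AwaitableValue[str]
# tool_code: StreamableValues[str]
# tags: StreamableValues[str]
```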
The `{jmux-0.0.5 → jmux-0.1.0}/README.md` diff contains exactly the same changes, since PKG-INFO embeds the README as the package's long description; only the line offsets differ because README.md lacks the metadata header. Its seven hunks (`@@ -8,8 +8,10 @@`, `@@ -22,21 +24,155 @@`, `@@ -53,7 +189,7 @@`, `@@ -79,15 +215,16 @@`, `@@ -159,7 +296,7 @@`, `@@ -244,10 +381,13 @@`, `@@ -296,13 +436,35 @@`) mirror, line for line, the seven README-body hunks of the PKG-INFO diff above.
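Both examples in the diff above swap `asyncio`-specific calls for `anyio` ones (`anyio.sleep`, `anyio.create_task_group`, `anyio.run(..., backend=...)`), which is what makes the trio comments at the bottom of each example work. A small self-contained sketch, independent of jmux, of that pattern: because all concurrency below goes through anyio's portable primitives, switching backends means changing only the `backend` argument (the trio line is why `trio` was added to the test and dev extras):

```python
# Self-contained anyio sketch, independent of jmux: a producer feeding
# fragments to a consumer through a memory object stream.
import anyio


async def producer(send_stream):
    async with send_stream:
        for fragment in ["print(", "'Hello, World!'", ")"]:
            await send_stream.send(fragment)
            await anyio.sleep(0.01)  # simulate network latency


async def main():
    send_stream, receive_stream = anyio.create_memory_object_stream()
    async with anyio.create_task_group() as tg:
        tg.start_soon(producer, send_stream)
        async with receive_stream:
            async for fragment in receive_stream:
                print(f" -> {fragment}")


if __name__ == "__main__":
    anyio.run(main, backend="asyncio")
    # Identical behavior on trio (requires `pip install trio`):
    # anyio.run(main, backend="trio")
```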
````diff
--- jmux-0.0.5/pyproject.toml
+++ jmux-0.1.0/pyproject.toml
@@ -17,6 +17,9 @@ authors = [{ name = "Johannes A.I. Unruh", email = "johannes@unruh.ai" }]
 dependencies = ["anyio>=4.0.0", "pydantic>=2.0.0"]
 keywords = ["demultiplexer", "python", "package", "json"]
 
+[project.scripts]
+jmux = "jmux.cli:main"
+
 [tool.setuptools_scm]
 
 [project.urls]
@@ -24,11 +27,12 @@ Homepage = "https://github.com/jaunruh/jmux"
 Repository = "https://github.com/jaunruh/jmux"
 
 [project.optional-dependencies]
-test = ["pytest", "pytest-anyio"]
+test = ["pytest", "pytest-anyio", "trio"]
 dev = [
     "ruff",
     "pytest",
     "pytest-anyio",
+    "trio",
     "uv",
     "build",
     "twine",
@@ -39,8 +43,9 @@ dev = [
 [tool.ruff]
 # Ruff configuration example
 line-length = 88
-select = ["E", "F", "I"]
+select = ["E", "F", "I"] # Errors, Fixes, Import sorting
 ignore = []
+exclude = ["src/jmux/generated/"]
 
 # Or only in test files:
 [tool.ruff.per-file-ignores]
````
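The new `[project.scripts]` table is what makes the `jmux generate` command from the README available after `pip install jmux`: pip generates a `jmux` console script that imports `jmux.cli` and calls its `main`. The actual `src/jmux/cli.py` (62 added lines) is not shown in this diff, so the following is only a hypothetical sketch of the shape that entry point implies, with a `generate` subcommand matching the documented `jmux generate --root <directory>` usage:

```python
# Hypothetical sketch only: the real src/jmux/cli.py is not part of this diff.
import argparse


def main() -> None:
    parser = argparse.ArgumentParser(prog="jmux")
    subcommands = parser.add_subparsers(dest="command", required=True)

    generate = subcommands.add_parser(
        "generate",
        help="Generate JMux classes from StreamableBaseModel subclasses",
    )
    generate.add_argument("--root", default=".", help="directory to scan recursively")

    args = parser.parse_args()
    if args.command == "generate":
        # Documented behavior: scan --root for StreamableBaseModel subclasses
        # and write the generated classes to src/jmux/generated/__init__.py.
        print(f"Scanning {args.root} ...")  # placeholder for the real generator


if __name__ == "__main__":
    main()
```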