qtype 0.0.16__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qtype/application/commons/tools.py +1 -1
- qtype/application/converters/tools_from_api.py +5 -5
- qtype/application/converters/tools_from_module.py +2 -2
- qtype/application/converters/types.py +14 -43
- qtype/application/documentation.py +1 -1
- qtype/application/facade.py +94 -73
- qtype/base/types.py +227 -7
- qtype/cli.py +4 -0
- qtype/commands/convert.py +20 -8
- qtype/commands/generate.py +19 -27
- qtype/commands/run.py +73 -36
- qtype/commands/serve.py +74 -54
- qtype/commands/validate.py +34 -8
- qtype/commands/visualize.py +46 -22
- qtype/dsl/__init__.py +6 -5
- qtype/dsl/custom_types.py +1 -1
- qtype/dsl/domain_types.py +65 -5
- qtype/dsl/linker.py +384 -0
- qtype/dsl/loader.py +315 -0
- qtype/dsl/model.py +612 -363
- qtype/dsl/parser.py +200 -0
- qtype/dsl/types.py +50 -0
- qtype/interpreter/api.py +57 -136
- qtype/interpreter/auth/aws.py +19 -9
- qtype/interpreter/auth/generic.py +93 -16
- qtype/interpreter/base/base_step_executor.py +436 -0
- qtype/interpreter/base/batch_step_executor.py +171 -0
- qtype/interpreter/base/exceptions.py +50 -0
- qtype/interpreter/base/executor_context.py +74 -0
- qtype/interpreter/base/factory.py +117 -0
- qtype/interpreter/base/progress_tracker.py +110 -0
- qtype/interpreter/base/secrets.py +339 -0
- qtype/interpreter/base/step_cache.py +74 -0
- qtype/interpreter/base/stream_emitter.py +469 -0
- qtype/interpreter/conversions.py +462 -22
- qtype/interpreter/converters.py +77 -0
- qtype/interpreter/endpoints.py +355 -0
- qtype/interpreter/executors/agent_executor.py +242 -0
- qtype/interpreter/executors/aggregate_executor.py +93 -0
- qtype/interpreter/executors/decoder_executor.py +163 -0
- qtype/interpreter/executors/doc_to_text_executor.py +112 -0
- qtype/interpreter/executors/document_embedder_executor.py +107 -0
- qtype/interpreter/executors/document_search_executor.py +122 -0
- qtype/interpreter/executors/document_source_executor.py +118 -0
- qtype/interpreter/executors/document_splitter_executor.py +105 -0
- qtype/interpreter/executors/echo_executor.py +63 -0
- qtype/interpreter/executors/field_extractor_executor.py +160 -0
- qtype/interpreter/executors/file_source_executor.py +101 -0
- qtype/interpreter/executors/file_writer_executor.py +110 -0
- qtype/interpreter/executors/index_upsert_executor.py +228 -0
- qtype/interpreter/executors/invoke_embedding_executor.py +92 -0
- qtype/interpreter/executors/invoke_flow_executor.py +51 -0
- qtype/interpreter/executors/invoke_tool_executor.py +358 -0
- qtype/interpreter/executors/llm_inference_executor.py +272 -0
- qtype/interpreter/executors/prompt_template_executor.py +78 -0
- qtype/interpreter/executors/sql_source_executor.py +106 -0
- qtype/interpreter/executors/vector_search_executor.py +91 -0
- qtype/interpreter/flow.py +159 -22
- qtype/interpreter/metadata_api.py +115 -0
- qtype/interpreter/resource_cache.py +5 -4
- qtype/interpreter/rich_progress.py +225 -0
- qtype/interpreter/stream/chat/__init__.py +15 -0
- qtype/interpreter/stream/chat/converter.py +391 -0
- qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
- qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
- qtype/interpreter/stream/chat/vercel.py +609 -0
- qtype/interpreter/stream/utils/__init__.py +15 -0
- qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
- qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
- qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
- qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
- qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
- qtype/interpreter/telemetry.py +135 -8
- qtype/interpreter/tools/__init__.py +5 -0
- qtype/interpreter/tools/function_tool_helper.py +265 -0
- qtype/interpreter/types.py +330 -0
- qtype/interpreter/typing.py +83 -89
- qtype/interpreter/ui/404/index.html +1 -1
- qtype/interpreter/ui/404.html +1 -1
- qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
- qtype/interpreter/ui/_next/static/chunks/{393-8fd474427f8e19ce.js → 434-b2112d19f25c44ff.js} +3 -3
- qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
- qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
- qtype/interpreter/ui/icon.png +0 -0
- qtype/interpreter/ui/index.html +1 -1
- qtype/interpreter/ui/index.txt +4 -4
- qtype/semantic/checker.py +583 -0
- qtype/semantic/generate.py +262 -83
- qtype/semantic/loader.py +95 -0
- qtype/semantic/model.py +436 -159
- qtype/semantic/resolver.py +63 -19
- qtype/semantic/visualize.py +28 -31
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/METADATA +16 -3
- qtype-0.1.1.dist-info/RECORD +135 -0
- qtype/dsl/base_types.py +0 -38
- qtype/dsl/validator.py +0 -465
- qtype/interpreter/batch/__init__.py +0 -0
- qtype/interpreter/batch/file_sink_source.py +0 -162
- qtype/interpreter/batch/flow.py +0 -95
- qtype/interpreter/batch/sql_source.py +0 -92
- qtype/interpreter/batch/step.py +0 -74
- qtype/interpreter/batch/types.py +0 -41
- qtype/interpreter/batch/utils.py +0 -178
- qtype/interpreter/chat/chat_api.py +0 -237
- qtype/interpreter/chat/vercel.py +0 -314
- qtype/interpreter/exceptions.py +0 -10
- qtype/interpreter/step.py +0 -67
- qtype/interpreter/steps/__init__.py +0 -0
- qtype/interpreter/steps/agent.py +0 -114
- qtype/interpreter/steps/condition.py +0 -36
- qtype/interpreter/steps/decoder.py +0 -88
- qtype/interpreter/steps/llm_inference.py +0 -171
- qtype/interpreter/steps/prompt_template.py +0 -54
- qtype/interpreter/steps/search.py +0 -24
- qtype/interpreter/steps/tool.py +0 -219
- qtype/interpreter/streaming_helpers.py +0 -123
- qtype/interpreter/ui/_next/static/chunks/app/page-7e26b6156cfb55d3.js +0 -1
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
- qtype/interpreter/ui/_next/static/css/b40532b0db09cce3.css +0 -3
- qtype/interpreter/ui/favicon.ico +0 -0
- qtype/loader.py +0 -390
- qtype-0.0.16.dist-info/RECORD +0 -106
- /qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/WHEEL +0 -0
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/entry_points.txt +0 -0
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/licenses/LICENSE +0 -0
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import threading
|
|
5
|
+
from collections import deque
|
|
6
|
+
from typing import Deque, Dict
|
|
7
|
+
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
from rich.live import Live
|
|
10
|
+
from rich.panel import Panel
|
|
11
|
+
from rich.progress import (
|
|
12
|
+
Progress,
|
|
13
|
+
ProgressColumn,
|
|
14
|
+
TaskProgressColumn,
|
|
15
|
+
TextColumn,
|
|
16
|
+
TimeElapsedColumn,
|
|
17
|
+
TimeRemainingColumn,
|
|
18
|
+
)
|
|
19
|
+
from rich.text import Text
|
|
20
|
+
|
|
21
|
+
from qtype.interpreter.types import ProgressCallback
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class RateColumn(ProgressColumn):
    """Render a task's throughput as '<rate> <unit>/s' (e.g. '123 msg/s')."""

    def __init__(self, unit: str = "msg") -> None:
        super().__init__()
        self.unit = unit

    def render(self, task) -> Text:  # type: ignore[override]
        """Format ``task.speed`` with precision that scales with magnitude."""
        rate = task.speed or 0.0

        # No measurable speed yet: show a placeholder instead of 0.00.
        if rate <= 0:
            return Text(f"- {self.unit}/s")

        # Simple formatting similar-ish to tqdm: more decimals for slow rates.
        if rate < 1:
            formatted = f"{rate:.2f}"
        elif rate < 100:
            formatted = f"{rate:4.1f}"
        else:
            formatted = f"{rate:4.0f}"

        return Text(f"{formatted} {self.unit}/s")
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class SparklineColumn(ProgressColumn):
    """Tiny throughput trend graph using block characters.

    Keeps a bounded per-task history of observed speeds and renders it as
    a sparkline (e.g. '▁▂▅█'), normalized to the min/max of the window.
    """

    # Sparkline glyphs, lowest to highest value.
    _BLOCKS = "▁▂▃▄▅▆▇█"

    def __init__(self, max_samples: int = 20) -> None:
        super().__init__()
        self.max_samples = max_samples
        # Per-task speed history. NOTE(review): entries are never evicted;
        # acceptable for short-lived displays with a bounded task count.
        self._history: Dict[int, Deque[float]] = {}

    def render(self, task) -> Text:  # type: ignore[override]
        """Append the task's current speed and draw its trend line."""
        speed = task.speed or 0.0

        history = self._history.get(task.id)
        if history is None:
            history = self._history[task.id] = deque(maxlen=self.max_samples)

        history.append(speed)

        # Nothing meaningful to plot until a positive sample arrives.
        # (The history is never empty here — we just appended to it, so the
        # original `not history` guard was dead code and has been removed.)
        if all(v <= 0 for v in history):
            return Text("")

        min_s = min(history)
        max_s = max(history)
        # Guard against zero range when every sample is identical.
        rng = max(max_s - min_s, 1e-9)

        top_index = len(self._BLOCKS) - 1
        chars = [
            self._BLOCKS[int((v - min_s) / rng * top_index)] for v in history
        ]
        return Text("".join(chars))
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
class RichProgressCallback(ProgressCallback):
    """Progress callback that uses Rich to display progress bars.

    Displays a progress row for each step, updating in place.
    Colors the step label based on error rate:
    - Green: error rate <= 1%
    - Yellow: 1% < error rate <= 5%
    - Red: error rate > 5%

    Attributes:
        order: Optional list defining the order of steps progress rows.
    """

    def __init__(
        self,
        order: list[str] | None = None,
    ) -> None:
        """Build the display and optionally pre-register step rows.

        Args:
            order: Step ids whose rows should appear in this order; steps
                not listed are created lazily when first reported.
        """
        super().__init__()
        self.order = order or []
        # Callbacks may arrive from worker threads: serialize all
        # Progress/Live mutations behind one lock.
        self._lock = threading.Lock()
        self.console = Console()

        # One shared Progress instance for all steps
        # Columns: description | % | rate | sparkline | ✔ | ✖ | hits | misses | elapsed | remaining
        self.progress = Progress(
            TextColumn("[progress.description]{task.description}"),
            TaskProgressColumn(),
            RateColumn(unit="msg"),
            SparklineColumn(max_samples=20),
            TextColumn("[green]✔[/green] {task.fields[succeeded]} succeeded"),
            TextColumn("[red]✖[/red] {task.fields[errors]} errors"),
            TextColumn("[cyan]⟳[/cyan] {task.fields[cache_hits]} hits"),
            TextColumn(
                "[magenta]✗[/magenta] {task.fields[cache_misses]} misses"
            ),
            TimeElapsedColumn(),
            TimeRemainingColumn(),
            console=self.console,
            expand=True,
        )

        # Wrap progress in a panel
        self.panel = Panel(
            self.progress,
            title="[bold cyan]Flow Progress[/bold cyan]",
            border_style="bright_blue",
            padding=(1, 2),
        )

        # Live container for the panel
        self.live = Live(
            self.panel,
            console=self.console,
            refresh_per_second=10,
        )

        # Map step_id -> Rich task id
        self.tasks: Dict[str, int] = {}
        self._started = False

        # Pre-create tasks in the desired order if provided
        for step_id in self.order:
            task_id = self.progress.add_task(
                f"Step {step_id}",
                total=None,  # we’ll update this once we know it
                succeeded=0,
                errors=0,
                # Bug fix: the cache columns format
                # {task.fields[cache_hits]} / {task.fields[cache_misses]},
                # so every task must carry these fields or rendering a
                # pre-created row raises KeyError before its first update.
                cache_hits="-",
                cache_misses="-",
            )
            self.tasks[step_id] = task_id

    def _ensure_started(self) -> None:
        """Start the Live display on first use (idempotent)."""
        if not self._started:
            self.live.start()
            self._started = True

    def __call__(
        self,
        step_id: str,
        items_processed: int,
        items_in_error: int,
        items_succeeded: int,
        total_items: int | None,
        cache_hits: int | None = None,
        cache_misses: int | None = None,
    ) -> None:
        """Record progress for a step, creating its row if needed.

        Args:
            step_id: Identifier of the step being reported.
            items_processed: Items handled so far (successes + errors).
            items_in_error: Items that failed.
            items_succeeded: Items that succeeded.
            total_items: Expected total, or None if unknown.
            cache_hits: Optional cache-hit count ('-' shown when None).
            cache_misses: Optional cache-miss count ('-' shown when None).
        """
        with self._lock:
            self._ensure_started()

            # Create a task lazily if we didn't pre-create it
            if step_id not in self.tasks:
                task_id = self.progress.add_task(
                    f"Step {step_id}",
                    total=total_items,
                    succeeded=items_succeeded,
                    errors=items_in_error,
                    # Seed with '-' rather than None so the columns never
                    # render a literal "None".
                    cache_hits=cache_hits if cache_hits is not None else "-",
                    cache_misses=(
                        cache_misses if cache_misses is not None else "-"
                    ),
                )
                self.tasks[step_id] = task_id

            task_id = self.tasks[step_id]
            color = self.compute_color(items_processed, items_in_error)

            update_kwargs = {
                "completed": items_processed,
                "succeeded": items_succeeded,
                "errors": items_in_error,
                "description": f"[{color}]Step {step_id}[/{color}]",
            }

            update_kwargs["cache_hits"] = (
                cache_hits if cache_hits is not None else "-"
            )
            update_kwargs["cache_misses"] = (
                cache_misses if cache_misses is not None else "-"
            )
            if total_items is not None:
                update_kwargs["total"] = total_items

            self.progress.update(task_id, **update_kwargs)

    def compute_color(self, items_processed: int, items_in_error: int) -> str:
        """Return the Rich color name reflecting a step's error rate."""
        # Avoid divide-by-zero
        if items_processed == 0:
            return "green"

        error_rate = items_in_error / items_processed

        if error_rate > 0.05:
            return "red"
        elif error_rate > 0.01:
            return "yellow"
        else:
            return "green"

    def close(self) -> None:
        """Stop the Live display; safe to call more than once."""
        with self._lock:
            if self._started:
                self.live.stop()
                self._started = False
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Stream and chat utilities for QType interpreter.
|
|
3
|
+
|
|
4
|
+
This package provides conversions between QType's internal streaming
|
|
5
|
+
events and external chat protocols like Vercel AI SDK.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from qtype.interpreter.stream.chat.converter import (
|
|
11
|
+
StreamEventConverter,
|
|
12
|
+
format_stream_events_as_sse,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
__all__ = ["StreamEventConverter", "format_stream_events_as_sse"]
|
|
@@ -0,0 +1,391 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Converter for transforming StreamEvents to Vercel AI SDK UIMessageChunks.
|
|
3
|
+
|
|
4
|
+
This module provides a stateful converter that transforms internal StreamEvent
|
|
5
|
+
types (emitted by step executors) into Vercel AI SDK UIMessageChunk types
|
|
6
|
+
suitable for streaming to the frontend via SSE.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
converter = StreamEventConverter()
|
|
10
|
+
for event in stream_events:
|
|
11
|
+
for chunk in converter.convert(event):
|
|
12
|
+
# Send chunk to frontend
|
|
13
|
+
yield f"data: {chunk.model_dump_json(by_alias=True)}\n\n"
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
from __future__ import annotations
|
|
17
|
+
|
|
18
|
+
import uuid
|
|
19
|
+
from collections.abc import AsyncIterator, Iterator
|
|
20
|
+
from typing import Any
|
|
21
|
+
|
|
22
|
+
from qtype.interpreter.stream.chat.vercel import (
|
|
23
|
+
ErrorChunk,
|
|
24
|
+
FinishChunk,
|
|
25
|
+
FinishStepChunk,
|
|
26
|
+
MessageMetadataChunk,
|
|
27
|
+
ReasoningDeltaChunk,
|
|
28
|
+
ReasoningEndChunk,
|
|
29
|
+
ReasoningStartChunk,
|
|
30
|
+
StartChunk,
|
|
31
|
+
StartStepChunk,
|
|
32
|
+
TextDeltaChunk,
|
|
33
|
+
TextEndChunk,
|
|
34
|
+
TextStartChunk,
|
|
35
|
+
ToolInputAvailableChunk,
|
|
36
|
+
ToolInputDeltaChunk,
|
|
37
|
+
ToolInputStartChunk,
|
|
38
|
+
ToolOutputAvailableChunk,
|
|
39
|
+
ToolOutputErrorChunk,
|
|
40
|
+
UIMessageChunk,
|
|
41
|
+
)
|
|
42
|
+
from qtype.interpreter.types import (
|
|
43
|
+
ErrorEvent,
|
|
44
|
+
ReasoningStreamDeltaEvent,
|
|
45
|
+
ReasoningStreamEndEvent,
|
|
46
|
+
ReasoningStreamStartEvent,
|
|
47
|
+
StatusEvent,
|
|
48
|
+
StepEndEvent,
|
|
49
|
+
StepStartEvent,
|
|
50
|
+
StreamEvent,
|
|
51
|
+
TextStreamDeltaEvent,
|
|
52
|
+
TextStreamEndEvent,
|
|
53
|
+
TextStreamStartEvent,
|
|
54
|
+
ToolExecutionEndEvent,
|
|
55
|
+
ToolExecutionErrorEvent,
|
|
56
|
+
ToolExecutionStartEvent,
|
|
57
|
+
)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class StreamEventConverter:
|
|
61
|
+
"""
|
|
62
|
+
Converts internal StreamEvents to Vercel AI SDK UIMessageChunks.
|
|
63
|
+
|
|
64
|
+
This converter maintains state to track active text streams and generates
|
|
65
|
+
appropriate Vercel chunks for each event type. Some events map to multiple
|
|
66
|
+
chunks (e.g., StatusEvent becomes a wrapped step with text chunks).
|
|
67
|
+
|
|
68
|
+
Example:
|
|
69
|
+
```python
|
|
70
|
+
converter = StreamEventConverter()
|
|
71
|
+
|
|
72
|
+
# Convert a status message
|
|
73
|
+
event = StatusEvent(step=step, message="Processing...")
|
|
74
|
+
for chunk in converter.convert(event):
|
|
75
|
+
# Yields: StartStepChunk, TextStartChunk, TextDeltaChunk,
|
|
76
|
+
# TextEndChunk, FinishStepChunk
|
|
77
|
+
send_to_client(chunk)
|
|
78
|
+
|
|
79
|
+
# Convert text streaming
|
|
80
|
+
start_event = TextStreamStartEvent(step=step, stream_id="s1")
|
|
81
|
+
for chunk in converter.convert(start_event):
|
|
82
|
+
# Yields: TextStartChunk
|
|
83
|
+
send_to_client(chunk)
|
|
84
|
+
|
|
85
|
+
delta_event = TextStreamDeltaEvent(
|
|
86
|
+
step=step, stream_id="s1", delta="Hello"
|
|
87
|
+
)
|
|
88
|
+
for chunk in converter.convert(delta_event):
|
|
89
|
+
# Yields: TextDeltaChunk
|
|
90
|
+
send_to_client(chunk)
|
|
91
|
+
```
|
|
92
|
+
"""
|
|
93
|
+
|
|
94
|
+
def __init__(self) -> None:
|
|
95
|
+
"""Initialize the converter with empty state."""
|
|
96
|
+
# Map stream_id to Vercel chunk_id for all streams (text, reasoning, etc.)
|
|
97
|
+
self._active_streams: dict[str, str] = {}
|
|
98
|
+
|
|
99
|
+
def convert(self, event: StreamEvent) -> Iterator[UIMessageChunk]:
|
|
100
|
+
"""
|
|
101
|
+
Convert a StreamEvent to one or more Vercel UIMessageChunks.
|
|
102
|
+
|
|
103
|
+
Args:
|
|
104
|
+
event: The StreamEvent to convert
|
|
105
|
+
|
|
106
|
+
Yields:
|
|
107
|
+
One or more UIMessageChunk instances
|
|
108
|
+
"""
|
|
109
|
+
# Use pattern matching for clean dispatch
|
|
110
|
+
match event.type:
|
|
111
|
+
case "text_stream_start":
|
|
112
|
+
yield from self._convert_text_stream_start(event) # type: ignore[arg-type]
|
|
113
|
+
case "text_stream_delta":
|
|
114
|
+
yield from self._convert_text_stream_delta(event) # type: ignore[arg-type]
|
|
115
|
+
case "text_stream_end":
|
|
116
|
+
yield from self._convert_text_stream_end(event) # type: ignore[arg-type]
|
|
117
|
+
case "reasoning_stream_start":
|
|
118
|
+
yield from self._convert_reasoning_stream_start(event) # type: ignore[arg-type]
|
|
119
|
+
case "reasoning_stream_delta":
|
|
120
|
+
yield from self._convert_reasoning_stream_delta(event) # type: ignore[arg-type]
|
|
121
|
+
case "reasoning_stream_end":
|
|
122
|
+
yield from self._convert_reasoning_stream_end(event) # type: ignore[arg-type]
|
|
123
|
+
case "status":
|
|
124
|
+
yield from self._convert_status(event) # type: ignore[arg-type]
|
|
125
|
+
case "step_start":
|
|
126
|
+
yield from self._convert_step_start(event) # type: ignore[arg-type]
|
|
127
|
+
case "step_end":
|
|
128
|
+
yield from self._convert_step_end(event) # type: ignore[arg-type]
|
|
129
|
+
case "tool_execution_start":
|
|
130
|
+
yield from self._convert_tool_execution_start(event) # type: ignore[arg-type]
|
|
131
|
+
case "tool_execution_end":
|
|
132
|
+
yield from self._convert_tool_execution_end(event) # type: ignore[arg-type]
|
|
133
|
+
case "tool_execution_error":
|
|
134
|
+
yield from self._convert_tool_execution_error(event) # type: ignore[arg-type]
|
|
135
|
+
case "error":
|
|
136
|
+
yield from self._convert_error(event) # type: ignore[arg-type]
|
|
137
|
+
case _:
|
|
138
|
+
# Unknown event type - log warning but don't fail
|
|
139
|
+
pass
|
|
140
|
+
|
|
141
|
+
def _convert_text_stream_start(
|
|
142
|
+
self, event: TextStreamStartEvent
|
|
143
|
+
) -> Iterator[UIMessageChunk]:
|
|
144
|
+
"""
|
|
145
|
+
Convert TextStreamStartEvent to TextStartChunk.
|
|
146
|
+
|
|
147
|
+
Registers the stream_id and creates a new Vercel chunk ID.
|
|
148
|
+
"""
|
|
149
|
+
chunk_id = str(uuid.uuid4())
|
|
150
|
+
self._active_streams[event.stream_id] = chunk_id
|
|
151
|
+
yield TextStartChunk(id=chunk_id)
|
|
152
|
+
|
|
153
|
+
def _convert_text_stream_delta(
|
|
154
|
+
self, event: TextStreamDeltaEvent
|
|
155
|
+
) -> Iterator[UIMessageChunk]:
|
|
156
|
+
"""
|
|
157
|
+
Convert TextStreamDeltaEvent to TextDeltaChunk.
|
|
158
|
+
|
|
159
|
+
Uses the chunk ID registered during text_stream_start.
|
|
160
|
+
"""
|
|
161
|
+
chunk_id = self._active_streams.get(event.stream_id)
|
|
162
|
+
if chunk_id:
|
|
163
|
+
yield TextDeltaChunk(id=chunk_id, delta=event.delta)
|
|
164
|
+
|
|
165
|
+
def _convert_reasoning_stream_delta(
|
|
166
|
+
self, event: ReasoningStreamDeltaEvent
|
|
167
|
+
) -> Iterator[UIMessageChunk]:
|
|
168
|
+
"""
|
|
169
|
+
Convert ReasoningStreamDeltaEvent to ReasoningDeltaChunk.
|
|
170
|
+
|
|
171
|
+
Uses the chunk ID registered during text_stream_start.
|
|
172
|
+
"""
|
|
173
|
+
chunk_id = self._active_streams.get(event.stream_id)
|
|
174
|
+
if chunk_id:
|
|
175
|
+
yield ReasoningDeltaChunk(id=chunk_id, delta=event.delta)
|
|
176
|
+
|
|
177
|
+
def _convert_text_stream_end(
|
|
178
|
+
self, event: TextStreamEndEvent
|
|
179
|
+
) -> Iterator[UIMessageChunk]:
|
|
180
|
+
"""
|
|
181
|
+
Convert TextStreamEndEvent to TextEndChunk.
|
|
182
|
+
|
|
183
|
+
Cleans up the stream_id registration.
|
|
184
|
+
"""
|
|
185
|
+
chunk_id = self._active_streams.pop(event.stream_id, None)
|
|
186
|
+
if chunk_id:
|
|
187
|
+
yield TextEndChunk(id=chunk_id)
|
|
188
|
+
|
|
189
|
+
def _convert_reasoning_stream_start(
|
|
190
|
+
self, event: ReasoningStreamStartEvent
|
|
191
|
+
) -> Iterator[UIMessageChunk]:
|
|
192
|
+
"""
|
|
193
|
+
Convert ReasoningStreamStartEvent to ReasoningStartChunk.
|
|
194
|
+
|
|
195
|
+
Registers the stream_id and creates a new Vercel chunk ID for reasoning.
|
|
196
|
+
"""
|
|
197
|
+
chunk_id = str(uuid.uuid4())
|
|
198
|
+
self._active_streams[event.stream_id] = chunk_id
|
|
199
|
+
yield ReasoningStartChunk(id=chunk_id)
|
|
200
|
+
|
|
201
|
+
def _convert_reasoning_stream_delta(
|
|
202
|
+
self, event: ReasoningStreamDeltaEvent
|
|
203
|
+
) -> Iterator[UIMessageChunk]:
|
|
204
|
+
"""
|
|
205
|
+
Convert ReasoningStreamDeltaEvent to ReasoningDeltaChunk.
|
|
206
|
+
|
|
207
|
+
Uses the chunk ID registered during reasoning_stream_start.
|
|
208
|
+
"""
|
|
209
|
+
chunk_id = self._active_streams.get(event.stream_id)
|
|
210
|
+
if chunk_id:
|
|
211
|
+
yield ReasoningDeltaChunk(id=chunk_id, delta=event.delta)
|
|
212
|
+
|
|
213
|
+
def _convert_reasoning_stream_end(
|
|
214
|
+
self, event: ReasoningStreamEndEvent
|
|
215
|
+
) -> Iterator[UIMessageChunk]:
|
|
216
|
+
"""
|
|
217
|
+
Convert ReasoningStreamEndEvent to ReasoningEndChunk.
|
|
218
|
+
|
|
219
|
+
Cleans up the stream_id registration.
|
|
220
|
+
"""
|
|
221
|
+
chunk_id = self._active_streams.pop(event.stream_id, None)
|
|
222
|
+
if chunk_id:
|
|
223
|
+
yield ReasoningEndChunk(id=chunk_id)
|
|
224
|
+
|
|
225
|
+
def _convert_status(self, event: StatusEvent) -> Iterator[UIMessageChunk]:
|
|
226
|
+
"""
|
|
227
|
+
Convert StatusEvent to MessageMetadataChunk.
|
|
228
|
+
|
|
229
|
+
Status messages are sent as message metadata with the 'statusMessage'
|
|
230
|
+
key, allowing the frontend to display them separately from content.
|
|
231
|
+
"""
|
|
232
|
+
yield MessageMetadataChunk(
|
|
233
|
+
messageMetadata={"statusMessage": event.message}
|
|
234
|
+
)
|
|
235
|
+
|
|
236
|
+
def _convert_step_start(
|
|
237
|
+
self, event: StepStartEvent
|
|
238
|
+
) -> Iterator[UIMessageChunk]:
|
|
239
|
+
"""Convert StepStartEvent to StartStepChunk."""
|
|
240
|
+
yield StartStepChunk()
|
|
241
|
+
yield MessageMetadataChunk(messageMetadata={"step_id": event.step.id})
|
|
242
|
+
|
|
243
|
+
def _convert_step_end(
|
|
244
|
+
self, event: StepEndEvent
|
|
245
|
+
) -> Iterator[UIMessageChunk]:
|
|
246
|
+
"""Convert StepEndEvent to FinishStepChunk."""
|
|
247
|
+
yield FinishStepChunk()
|
|
248
|
+
|
|
249
|
+
def _convert_tool_execution_start(
|
|
250
|
+
self, event: ToolExecutionStartEvent
|
|
251
|
+
) -> Iterator[UIMessageChunk]:
|
|
252
|
+
"""
|
|
253
|
+
Convert ToolExecutionStartEvent to proper tool input sequence.
|
|
254
|
+
|
|
255
|
+
Following Vercel's protocol:
|
|
256
|
+
1. ToolInputStartChunk - Begin receiving tool input
|
|
257
|
+
2. ToolInputDeltaChunk - Incremental input text (JSON being parsed)
|
|
258
|
+
3. ToolInputAvailableChunk - Complete input ready, tool can execute
|
|
259
|
+
"""
|
|
260
|
+
# 1. Start tool input streaming
|
|
261
|
+
yield ToolInputStartChunk(
|
|
262
|
+
toolCallId=event.tool_call_id,
|
|
263
|
+
toolName=event.tool_name,
|
|
264
|
+
providerExecuted=True, # Tools are executed on the server
|
|
265
|
+
)
|
|
266
|
+
|
|
267
|
+
# 2. Stream the input as JSON text delta
|
|
268
|
+
import json
|
|
269
|
+
|
|
270
|
+
input_json = json.dumps(event.tool_input)
|
|
271
|
+
yield ToolInputDeltaChunk(
|
|
272
|
+
toolCallId=event.tool_call_id,
|
|
273
|
+
inputTextDelta=input_json,
|
|
274
|
+
)
|
|
275
|
+
|
|
276
|
+
# 3. Signal input is complete and ready for execution
|
|
277
|
+
yield ToolInputAvailableChunk(
|
|
278
|
+
toolCallId=event.tool_call_id,
|
|
279
|
+
toolName=event.tool_name,
|
|
280
|
+
input=event.tool_input,
|
|
281
|
+
providerExecuted=True, # Tools are executed on the server
|
|
282
|
+
)
|
|
283
|
+
|
|
284
|
+
def _convert_tool_execution_end(
|
|
285
|
+
self, event: ToolExecutionEndEvent
|
|
286
|
+
) -> Iterator[UIMessageChunk]:
|
|
287
|
+
"""
|
|
288
|
+
Convert ToolExecutionEndEvent to ToolOutputAvailableChunk.
|
|
289
|
+
|
|
290
|
+
Signals successful tool completion with output.
|
|
291
|
+
"""
|
|
292
|
+
yield ToolOutputAvailableChunk(
|
|
293
|
+
toolCallId=event.tool_call_id,
|
|
294
|
+
output=event.tool_output,
|
|
295
|
+
providerExecuted=True, # Tools are executed on the server
|
|
296
|
+
)
|
|
297
|
+
|
|
298
|
+
def _convert_tool_execution_error(
|
|
299
|
+
self, event: ToolExecutionErrorEvent
|
|
300
|
+
) -> Iterator[UIMessageChunk]:
|
|
301
|
+
"""
|
|
302
|
+
Convert ToolExecutionErrorEvent to ToolOutputErrorChunk.
|
|
303
|
+
|
|
304
|
+
Signals tool execution failure with error message.
|
|
305
|
+
"""
|
|
306
|
+
yield ToolOutputErrorChunk(
|
|
307
|
+
toolCallId=event.tool_call_id,
|
|
308
|
+
errorText=event.error_message,
|
|
309
|
+
providerExecuted=True, # Tools are executed on the server
|
|
310
|
+
)
|
|
311
|
+
|
|
312
|
+
def _convert_error(self, event: ErrorEvent) -> Iterator[UIMessageChunk]:
|
|
313
|
+
"""
|
|
314
|
+
Convert ErrorEvent to ErrorChunk.
|
|
315
|
+
|
|
316
|
+
General error that occurred during execution.
|
|
317
|
+
"""
|
|
318
|
+
yield ErrorChunk(errorText=event.error_message)
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
async def format_stream_events_as_sse(
    event_stream: AsyncIterator[StreamEvent | None],
    message_id: str | None = None,
    output_metadata: dict[str, Any] | None = None,
) -> AsyncIterator[str]:
    """
    Render a stream of StreamEvents as Vercel-protocol SSE strings.

    Emits a StartChunk, then every chunk produced by converting the
    incoming events, then a FinishChunk — each serialized as one
    Server-Sent-Events 'data:' line.

    Args:
        event_stream: Async iterator yielding StreamEvents; a None item
            signals the end of the stream.
        message_id: Optional message ID (a UUID is generated when None).
        output_metadata: Optional dict included in the FinishChunk metadata.

    Yields:
        SSE formatted strings (data: {json}\\n\\n)
    """

    def _sse_line(chunk: UIMessageChunk) -> str:
        # Serialize one chunk as a single SSE data line.
        payload = chunk.model_dump_json(by_alias=True, exclude_none=True)
        return f"data: {payload}\n\n"

    if message_id is None:
        message_id = str(uuid.uuid4())

    # Open the message stream with a unique ID.
    yield _sse_line(StartChunk(messageId=message_id))  # type: ignore[arg-type]

    # Stateful event-to-chunk conversion.
    converter = StreamEventConverter()

    async for event in event_stream:
        if event is None:
            break  # End-of-stream sentinel.
        for chunk in converter.convert(event):
            yield _sse_line(chunk)

    # Close the message stream, attaching any output metadata.
    yield _sse_line(FinishChunk(messageMetadata=output_metadata))  # type: ignore[arg-type]
|
|
@@ -1,9 +1,8 @@
|
|
|
1
1
|
import base64
|
|
2
2
|
|
|
3
|
-
import magic
|
|
4
3
|
import requests
|
|
5
4
|
|
|
6
|
-
from qtype.
|
|
5
|
+
from qtype.base.types import PrimitiveTypeEnum
|
|
7
6
|
from qtype.dsl.domain_types import ChatContent
|
|
8
7
|
|
|
9
8
|
|
|
@@ -17,6 +16,7 @@ def file_to_content(url: str) -> ChatContent:
|
|
|
17
16
|
Returns:
|
|
18
17
|
A ChatContent block with type 'file' and the file URL as content.
|
|
19
18
|
"""
|
|
19
|
+
import magic
|
|
20
20
|
|
|
21
21
|
# Get the bytes from the url.
|
|
22
22
|
if url.startswith("data:"):
|