qtype 0.0.16__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128) hide show
  1. qtype/application/commons/tools.py +1 -1
  2. qtype/application/converters/tools_from_api.py +5 -5
  3. qtype/application/converters/tools_from_module.py +2 -2
  4. qtype/application/converters/types.py +14 -43
  5. qtype/application/documentation.py +1 -1
  6. qtype/application/facade.py +94 -73
  7. qtype/base/types.py +227 -7
  8. qtype/cli.py +4 -0
  9. qtype/commands/convert.py +20 -8
  10. qtype/commands/generate.py +19 -27
  11. qtype/commands/run.py +73 -36
  12. qtype/commands/serve.py +74 -54
  13. qtype/commands/validate.py +34 -8
  14. qtype/commands/visualize.py +46 -22
  15. qtype/dsl/__init__.py +6 -5
  16. qtype/dsl/custom_types.py +1 -1
  17. qtype/dsl/domain_types.py +65 -5
  18. qtype/dsl/linker.py +384 -0
  19. qtype/dsl/loader.py +315 -0
  20. qtype/dsl/model.py +612 -363
  21. qtype/dsl/parser.py +200 -0
  22. qtype/dsl/types.py +50 -0
  23. qtype/interpreter/api.py +57 -136
  24. qtype/interpreter/auth/aws.py +19 -9
  25. qtype/interpreter/auth/generic.py +93 -16
  26. qtype/interpreter/base/base_step_executor.py +436 -0
  27. qtype/interpreter/base/batch_step_executor.py +171 -0
  28. qtype/interpreter/base/exceptions.py +50 -0
  29. qtype/interpreter/base/executor_context.py +74 -0
  30. qtype/interpreter/base/factory.py +117 -0
  31. qtype/interpreter/base/progress_tracker.py +110 -0
  32. qtype/interpreter/base/secrets.py +339 -0
  33. qtype/interpreter/base/step_cache.py +74 -0
  34. qtype/interpreter/base/stream_emitter.py +469 -0
  35. qtype/interpreter/conversions.py +462 -22
  36. qtype/interpreter/converters.py +77 -0
  37. qtype/interpreter/endpoints.py +355 -0
  38. qtype/interpreter/executors/agent_executor.py +242 -0
  39. qtype/interpreter/executors/aggregate_executor.py +93 -0
  40. qtype/interpreter/executors/decoder_executor.py +163 -0
  41. qtype/interpreter/executors/doc_to_text_executor.py +112 -0
  42. qtype/interpreter/executors/document_embedder_executor.py +107 -0
  43. qtype/interpreter/executors/document_search_executor.py +122 -0
  44. qtype/interpreter/executors/document_source_executor.py +118 -0
  45. qtype/interpreter/executors/document_splitter_executor.py +105 -0
  46. qtype/interpreter/executors/echo_executor.py +63 -0
  47. qtype/interpreter/executors/field_extractor_executor.py +160 -0
  48. qtype/interpreter/executors/file_source_executor.py +101 -0
  49. qtype/interpreter/executors/file_writer_executor.py +110 -0
  50. qtype/interpreter/executors/index_upsert_executor.py +228 -0
  51. qtype/interpreter/executors/invoke_embedding_executor.py +92 -0
  52. qtype/interpreter/executors/invoke_flow_executor.py +51 -0
  53. qtype/interpreter/executors/invoke_tool_executor.py +358 -0
  54. qtype/interpreter/executors/llm_inference_executor.py +272 -0
  55. qtype/interpreter/executors/prompt_template_executor.py +78 -0
  56. qtype/interpreter/executors/sql_source_executor.py +106 -0
  57. qtype/interpreter/executors/vector_search_executor.py +91 -0
  58. qtype/interpreter/flow.py +159 -22
  59. qtype/interpreter/metadata_api.py +115 -0
  60. qtype/interpreter/resource_cache.py +5 -4
  61. qtype/interpreter/rich_progress.py +225 -0
  62. qtype/interpreter/stream/chat/__init__.py +15 -0
  63. qtype/interpreter/stream/chat/converter.py +391 -0
  64. qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
  65. qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
  66. qtype/interpreter/stream/chat/vercel.py +609 -0
  67. qtype/interpreter/stream/utils/__init__.py +15 -0
  68. qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
  69. qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
  70. qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
  71. qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
  72. qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
  73. qtype/interpreter/telemetry.py +135 -8
  74. qtype/interpreter/tools/__init__.py +5 -0
  75. qtype/interpreter/tools/function_tool_helper.py +265 -0
  76. qtype/interpreter/types.py +330 -0
  77. qtype/interpreter/typing.py +83 -89
  78. qtype/interpreter/ui/404/index.html +1 -1
  79. qtype/interpreter/ui/404.html +1 -1
  80. qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
  81. qtype/interpreter/ui/_next/static/chunks/{393-8fd474427f8e19ce.js → 434-b2112d19f25c44ff.js} +3 -3
  82. qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
  83. qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
  84. qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
  85. qtype/interpreter/ui/icon.png +0 -0
  86. qtype/interpreter/ui/index.html +1 -1
  87. qtype/interpreter/ui/index.txt +4 -4
  88. qtype/semantic/checker.py +583 -0
  89. qtype/semantic/generate.py +262 -83
  90. qtype/semantic/loader.py +95 -0
  91. qtype/semantic/model.py +436 -159
  92. qtype/semantic/resolver.py +63 -19
  93. qtype/semantic/visualize.py +28 -31
  94. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/METADATA +16 -3
  95. qtype-0.1.1.dist-info/RECORD +135 -0
  96. qtype/dsl/base_types.py +0 -38
  97. qtype/dsl/validator.py +0 -465
  98. qtype/interpreter/batch/__init__.py +0 -0
  99. qtype/interpreter/batch/file_sink_source.py +0 -162
  100. qtype/interpreter/batch/flow.py +0 -95
  101. qtype/interpreter/batch/sql_source.py +0 -92
  102. qtype/interpreter/batch/step.py +0 -74
  103. qtype/interpreter/batch/types.py +0 -41
  104. qtype/interpreter/batch/utils.py +0 -178
  105. qtype/interpreter/chat/chat_api.py +0 -237
  106. qtype/interpreter/chat/vercel.py +0 -314
  107. qtype/interpreter/exceptions.py +0 -10
  108. qtype/interpreter/step.py +0 -67
  109. qtype/interpreter/steps/__init__.py +0 -0
  110. qtype/interpreter/steps/agent.py +0 -114
  111. qtype/interpreter/steps/condition.py +0 -36
  112. qtype/interpreter/steps/decoder.py +0 -88
  113. qtype/interpreter/steps/llm_inference.py +0 -171
  114. qtype/interpreter/steps/prompt_template.py +0 -54
  115. qtype/interpreter/steps/search.py +0 -24
  116. qtype/interpreter/steps/tool.py +0 -219
  117. qtype/interpreter/streaming_helpers.py +0 -123
  118. qtype/interpreter/ui/_next/static/chunks/app/page-7e26b6156cfb55d3.js +0 -1
  119. qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
  120. qtype/interpreter/ui/_next/static/css/b40532b0db09cce3.css +0 -3
  121. qtype/interpreter/ui/favicon.ico +0 -0
  122. qtype/loader.py +0 -390
  123. qtype-0.0.16.dist-info/RECORD +0 -106
  124. /qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
  125. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/WHEEL +0 -0
  126. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/entry_points.txt +0 -0
  127. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/licenses/LICENSE +0 -0
  128. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,609 @@
1
+ """
2
+ Pydantic models for Vercel AI SDK UI types.
3
+
4
+ This module reproduces the exact TypeScript type shapes from the AI SDK UI
5
+ as Pydantic models for use in Python implementations.
6
+
7
+ Based on Vercel AI SDK v5.0.2:
8
+ https://github.com/vercel/ai/tree/ai@5.0.2/packages/ai/src/ui
9
+
10
+ ## Streaming Protocol
11
+
12
+ The Vercel AI SDK uses Server-Sent Events (SSE) to stream UIMessageChunks
13
+ from the server to the client. Each chunk must be sent as:
14
+
15
+ data: {chunk_json}\\n\\n
16
+
17
+ ## Understanding Steps
18
+
19
+ Steps are visual grouping markers in the UI. The StartStepChunk and
20
+ FinishStepChunk are added to the UIMessage.parts array alongside content
21
+ chunks (text, reasoning, tools) to create visual groupings.
22
+
23
+ How it works:
24
+ 1. Send StartStepChunk - adds a 'step-start' marker to message.parts
25
+ 2. Send content chunks - text/reasoning/tool chunks are added to message.parts
26
+ 3. Send FinishStepChunk - adds a boundary AND resets activeTextParts/activeReasoningParts
27
+
28
+ The final UIMessage.parts array will contain ALL parts in sequence:
29
+ [
30
+ { type: 'step-start' },
31
+ { type: 'text', text: 'Hello world', state: 'done' },
32
+ { type: 'step-start' },
33
+ { type: 'text', text: 'Step 2', state: 'done' },
34
+ ]
35
+
36
+ Streaming Example - LLM generating a response as a step:
37
+ StartStepChunk() # Marker added to parts
38
+ TextStartChunk(id="llm-response") # Text part added to parts
39
+ TextDeltaChunk(id="llm-response", delta="The ") # Updates text in parts
40
+ TextDeltaChunk(id="llm-response", delta="answer ") # Updates text in parts
41
+ TextDeltaChunk(id="llm-response", delta="is 42") # Updates text in parts
42
+ TextEndChunk(id="llm-response") # Marks text as done
43
+ FinishStepChunk() # Resets active parts
44
+
45
+ Streaming Example - File writer status as a step:
46
+ StartStepChunk() # Marker added to parts
47
+ TextStartChunk(id="file-status") # Text part added to parts
48
+ TextDeltaChunk(id="file-status", delta="Writing 3 records...")
49
+ TextEndChunk(id="file-status") # Marks text as done
50
+ FinishStepChunk() # Resets active parts
51
+
52
+ Streaming Example - Multiple steps in sequence:
53
+ StartStepChunk() # Step 1 marker
54
+ TextStartChunk(id="step1")
55
+ TextDeltaChunk(id="step1", delta="Step 1 content")
56
+ TextEndChunk(id="step1")
57
+ FinishStepChunk() # Resets for next step
58
+
59
+ StartStepChunk() # Step 2 marker
60
+ TextStartChunk(id="step2")
61
+ TextDeltaChunk(id="step2", delta="Step 2 content")
62
+ TextEndChunk(id="step2")
63
+ FinishStepChunk() # Resets for next step
64
+
65
+ ## Tool Execution
66
+
67
+ Tools have a multi-stage lifecycle:
68
+
69
+ 1. ToolInputStartChunk - Begin receiving tool input
70
+ 2. ToolInputDeltaChunk - Incremental input text (JSON being parsed)
71
+ 3. ToolInputAvailableChunk - Complete input ready, tool can execute
72
+ 4. ToolOutputAvailableChunk - Tool completed successfully
73
+ OR ToolOutputErrorChunk - Tool execution failed
74
+
75
+ ## Complete Message Stream Pattern
76
+
77
+ StartChunk(messageId="msg-123")
78
+ StartStepChunk()
79
+ TextStartChunk(id="text-1")
80
+ TextDeltaChunk(id="text-1", delta="Hello")
81
+ TextDeltaChunk(id="text-1", delta=" world")
82
+ TextEndChunk(id="text-1")
83
+ FinishStepChunk()
84
+ FinishChunk()
85
+ """
86
+
87
+ from __future__ import annotations
88
+
89
+ from typing import Any, Literal, Union
90
+
91
+ from pydantic import BaseModel, Field
92
+
93
+
94
# Provider metadata
class ProviderMetadata(BaseModel):
    """Opaque provider-specific metadata attached to parts and chunks.

    Reproduces: ProviderMetadata
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/types/provider-metadata.ts
    """

    # Providers attach arbitrary keys, so accept any extra fields as-is.
    model_config = {"extra": "allow"}
103
+
104
+
105
# UI Message Parts (final state in UIMessage.parts)
# https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/ui-messages.ts
class TextUIPart(BaseModel):
    """Plain text content within a message's parts array.

    Reproduces: TextUIPart
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/ui-messages.ts
    """

    type: Literal["text"] = "text"
    text: str
    state: Literal["streaming", "done"] | None = None
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
120
+
121
+
122
class ReasoningUIPart(BaseModel):
    """Model reasoning content within a message's parts array.

    Reproduces: ReasoningUIPart
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/ui-messages.ts
    """

    type: Literal["reasoning"] = "reasoning"
    text: str
    state: Literal["streaming", "done"] | None = None
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
135
+
136
+
137
class SourceUrlUIPart(BaseModel):
    """URL citation attached to a message.

    Reproduces: SourceUrlUIPart
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/ui-messages.ts
    """

    type: Literal["source-url"] = "source-url"
    source_id: str = Field(alias="sourceId")
    url: str
    title: str | None = None
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
151
+
152
+
153
class SourceDocumentUIPart(BaseModel):
    """Document citation attached to a message.

    Reproduces: SourceDocumentUIPart
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/ui-messages.ts
    """

    type: Literal["source-document"] = "source-document"
    source_id: str = Field(alias="sourceId")
    media_type: str = Field(alias="mediaType")
    title: str
    filename: str | None = None
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
168
+
169
+
170
class FileUIPart(BaseModel):
    """File attachment within a message's parts array.

    Reproduces: FileUIPart
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/ui-messages.ts
    """

    type: Literal["file"] = "file"
    media_type: str = Field(alias="mediaType")
    filename: str | None = None
    url: str
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
184
+
185
+
186
class StepStartUIPart(BaseModel):
    """Marker that separates visual step groupings in a message.

    Reproduces: StepStartUIPart
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/ui-messages.ts
    """

    # Pure boundary marker: carries no payload beyond its discriminator.
    type: Literal["step-start"] = "step-start"
194
+
195
+
196
# Union type for UI message parts.
# Each member carries a distinct Literal `type` discriminator field, which is
# how Pydantic tells the variants apart during validation.
# NOTE(review): keep member set in sync with the upstream UIMessagePart union.
UIMessagePart = Union[
    TextUIPart,
    ReasoningUIPart,
    SourceUrlUIPart,
    SourceDocumentUIPart,
    FileUIPart,
    StepStartUIPart,
]
205
+
206
+
207
# UI Message
class UIMessage(BaseModel):
    """Complete AI SDK UI message: id, role, metadata, and content parts.

    Reproduces: UIMessage
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/ui-messages.ts
    """

    id: str
    role: Literal["system", "user", "assistant"]
    metadata: dict[str, Any] | None = None
    parts: list[UIMessagePart]
219
+
220
+
221
# Chat Request (the request body sent from frontend)
class ChatRequest(BaseModel):
    """Request body posted by the AI SDK UI/React chat transport.

    Reproduces: ChatRequest
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/chat-transport.ts
    """

    id: str  # chatId
    messages: list[UIMessage]
    trigger: Literal["submit-message", "regenerate-message"]
    message_id: str | None = Field(alias="messageId", default=None)
233
+
234
+
235
class CompletionRequest(BaseModel):
    """Request body posted by the AI SDK useCompletion hook.

    useCompletion sends { prompt: string, ...body }, where body may carry
    any extra fields a flow needs, so unknown keys are accepted.

    Reproduces: Request body from useCompletion
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui/call-completion-api.ts
    """

    prompt: str
    model_config = {"extra": "allow"}  # Allow arbitrary additional fields
247
+
248
+
249
# UI Message Chunks (streaming events)
# https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts


class TextStartChunk(BaseModel):
    """Opens a streamed text section identified by `id`.

    Reproduces: UIMessageChunk (text-start variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["text-start"] = "text-start"
    id: str
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
265
+
266
+
267
class TextDeltaChunk(BaseModel):
    """Appends incremental text to the open section with matching `id`.

    Reproduces: UIMessageChunk (text-delta variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["text-delta"] = "text-delta"
    id: str
    delta: str
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
280
+
281
+
282
class TextEndChunk(BaseModel):
    """Closes the streamed text section with matching `id`.

    Reproduces: UIMessageChunk (text-end variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["text-end"] = "text-end"
    id: str
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
294
+
295
+
296
class ReasoningStartChunk(BaseModel):
    """Opens a streamed reasoning section identified by `id`.

    Reproduces: UIMessageChunk (reasoning-start variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["reasoning-start"] = "reasoning-start"
    id: str
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
308
+
309
+
310
class ReasoningDeltaChunk(BaseModel):
    """Appends incremental reasoning text to the section with matching `id`.

    Reproduces: UIMessageChunk (reasoning-delta variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["reasoning-delta"] = "reasoning-delta"
    id: str
    delta: str
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
323
+
324
+
325
class ReasoningEndChunk(BaseModel):
    """Closes the streamed reasoning section with matching `id`.

    Reproduces: UIMessageChunk (reasoning-end variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["reasoning-end"] = "reasoning-end"
    id: str
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
337
+
338
+
339
class ToolInputStartChunk(BaseModel):
    """Begins streaming the input of a tool call.

    Reproduces: UIMessageChunk (tool-input-start variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["tool-input-start"] = "tool-input-start"
    tool_call_id: str = Field(alias="toolCallId")
    tool_name: str = Field(alias="toolName")
    provider_executed: bool | None = Field(
        alias="providerExecuted", default=None
    )
    dynamic: bool | None = None
353
+
354
+
355
class ToolInputDeltaChunk(BaseModel):
    """Incremental raw text of a tool call's input (JSON under construction).

    Reproduces: UIMessageChunk (tool-input-delta variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["tool-input-delta"] = "tool-input-delta"
    tool_call_id: str = Field(alias="toolCallId")
    input_text_delta: str = Field(alias="inputTextDelta")
365
+
366
+
367
class ToolInputAvailableChunk(BaseModel):
    """Signals the tool call's complete input is ready for execution.

    Reproduces: UIMessageChunk (tool-input-available variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["tool-input-available"] = "tool-input-available"
    tool_call_id: str = Field(alias="toolCallId")
    tool_name: str = Field(alias="toolName")
    input: Any
    provider_executed: bool | None = Field(
        alias="providerExecuted", default=None
    )
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
    dynamic: bool | None = None
385
+
386
+
387
class ToolOutputAvailableChunk(BaseModel):
    """Carries the result of a tool call that completed successfully.

    Reproduces: UIMessageChunk (tool-output-available variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["tool-output-available"] = "tool-output-available"
    tool_call_id: str = Field(alias="toolCallId")
    output: Any
    provider_executed: bool | None = Field(
        alias="providerExecuted", default=None
    )
    dynamic: bool | None = None
401
+
402
+
403
class ToolOutputErrorChunk(BaseModel):
    """Reports that a tool call failed, with the error text.

    Reproduces: UIMessageChunk (tool-output-error variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["tool-output-error"] = "tool-output-error"
    tool_call_id: str = Field(alias="toolCallId")
    error_text: str = Field(alias="errorText")
    provider_executed: bool | None = Field(
        alias="providerExecuted", default=None
    )
    dynamic: bool | None = None
417
+
418
+
419
class SourceUrlChunk(BaseModel):
    """Streams a URL citation for the current message.

    Reproduces: UIMessageChunk (source-url variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["source-url"] = "source-url"
    source_id: str = Field(alias="sourceId")
    url: str
    title: str | None = None
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
433
+
434
+
435
class SourceDocumentChunk(BaseModel):
    """Streams a document citation for the current message.

    Reproduces: UIMessageChunk (source-document variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["source-document"] = "source-document"
    source_id: str = Field(alias="sourceId")
    media_type: str = Field(alias="mediaType")
    title: str
    filename: str | None = None
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
450
+
451
+
452
class FileChunk(BaseModel):
    """Streams a file attachment into the current message.

    Reproduces: UIMessageChunk (file variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["file"] = "file"
    url: str
    media_type: str = Field(alias="mediaType")
    provider_metadata: ProviderMetadata | None = Field(
        alias="providerMetadata", default=None
    )
465
+
466
+
467
class ErrorChunk(BaseModel):
    """Notifies the client that an error occurred during streaming.

    Reproduces: UIMessageChunk (error variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["error"] = "error"
    error_text: str = Field(alias="errorText")
476
+
477
+
478
class StartStepChunk(BaseModel):
    """Boundary marker opening a step grouping in the UI.

    Carries NO fields. Everything streamed between start-step and the
    matching finish-step is rendered as a single step group.

    Reproduces: UIMessageChunk (start-step variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts

    Example usage:
        StartStepChunk()
        TextStartChunk(id="step-1-text")
        TextDeltaChunk(id="step-1-text", delta="Processing...")
        TextEndChunk(id="step-1-text")
        FinishStepChunk()
    """

    type: Literal["start-step"] = "start-step"
496
+
497
+
498
class FinishStepChunk(BaseModel):
    """Boundary marker closing a step grouping in the UI.

    Carries NO fields. On receipt the client processor resets its
    activeTextParts and activeReasoningParts.

    Reproduces: UIMessageChunk (finish-step variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["finish-step"] = "finish-step"
509
+
510
+
511
class StartChunk(BaseModel):
    """First chunk of a message stream; may seed id and metadata.

    Reproduces: UIMessageChunk (start variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["start"] = "start"
    message_id: str | None = Field(alias="messageId", default=None)
    message_metadata: dict[str, Any] | None = Field(
        alias="messageMetadata", default=None
    )
523
+
524
+
525
# temp
class ToolStarted(BaseModel):
    """Tool-started notification chunk.

    NOTE(review): marked "temp" and absent from the UIMessageChunk union
    below — this appears to be a qtype-specific extension rather than an
    upstream AI SDK chunk; confirm intent before relying on it.
    """

    type: Literal["tool_started"] = "tool_started"
    message_id: str | None = Field(default=None, alias="messageId")
    message_metadata: dict[str, Any] | None = Field(
        default=None, alias="messageMetadata"
    )
537
+
538
+
539
class ToolResultReceived(BaseModel):
    """Tool-result-received notification chunk.

    NOTE(review): like ToolStarted above, this is absent from the
    UIMessageChunk union below — appears to be a qtype-specific extension
    rather than an upstream AI SDK chunk; confirm intent.
    """

    type: Literal["tool_result_received"] = "tool_result_received"
    message_id: str | None = Field(default=None, alias="messageId")
    message_metadata: dict[str, Any] | None = Field(
        default=None, alias="messageMetadata"
    )
550
+
551
+
552
class FinishChunk(BaseModel):
    """Final chunk of a successful message stream.

    Reproduces: UIMessageChunk (finish variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["finish"] = "finish"
    message_metadata: dict[str, Any] | None = Field(
        alias="messageMetadata", default=None
    )
563
+
564
+
565
class AbortChunk(BaseModel):
    """Signals that the stream was aborted before completion.

    Reproduces: UIMessageChunk (abort variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["abort"] = "abort"
573
+
574
+
575
class MessageMetadataChunk(BaseModel):
    """Mid-stream update to the message's metadata.

    Reproduces: UIMessageChunk (message-metadata variant)
    https://github.com/vercel/ai/blob/ai@5.0.2/packages/ai/src/ui-message-stream/ui-message-chunks.ts
    """

    type: Literal["message-metadata"] = "message-metadata"
    message_metadata: dict[str, Any] = Field(alias="messageMetadata")
584
+
585
+
586
# Union type for all UI message chunks.
# Each member has a distinct Literal `type` discriminator field.
# NOTE(review): the custom ToolStarted / ToolResultReceived chunks defined
# above are not members of this union — confirm that is intentional.
UIMessageChunk = Union[
    TextStartChunk,
    TextDeltaChunk,
    TextEndChunk,
    ReasoningStartChunk,
    ReasoningDeltaChunk,
    ReasoningEndChunk,
    ToolInputStartChunk,
    ToolInputDeltaChunk,
    ToolInputAvailableChunk,
    ToolOutputAvailableChunk,
    ToolOutputErrorChunk,
    SourceUrlChunk,
    SourceDocumentChunk,
    FileChunk,
    ErrorChunk,
    StartStepChunk,
    FinishStepChunk,
    StartChunk,
    FinishChunk,
    AbortChunk,
    MessageMetadataChunk,
]
@@ -0,0 +1,15 @@
1
"""Public API of the streaming utility helpers package."""

from __future__ import annotations

from .build_vercel_ai_formatter import build_vercel_ai_formatter
from .callback_to_stream import callback_to_async_iterator
from .create_streaming_response import create_streaming_response
from .default_chat_extract_text import default_chat_extract_text
from .error_streaming_response import error_streaming_response

__all__ = [
    "build_vercel_ai_formatter",
    "callback_to_async_iterator",
    "create_streaming_response",
    "error_streaming_response",
    "default_chat_extract_text",
]
@@ -0,0 +1,74 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ import uuid
5
+ from collections.abc import Callable, Generator, Iterable
6
+ from concurrent.futures import Future
7
+ from typing import Any
8
+
9
+ from qtype.interpreter.stream.chat.vercel import (
10
+ ErrorChunk,
11
+ FinishChunk,
12
+ StartChunk,
13
+ TextDeltaChunk,
14
+ TextEndChunk,
15
+ TextStartChunk,
16
+ )
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
def build_vercel_ai_formatter(
    stream_generator: Iterable[tuple[Any, Any]],
    result_future: Future,
    extract_text: Callable[[Any], str],
    *,
    timeout: float = 5.0,
) -> Generator[str, None, None]:
    """
    Convert a low-level stream of (step, message) into Vercel AI UI protocol SSE.

    Args:
        stream_generator: Iterable yielding (step, message) pairs.
        result_future: Future representing completion of the flow.
        extract_text: Function to extract textual content from a message object.
        timeout: Seconds to wait for ``result_future`` when finalizing
            (keyword-only; defaults to the previous hard-coded 5.0).

    Yields:
        Lines formatted as 'data: {...}\\n\\n' for SSE.
    """

    def _sse(chunk: Any) -> str:
        # One SSE event per chunk, camelCase keys, nulls omitted.
        return (
            f"data: {chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
        )

    yield _sse(StartChunk(messageId=str(uuid.uuid4())))  # type: ignore[arg-type]

    # All content is streamed as a single text part; open it lazily so an
    # all-empty stream emits no text-start/text-end pair.
    text_id = str(uuid.uuid4())
    text_started = False

    for _step, message in stream_generator:
        try:
            content = extract_text(message)
        except Exception as exc:  # Defensive; keep the stream alive
            logger.debug(
                "Failed extracting text from message: %s", exc, exc_info=True
            )
            continue

        # Skip messages with no meaningful text.
        if not content or not content.strip():
            continue

        if not text_started:
            yield _sse(TextStartChunk(id=text_id))
            text_started = True

        yield _sse(TextDeltaChunk(id=text_id, delta=content))

    if text_started:
        yield _sse(TextEndChunk(id=text_id))

    try:
        # Surface any exception raised by the flow before declaring success.
        result_future.result(timeout=timeout)
        yield _sse(FinishChunk())
    except Exception as exc:
        logger.error("Error finalizing flow execution: %s", exc, exc_info=True)
        yield _sse(ErrorChunk(errorText=str(exc)))