qtype 0.0.16__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff shows the content changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (126)
  1. qtype/application/commons/tools.py +1 -1
  2. qtype/application/converters/tools_from_api.py +5 -5
  3. qtype/application/converters/tools_from_module.py +2 -2
  4. qtype/application/converters/types.py +14 -43
  5. qtype/application/documentation.py +1 -1
  6. qtype/application/facade.py +92 -71
  7. qtype/base/types.py +227 -7
  8. qtype/commands/convert.py +20 -8
  9. qtype/commands/generate.py +19 -27
  10. qtype/commands/run.py +54 -36
  11. qtype/commands/serve.py +74 -54
  12. qtype/commands/validate.py +34 -8
  13. qtype/commands/visualize.py +46 -22
  14. qtype/dsl/__init__.py +6 -5
  15. qtype/dsl/custom_types.py +1 -1
  16. qtype/dsl/domain_types.py +65 -5
  17. qtype/dsl/linker.py +384 -0
  18. qtype/dsl/loader.py +315 -0
  19. qtype/dsl/model.py +612 -363
  20. qtype/dsl/parser.py +200 -0
  21. qtype/dsl/types.py +50 -0
  22. qtype/interpreter/api.py +57 -136
  23. qtype/interpreter/auth/aws.py +19 -9
  24. qtype/interpreter/auth/generic.py +93 -16
  25. qtype/interpreter/base/base_step_executor.py +429 -0
  26. qtype/interpreter/base/batch_step_executor.py +171 -0
  27. qtype/interpreter/base/exceptions.py +50 -0
  28. qtype/interpreter/base/executor_context.py +74 -0
  29. qtype/interpreter/base/factory.py +117 -0
  30. qtype/interpreter/base/progress_tracker.py +75 -0
  31. qtype/interpreter/base/secrets.py +339 -0
  32. qtype/interpreter/base/step_cache.py +73 -0
  33. qtype/interpreter/base/stream_emitter.py +469 -0
  34. qtype/interpreter/conversions.py +455 -21
  35. qtype/interpreter/converters.py +73 -0
  36. qtype/interpreter/endpoints.py +355 -0
  37. qtype/interpreter/executors/agent_executor.py +242 -0
  38. qtype/interpreter/executors/aggregate_executor.py +93 -0
  39. qtype/interpreter/executors/decoder_executor.py +163 -0
  40. qtype/interpreter/executors/doc_to_text_executor.py +112 -0
  41. qtype/interpreter/executors/document_embedder_executor.py +75 -0
  42. qtype/interpreter/executors/document_search_executor.py +122 -0
  43. qtype/interpreter/executors/document_source_executor.py +118 -0
  44. qtype/interpreter/executors/document_splitter_executor.py +105 -0
  45. qtype/interpreter/executors/echo_executor.py +63 -0
  46. qtype/interpreter/executors/field_extractor_executor.py +160 -0
  47. qtype/interpreter/executors/file_source_executor.py +101 -0
  48. qtype/interpreter/executors/file_writer_executor.py +110 -0
  49. qtype/interpreter/executors/index_upsert_executor.py +228 -0
  50. qtype/interpreter/executors/invoke_embedding_executor.py +92 -0
  51. qtype/interpreter/executors/invoke_flow_executor.py +51 -0
  52. qtype/interpreter/executors/invoke_tool_executor.py +353 -0
  53. qtype/interpreter/executors/llm_inference_executor.py +272 -0
  54. qtype/interpreter/executors/prompt_template_executor.py +78 -0
  55. qtype/interpreter/executors/sql_source_executor.py +106 -0
  56. qtype/interpreter/executors/vector_search_executor.py +91 -0
  57. qtype/interpreter/flow.py +147 -22
  58. qtype/interpreter/metadata_api.py +115 -0
  59. qtype/interpreter/resource_cache.py +5 -4
  60. qtype/interpreter/stream/chat/__init__.py +15 -0
  61. qtype/interpreter/stream/chat/converter.py +391 -0
  62. qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
  63. qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
  64. qtype/interpreter/stream/chat/vercel.py +609 -0
  65. qtype/interpreter/stream/utils/__init__.py +15 -0
  66. qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
  67. qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
  68. qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
  69. qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
  70. qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
  71. qtype/interpreter/telemetry.py +135 -8
  72. qtype/interpreter/tools/__init__.py +5 -0
  73. qtype/interpreter/tools/function_tool_helper.py +265 -0
  74. qtype/interpreter/types.py +328 -0
  75. qtype/interpreter/typing.py +83 -89
  76. qtype/interpreter/ui/404/index.html +1 -1
  77. qtype/interpreter/ui/404.html +1 -1
  78. qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
  79. qtype/interpreter/ui/_next/static/chunks/{393-8fd474427f8e19ce.js → 434-b2112d19f25c44ff.js} +3 -3
  80. qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
  81. qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
  82. qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
  83. qtype/interpreter/ui/icon.png +0 -0
  84. qtype/interpreter/ui/index.html +1 -1
  85. qtype/interpreter/ui/index.txt +4 -4
  86. qtype/semantic/checker.py +583 -0
  87. qtype/semantic/generate.py +262 -83
  88. qtype/semantic/loader.py +95 -0
  89. qtype/semantic/model.py +436 -159
  90. qtype/semantic/resolver.py +59 -17
  91. qtype/semantic/visualize.py +28 -31
  92. {qtype-0.0.16.dist-info → qtype-0.1.0.dist-info}/METADATA +16 -3
  93. qtype-0.1.0.dist-info/RECORD +134 -0
  94. qtype/dsl/base_types.py +0 -38
  95. qtype/dsl/validator.py +0 -465
  96. qtype/interpreter/batch/__init__.py +0 -0
  97. qtype/interpreter/batch/file_sink_source.py +0 -162
  98. qtype/interpreter/batch/flow.py +0 -95
  99. qtype/interpreter/batch/sql_source.py +0 -92
  100. qtype/interpreter/batch/step.py +0 -74
  101. qtype/interpreter/batch/types.py +0 -41
  102. qtype/interpreter/batch/utils.py +0 -178
  103. qtype/interpreter/chat/chat_api.py +0 -237
  104. qtype/interpreter/chat/vercel.py +0 -314
  105. qtype/interpreter/exceptions.py +0 -10
  106. qtype/interpreter/step.py +0 -67
  107. qtype/interpreter/steps/__init__.py +0 -0
  108. qtype/interpreter/steps/agent.py +0 -114
  109. qtype/interpreter/steps/condition.py +0 -36
  110. qtype/interpreter/steps/decoder.py +0 -88
  111. qtype/interpreter/steps/llm_inference.py +0 -171
  112. qtype/interpreter/steps/prompt_template.py +0 -54
  113. qtype/interpreter/steps/search.py +0 -24
  114. qtype/interpreter/steps/tool.py +0 -219
  115. qtype/interpreter/streaming_helpers.py +0 -123
  116. qtype/interpreter/ui/_next/static/chunks/app/page-7e26b6156cfb55d3.js +0 -1
  117. qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
  118. qtype/interpreter/ui/_next/static/css/b40532b0db09cce3.css +0 -3
  119. qtype/interpreter/ui/favicon.ico +0 -0
  120. qtype/loader.py +0 -390
  121. qtype-0.0.16.dist-info/RECORD +0 -106
  122. /qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
  123. {qtype-0.0.16.dist-info → qtype-0.1.0.dist-info}/WHEEL +0 -0
  124. {qtype-0.0.16.dist-info → qtype-0.1.0.dist-info}/entry_points.txt +0 -0
  125. {qtype-0.0.16.dist-info → qtype-0.1.0.dist-info}/licenses/LICENSE +0 -0
  126. {qtype-0.0.16.dist-info → qtype-0.1.0.dist-info}/top_level.txt +0 -0
qtype/interpreter/stream/chat/converter.py (new file)
@@ -0,0 +1,391 @@
+ """
+ Converter for transforming StreamEvents to Vercel AI SDK UIMessageChunks.
+
+ This module provides a stateful converter that transforms internal StreamEvent
+ types (emitted by step executors) into Vercel AI SDK UIMessageChunk types
+ suitable for streaming to the frontend via SSE.
+
+ Usage:
+     converter = StreamEventConverter()
+     for event in stream_events:
+         for chunk in converter.convert(event):
+             # Send chunk to frontend
+             yield f"data: {chunk.model_dump_json(by_alias=True)}\n\n"
+ """
+
+ from __future__ import annotations
+
+ import uuid
+ from collections.abc import AsyncIterator, Iterator
+ from typing import Any
+
+ from qtype.interpreter.stream.chat.vercel import (
+     ErrorChunk,
+     FinishChunk,
+     FinishStepChunk,
+     MessageMetadataChunk,
+     ReasoningDeltaChunk,
+     ReasoningEndChunk,
+     ReasoningStartChunk,
+     StartChunk,
+     StartStepChunk,
+     TextDeltaChunk,
+     TextEndChunk,
+     TextStartChunk,
+     ToolInputAvailableChunk,
+     ToolInputDeltaChunk,
+     ToolInputStartChunk,
+     ToolOutputAvailableChunk,
+     ToolOutputErrorChunk,
+     UIMessageChunk,
+ )
+ from qtype.interpreter.types import (
+     ErrorEvent,
+     ReasoningStreamDeltaEvent,
+     ReasoningStreamEndEvent,
+     ReasoningStreamStartEvent,
+     StatusEvent,
+     StepEndEvent,
+     StepStartEvent,
+     StreamEvent,
+     TextStreamDeltaEvent,
+     TextStreamEndEvent,
+     TextStreamStartEvent,
+     ToolExecutionEndEvent,
+     ToolExecutionErrorEvent,
+     ToolExecutionStartEvent,
+ )
+
+
+ class StreamEventConverter:
+     """
+     Converts internal StreamEvents to Vercel AI SDK UIMessageChunks.
+
+     This converter maintains state to track active text streams and generates
+     appropriate Vercel chunks for each event type. Some events map to multiple
+     chunks (e.g., StatusEvent becomes a wrapped step with text chunks).
+
+     Example:
+         ```python
+         converter = StreamEventConverter()
+
+         # Convert a status message
+         event = StatusEvent(step=step, message="Processing...")
+         for chunk in converter.convert(event):
+             # Yields: StartStepChunk, TextStartChunk, TextDeltaChunk,
+             # TextEndChunk, FinishStepChunk
+             send_to_client(chunk)
+
+         # Convert text streaming
+         start_event = TextStreamStartEvent(step=step, stream_id="s1")
+         for chunk in converter.convert(start_event):
+             # Yields: TextStartChunk
+             send_to_client(chunk)
+
+         delta_event = TextStreamDeltaEvent(
+             step=step, stream_id="s1", delta="Hello"
+         )
+         for chunk in converter.convert(delta_event):
+             # Yields: TextDeltaChunk
+             send_to_client(chunk)
+         ```
+     """
+
+     def __init__(self) -> None:
+         """Initialize the converter with empty state."""
+         # Map stream_id to Vercel chunk_id for all streams (text, reasoning, etc.)
+         self._active_streams: dict[str, str] = {}
+
+     def convert(self, event: StreamEvent) -> Iterator[UIMessageChunk]:
+         """
+         Convert a StreamEvent to one or more Vercel UIMessageChunks.
+
+         Args:
+             event: The StreamEvent to convert
+
+         Yields:
+             One or more UIMessageChunk instances
+         """
+         # Use pattern matching for clean dispatch
+         match event.type:
+             case "text_stream_start":
+                 yield from self._convert_text_stream_start(event)  # type: ignore[arg-type]
+             case "text_stream_delta":
+                 yield from self._convert_text_stream_delta(event)  # type: ignore[arg-type]
+             case "text_stream_end":
+                 yield from self._convert_text_stream_end(event)  # type: ignore[arg-type]
+             case "reasoning_stream_start":
+                 yield from self._convert_reasoning_stream_start(event)  # type: ignore[arg-type]
+             case "reasoning_stream_delta":
+                 yield from self._convert_reasoning_stream_delta(event)  # type: ignore[arg-type]
+             case "reasoning_stream_end":
+                 yield from self._convert_reasoning_stream_end(event)  # type: ignore[arg-type]
+             case "status":
+                 yield from self._convert_status(event)  # type: ignore[arg-type]
+             case "step_start":
+                 yield from self._convert_step_start(event)  # type: ignore[arg-type]
+             case "step_end":
+                 yield from self._convert_step_end(event)  # type: ignore[arg-type]
+             case "tool_execution_start":
+                 yield from self._convert_tool_execution_start(event)  # type: ignore[arg-type]
+             case "tool_execution_end":
+                 yield from self._convert_tool_execution_end(event)  # type: ignore[arg-type]
+             case "tool_execution_error":
+                 yield from self._convert_tool_execution_error(event)  # type: ignore[arg-type]
+             case "error":
+                 yield from self._convert_error(event)  # type: ignore[arg-type]
+             case _:
+                 # Unknown event type - log warning but don't fail
+                 pass
+
+     def _convert_text_stream_start(
+         self, event: TextStreamStartEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert TextStreamStartEvent to TextStartChunk.
+
+         Registers the stream_id and creates a new Vercel chunk ID.
+         """
+         chunk_id = str(uuid.uuid4())
+         self._active_streams[event.stream_id] = chunk_id
+         yield TextStartChunk(id=chunk_id)
+
+     def _convert_text_stream_delta(
+         self, event: TextStreamDeltaEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert TextStreamDeltaEvent to TextDeltaChunk.
+
+         Uses the chunk ID registered during text_stream_start.
+         """
+         chunk_id = self._active_streams.get(event.stream_id)
+         if chunk_id:
+             yield TextDeltaChunk(id=chunk_id, delta=event.delta)
+
+     def _convert_reasoning_stream_delta(
+         self, event: ReasoningStreamDeltaEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert ReasoningStreamDeltaEvent to ReasoningDeltaChunk.
+
+         Uses the chunk ID registered during text_stream_start.
+         """
+         chunk_id = self._active_streams.get(event.stream_id)
+         if chunk_id:
+             yield ReasoningDeltaChunk(id=chunk_id, delta=event.delta)
+
+     def _convert_text_stream_end(
+         self, event: TextStreamEndEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert TextStreamEndEvent to TextEndChunk.
+
+         Cleans up the stream_id registration.
+         """
+         chunk_id = self._active_streams.pop(event.stream_id, None)
+         if chunk_id:
+             yield TextEndChunk(id=chunk_id)
+
+     def _convert_reasoning_stream_start(
+         self, event: ReasoningStreamStartEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert ReasoningStreamStartEvent to ReasoningStartChunk.
+
+         Registers the stream_id and creates a new Vercel chunk ID for reasoning.
+         """
+         chunk_id = str(uuid.uuid4())
+         self._active_streams[event.stream_id] = chunk_id
+         yield ReasoningStartChunk(id=chunk_id)
+
+     def _convert_reasoning_stream_delta(
+         self, event: ReasoningStreamDeltaEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert ReasoningStreamDeltaEvent to ReasoningDeltaChunk.
+
+         Uses the chunk ID registered during reasoning_stream_start.
+         """
+         chunk_id = self._active_streams.get(event.stream_id)
+         if chunk_id:
+             yield ReasoningDeltaChunk(id=chunk_id, delta=event.delta)
+
+     def _convert_reasoning_stream_end(
+         self, event: ReasoningStreamEndEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert ReasoningStreamEndEvent to ReasoningEndChunk.
+
+         Cleans up the stream_id registration.
+         """
+         chunk_id = self._active_streams.pop(event.stream_id, None)
+         if chunk_id:
+             yield ReasoningEndChunk(id=chunk_id)
+
+     def _convert_status(self, event: StatusEvent) -> Iterator[UIMessageChunk]:
+         """
+         Convert StatusEvent to MessageMetadataChunk.
+
+         Status messages are sent as message metadata with the 'statusMessage'
+         key, allowing the frontend to display them separately from content.
+         """
+         yield MessageMetadataChunk(
+             messageMetadata={"statusMessage": event.message}
+         )
+
+     def _convert_step_start(
+         self, event: StepStartEvent
+     ) -> Iterator[UIMessageChunk]:
+         """Convert StepStartEvent to StartStepChunk."""
+         yield StartStepChunk()
+         yield MessageMetadataChunk(messageMetadata={"step_id": event.step.id})
+
+     def _convert_step_end(
+         self, event: StepEndEvent
+     ) -> Iterator[UIMessageChunk]:
+         """Convert StepEndEvent to FinishStepChunk."""
+         yield FinishStepChunk()
+
+     def _convert_tool_execution_start(
+         self, event: ToolExecutionStartEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert ToolExecutionStartEvent to proper tool input sequence.
+
+         Following Vercel's protocol:
+         1. ToolInputStartChunk - Begin receiving tool input
+         2. ToolInputDeltaChunk - Incremental input text (JSON being parsed)
+         3. ToolInputAvailableChunk - Complete input ready, tool can execute
+         """
+         # 1. Start tool input streaming
+         yield ToolInputStartChunk(
+             toolCallId=event.tool_call_id,
+             toolName=event.tool_name,
+             providerExecuted=True,  # Tools are executed on the server
+         )
+
+         # 2. Stream the input as JSON text delta
+         import json
+
+         input_json = json.dumps(event.tool_input)
+         yield ToolInputDeltaChunk(
+             toolCallId=event.tool_call_id,
+             inputTextDelta=input_json,
+         )
+
+         # 3. Signal input is complete and ready for execution
+         yield ToolInputAvailableChunk(
+             toolCallId=event.tool_call_id,
+             toolName=event.tool_name,
+             input=event.tool_input,
+             providerExecuted=True,  # Tools are executed on the server
+         )
+
+     def _convert_tool_execution_end(
+         self, event: ToolExecutionEndEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert ToolExecutionEndEvent to ToolOutputAvailableChunk.
+
+         Signals successful tool completion with output.
+         """
+         yield ToolOutputAvailableChunk(
+             toolCallId=event.tool_call_id,
+             output=event.tool_output,
+             providerExecuted=True,  # Tools are executed on the server
+         )
+
+     def _convert_tool_execution_error(
+         self, event: ToolExecutionErrorEvent
+     ) -> Iterator[UIMessageChunk]:
+         """
+         Convert ToolExecutionErrorEvent to ToolOutputErrorChunk.
+
+         Signals tool execution failure with error message.
+         """
+         yield ToolOutputErrorChunk(
+             toolCallId=event.tool_call_id,
+             errorText=event.error_message,
+             providerExecuted=True,  # Tools are executed on the server
+         )
+
+     def _convert_error(self, event: ErrorEvent) -> Iterator[UIMessageChunk]:
+         """
+         Convert ErrorEvent to ErrorChunk.
+
+         General error that occurred during execution.
+         """
+         yield ErrorChunk(errorText=event.error_message)
+
+
+ async def format_stream_events_as_sse(
+     event_stream: AsyncIterator[StreamEvent | None],
+     message_id: str | None = None,
+     output_metadata: dict[str, Any] | None = None,
+ ) -> AsyncIterator[str]:
+     """
+     Convert a stream of StreamEvents to SSE-formatted strings.
+
+     This function orchestrates the conversion of StreamEvents to
+     UIMessageChunks and formats them as Server-Sent Events for
+     the Vercel AI SDK protocol.
+
+     Args:
+         event_stream: Async iterator yielding StreamEvents (None signals end)
+         message_id: Optional message ID (generated if not provided)
+         output_metadata: Optional dict to include in FinishChunk metadata
+
+     Yields:
+         SSE formatted strings (data: {json}\\n\\n)
+
+     Example:
+         ```python
+         async def stream_events():
+             yield StatusEvent(step=step, message="Processing...")
+             yield TextStreamStartEvent(step=step, stream_id="s1")
+             yield TextStreamDeltaEvent(step=step, stream_id="s1", delta="Hi")
+             yield TextStreamEndEvent(step=step, stream_id="s1")
+             yield None  # Signal completion
+
+         async for sse_line in format_stream_events_as_sse(
+             stream_events(),
+             output_metadata={"result": "success"}
+         ):
+             # Send to client via StreamingResponse
+             pass
+         ```
+     """
+     # Start message with unique ID
+     if message_id is None:
+         message_id = str(uuid.uuid4())
+
+     start_chunk = StartChunk(messageId=message_id)  # type: ignore[arg-type]
+     yield (
+         f"data: "
+         f"{start_chunk.model_dump_json(by_alias=True, exclude_none=True)}"
+         f"\n\n"
+     )
+
+     # Create converter for stateful event-to-chunk conversion
+     converter = StreamEventConverter()
+
+     # Process events and convert to chunks
+     async for event in event_stream:
+         if event is None:
+             break  # End of stream
+
+         # Convert event to chunks and yield as SSE
+         for chunk in converter.convert(event):
+             yield (
+                 f"data: "
+                 f"{chunk.model_dump_json(by_alias=True, exclude_none=True)}"
+                 f"\n\n"
+             )
+
+     # End message stream with optional metadata
+     finish_chunk = FinishChunk(messageMetadata=output_metadata)  # type: ignore[arg-type]
+     yield (
+         f"data: "
+         f"{finish_chunk.model_dump_json(by_alias=True, exclude_none=True)}"
+         f"\n\n"
+     )
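For orientation, a minimal sketch of how format_stream_events_as_sse could back an SSE endpoint. The FastAPI wiring, route name, and the trivial event generator below are illustrative assumptions, not code from this release:

```python
# Illustrative sketch only: the FastAPI app and event generator are assumptions
# for demonstration; only format_stream_events_as_sse comes from this diff.
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

from qtype.interpreter.stream.chat.converter import format_stream_events_as_sse

app = FastAPI()


@app.post("/chat/stream")
async def chat_stream() -> StreamingResponse:
    async def events():
        # A real endpoint would yield StreamEvents produced by step executors;
        # yielding None signals the end of the stream (see the docstring above).
        yield None

    return StreamingResponse(
        format_stream_events_as_sse(
            events(), output_metadata={"result": "success"}
        ),
        media_type="text/event-stream",
    )
```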
qtype/interpreter/{chat → stream/chat}/file_conversions.py
@@ -1,9 +1,8 @@
  import base64

- import magic
  import requests

- from qtype.dsl.base_types import PrimitiveTypeEnum
+ from qtype.base.types import PrimitiveTypeEnum
  from qtype.dsl.domain_types import ChatContent


@@ -17,6 +16,7 @@ def file_to_content(url: str) -> ChatContent:
      Returns:
          A ChatContent block with type 'file' and the file URL as content.
      """
+     import magic

      # Get the bytes from the url.
      if url.startswith("data:"):
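A small usage sketch for file_to_content after the import change (the data URL below is a made-up example):

```python
# Illustrative only: the data URL is a fabricated example payload.
from qtype.interpreter.stream.chat.file_conversions import file_to_content

# file_to_content decodes data: URLs (or fetches http(s) URLs via requests),
# uses python-magic to inspect the bytes, and returns a ChatContent block.
content = file_to_content("data:text/plain;base64,aGVsbG8gd29ybGQ=")
print(content.type, content.content)
```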
qtype/interpreter/stream/chat/ui_request_to_domain_type.py (new file)
@@ -0,0 +1,140 @@
+ from __future__ import annotations
+
+ from pydantic import BaseModel
+
+ from qtype.base.types import PrimitiveTypeEnum
+ from qtype.dsl.domain_types import ChatContent, ChatMessage, MessageRole
+ from qtype.interpreter.stream.chat.file_conversions import file_to_content
+ from qtype.interpreter.stream.chat.vercel import (
+     ChatRequest,
+     CompletionRequest,
+     UIMessage,
+ )
+
+
+ def ui_request_to_domain_type(request: ChatRequest) -> list[ChatMessage]:
+     """
+     Convert a ChatRequest to domain-specific ChatMessages.
+
+     Processes all UI messages from the AI SDK UI/React request format.
+     Returns the full conversation history for context.
+     """
+     if not request.messages:
+         raise ValueError("No messages provided in request.")
+
+     # Convert each UIMessage to a domain-specific ChatMessage
+     return [
+         _ui_message_to_domain_type(message) for message in request.messages
+     ]
+
+
+ def _ui_message_to_domain_type(message: UIMessage) -> ChatMessage:
+     """
+     Convert a UIMessage to a domain-specific ChatMessage.
+
+     Creates one block for each part in the message content.
+     """
+     blocks = []
+
+     for part in message.parts:
+         if part.type == "text":
+             blocks.append(
+                 ChatContent(type=PrimitiveTypeEnum.text, content=part.text)
+             )
+         elif part.type == "reasoning":
+             blocks.append(
+                 ChatContent(type=PrimitiveTypeEnum.text, content=part.text)
+             )
+         elif part.type == "file":
+             blocks.append(
+                 file_to_content(part.url)  # type: ignore
+             )
+         elif part.type == "source-url":
+             # Source URLs are references that might be displayed as citations
+             # Store as structured citation data
+             citation_data = {
+                 "source_id": part.source_id,  # type: ignore
+                 "url": part.url,  # type: ignore
+                 "title": part.title,  # type: ignore
+             }
+             blocks.append(
+                 ChatContent(
+                     type=PrimitiveTypeEnum.citation_url,
+                     content=citation_data,
+                 )
+             )
+         elif part.type == "source-document":
+             # Source documents are references to documents
+             # Store as structured citation data
+             citation_data = {
+                 "source_id": part.source_id,  # type: ignore
+                 "title": part.title,  # type: ignore
+                 "filename": part.filename,  # type: ignore
+                 "media_type": part.media_type,  # type: ignore
+             }
+             blocks.append(
+                 ChatContent(
+                     type=PrimitiveTypeEnum.citation_document,
+                     content=citation_data,
+                 )
+             )
+         elif part.type == "step-start":
+             # Step boundaries might not need content blocks
+             continue
+         else:
+             # Log unknown part types for debugging
+             raise ValueError(f"Unknown part type: {part.type}")
+
+     # If no blocks were created, raise an error
+     if not blocks:
+         raise ValueError(
+             "No valid content blocks created from UIMessage parts."
+         )
+
+     return ChatMessage(
+         role=MessageRole(message.role),
+         blocks=blocks,
+     )
+
+
+ def completion_request_to_input_model(
+     request: CompletionRequest, input_model: type[BaseModel]
+ ) -> BaseModel:
+     """
+     Convert a CompletionRequest to a flow's input model.
+
+     The CompletionRequest has a required 'prompt' field.
+     This function maps the request data to the flow's input shape.
+
+     Args:
+         request: The Vercel CompletionRequest with prompt and additional fields
+         input_model: The Pydantic model class created by create_input_shape()
+
+     Returns:
+         An instance of input_model with data from the request
+
+     Raises:
+         ValueError: If required fields are missing or data doesn't match schema
+     """
+
+     prompt_str = request.prompt
+
+     # Get the field name from the input model
+     # The semantic checker ensures there's exactly one field for Complete flows
+     field_names = list(input_model.model_fields.keys())
+     if len(field_names) != 1:
+         raise ValueError(
+             (
+                 f"Expected exactly one input field for Complete flow, "
+                 f"found {len(field_names)}: {field_names}"
+             )
+         )
+     field_name = field_names[0]
+
+     # Create instance of the input model with the prompt mapped to the field
+     try:
+         return input_model(**{field_name: prompt_str})
+     except Exception as e:
+         raise ValueError(
+             f"Failed to map CompletionRequest to input model: {e}"
+         ) from e
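A minimal usage sketch for completion_request_to_input_model. The single-field FlowInput model is hypothetical, and CompletionRequest is assumed to be constructible from just its required prompt field:

```python
# Sketch under assumptions: FlowInput is a made-up input model, and
# CompletionRequest is assumed constructible from only its required prompt.
from pydantic import BaseModel

from qtype.interpreter.stream.chat.ui_request_to_domain_type import (
    completion_request_to_input_model,
)
from qtype.interpreter.stream.chat.vercel import CompletionRequest


class FlowInput(BaseModel):
    # Exactly one field, matching what the semantic checker requires
    # for Complete flows.
    question: str


request = CompletionRequest(prompt="What is qtype?")
flow_input = completion_request_to_input_model(request, FlowInput)
# flow_input == FlowInput(question="What is qtype?")
```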