qtype-0.0.16-py3-none-any.whl → qtype-0.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qtype/application/commons/tools.py +1 -1
- qtype/application/converters/tools_from_api.py +5 -5
- qtype/application/converters/tools_from_module.py +2 -2
- qtype/application/converters/types.py +14 -43
- qtype/application/documentation.py +1 -1
- qtype/application/facade.py +94 -73
- qtype/base/types.py +227 -7
- qtype/cli.py +4 -0
- qtype/commands/convert.py +20 -8
- qtype/commands/generate.py +19 -27
- qtype/commands/run.py +73 -36
- qtype/commands/serve.py +74 -54
- qtype/commands/validate.py +34 -8
- qtype/commands/visualize.py +46 -22
- qtype/dsl/__init__.py +6 -5
- qtype/dsl/custom_types.py +1 -1
- qtype/dsl/domain_types.py +65 -5
- qtype/dsl/linker.py +384 -0
- qtype/dsl/loader.py +315 -0
- qtype/dsl/model.py +612 -363
- qtype/dsl/parser.py +200 -0
- qtype/dsl/types.py +50 -0
- qtype/interpreter/api.py +57 -136
- qtype/interpreter/auth/aws.py +19 -9
- qtype/interpreter/auth/generic.py +93 -16
- qtype/interpreter/base/base_step_executor.py +436 -0
- qtype/interpreter/base/batch_step_executor.py +171 -0
- qtype/interpreter/base/exceptions.py +50 -0
- qtype/interpreter/base/executor_context.py +74 -0
- qtype/interpreter/base/factory.py +117 -0
- qtype/interpreter/base/progress_tracker.py +110 -0
- qtype/interpreter/base/secrets.py +339 -0
- qtype/interpreter/base/step_cache.py +74 -0
- qtype/interpreter/base/stream_emitter.py +469 -0
- qtype/interpreter/conversions.py +462 -22
- qtype/interpreter/converters.py +77 -0
- qtype/interpreter/endpoints.py +355 -0
- qtype/interpreter/executors/agent_executor.py +242 -0
- qtype/interpreter/executors/aggregate_executor.py +93 -0
- qtype/interpreter/executors/decoder_executor.py +163 -0
- qtype/interpreter/executors/doc_to_text_executor.py +112 -0
- qtype/interpreter/executors/document_embedder_executor.py +107 -0
- qtype/interpreter/executors/document_search_executor.py +122 -0
- qtype/interpreter/executors/document_source_executor.py +118 -0
- qtype/interpreter/executors/document_splitter_executor.py +105 -0
- qtype/interpreter/executors/echo_executor.py +63 -0
- qtype/interpreter/executors/field_extractor_executor.py +160 -0
- qtype/interpreter/executors/file_source_executor.py +101 -0
- qtype/interpreter/executors/file_writer_executor.py +110 -0
- qtype/interpreter/executors/index_upsert_executor.py +228 -0
- qtype/interpreter/executors/invoke_embedding_executor.py +92 -0
- qtype/interpreter/executors/invoke_flow_executor.py +51 -0
- qtype/interpreter/executors/invoke_tool_executor.py +358 -0
- qtype/interpreter/executors/llm_inference_executor.py +272 -0
- qtype/interpreter/executors/prompt_template_executor.py +78 -0
- qtype/interpreter/executors/sql_source_executor.py +106 -0
- qtype/interpreter/executors/vector_search_executor.py +91 -0
- qtype/interpreter/flow.py +159 -22
- qtype/interpreter/metadata_api.py +115 -0
- qtype/interpreter/resource_cache.py +5 -4
- qtype/interpreter/rich_progress.py +225 -0
- qtype/interpreter/stream/chat/__init__.py +15 -0
- qtype/interpreter/stream/chat/converter.py +391 -0
- qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
- qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
- qtype/interpreter/stream/chat/vercel.py +609 -0
- qtype/interpreter/stream/utils/__init__.py +15 -0
- qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
- qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
- qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
- qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
- qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
- qtype/interpreter/telemetry.py +135 -8
- qtype/interpreter/tools/__init__.py +5 -0
- qtype/interpreter/tools/function_tool_helper.py +265 -0
- qtype/interpreter/types.py +330 -0
- qtype/interpreter/typing.py +83 -89
- qtype/interpreter/ui/404/index.html +1 -1
- qtype/interpreter/ui/404.html +1 -1
- qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
- qtype/interpreter/ui/_next/static/chunks/{393-8fd474427f8e19ce.js → 434-b2112d19f25c44ff.js} +3 -3
- qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
- qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
- qtype/interpreter/ui/icon.png +0 -0
- qtype/interpreter/ui/index.html +1 -1
- qtype/interpreter/ui/index.txt +4 -4
- qtype/semantic/checker.py +583 -0
- qtype/semantic/generate.py +262 -83
- qtype/semantic/loader.py +95 -0
- qtype/semantic/model.py +436 -159
- qtype/semantic/resolver.py +63 -19
- qtype/semantic/visualize.py +28 -31
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/METADATA +16 -3
- qtype-0.1.1.dist-info/RECORD +135 -0
- qtype/dsl/base_types.py +0 -38
- qtype/dsl/validator.py +0 -465
- qtype/interpreter/batch/__init__.py +0 -0
- qtype/interpreter/batch/file_sink_source.py +0 -162
- qtype/interpreter/batch/flow.py +0 -95
- qtype/interpreter/batch/sql_source.py +0 -92
- qtype/interpreter/batch/step.py +0 -74
- qtype/interpreter/batch/types.py +0 -41
- qtype/interpreter/batch/utils.py +0 -178
- qtype/interpreter/chat/chat_api.py +0 -237
- qtype/interpreter/chat/vercel.py +0 -314
- qtype/interpreter/exceptions.py +0 -10
- qtype/interpreter/step.py +0 -67
- qtype/interpreter/steps/__init__.py +0 -0
- qtype/interpreter/steps/agent.py +0 -114
- qtype/interpreter/steps/condition.py +0 -36
- qtype/interpreter/steps/decoder.py +0 -88
- qtype/interpreter/steps/llm_inference.py +0 -171
- qtype/interpreter/steps/prompt_template.py +0 -54
- qtype/interpreter/steps/search.py +0 -24
- qtype/interpreter/steps/tool.py +0 -219
- qtype/interpreter/streaming_helpers.py +0 -123
- qtype/interpreter/ui/_next/static/chunks/app/page-7e26b6156cfb55d3.js +0 -1
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
- qtype/interpreter/ui/_next/static/css/b40532b0db09cce3.css +0 -3
- qtype/interpreter/ui/favicon.ico +0 -0
- qtype/loader.py +0 -390
- qtype-0.0.16.dist-info/RECORD +0 -106
- /qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/WHEEL +0 -0
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/entry_points.txt +0 -0
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/licenses/LICENSE +0 -0
- {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/top_level.txt +0 -0
qtype/interpreter/chat/chat_api.py
DELETED

@@ -1,237 +0,0 @@
-from __future__ import annotations
-
-import logging
-import uuid
-from collections.abc import Generator
-from typing import Any
-
-from fastapi import FastAPI
-from fastapi.responses import StreamingResponse
-
-from qtype.dsl.base_types import PrimitiveTypeEnum
-from qtype.dsl.domain_types import ChatContent, ChatMessage, MessageRole
-from qtype.interpreter.chat.file_conversions import file_to_content
-from qtype.interpreter.chat.vercel import (
-    ChatRequest,
-    ErrorChunk,
-    FinishChunk,
-    StartChunk,
-    TextDeltaChunk,
-    TextEndChunk,
-    TextStartChunk,
-    UIMessage,
-)
-from qtype.interpreter.flow import execute_flow
-from qtype.interpreter.streaming_helpers import create_streaming_generator
-from qtype.semantic.model import Flow
-
-
-def _ui_request_to_domain_type(request: ChatRequest) -> list[ChatMessage]:
-    """
-    Convert a ChatRequest to domain-specific ChatMessages.
-
-    Processes all UI messages from the AI SDK UI/React request format.
-    Returns the full conversation history for context.
-    """
-    if not request.messages:
-        raise ValueError("No messages provided in request.")
-
-    # Convert each UIMessage to a domain-specific ChatMessage
-    return [
-        _ui_message_to_domain_type(message) for message in request.messages
-    ]
-
-
-def _ui_message_to_domain_type(message: UIMessage) -> ChatMessage:
-    """
-    Convert a UIMessage to a domain-specific ChatMessage.
-
-    Creates one block for each part in the message content.
-    """
-    blocks = []
-
-    for part in message.parts:
-        if part.type == "text":
-            blocks.append(
-                ChatContent(type=PrimitiveTypeEnum.text, content=part.text)
-            )
-        elif part.type == "reasoning":
-            blocks.append(
-                ChatContent(type=PrimitiveTypeEnum.text, content=part.text)
-            )
-        elif part.type == "file":
-            blocks.append(
-                file_to_content(part.url)  # type: ignore
-            )
-        elif part.type.startswith("tool-"):
-            raise NotImplementedError(
-                "Tool call part handling is not implemented yet."
-            )
-        elif part.type == "dynamic-tool":
-            raise NotImplementedError(
-                "Dynamic tool part handling is not implemented yet."
-            )
-        elif part.type == "step-start":
-            # Step boundaries might not need content blocks
-            continue
-        elif part.type in ["source-url", "source-document"]:
-            raise NotImplementedError(
-                "Source part handling is not implemented yet."
-            )
-        elif part.type.startswith("data-"):
-            raise NotImplementedError(
-                "Data part handling is not implemented yet."
-            )
-        else:
-            # Log unknown part types for debugging
-            raise ValueError(f"Unknown part type: {part.type}")
-
-    # If no blocks were created, raise an error
-    if not blocks:
-        raise ValueError(
-            "No valid content blocks created from UIMessage parts."
-        )
-
-    return ChatMessage(
-        role=MessageRole(message.role),
-        blocks=blocks,
-    )
-
-
-def create_chat_flow_endpoint(app: FastAPI, flow: Flow) -> None:
-    """
-    Create a chat endpoint for the given Flow.
-
-    This creates an endpoint at /flows/{flow_id}/chat that follows the
-    AI SDK UI/React request format and responds with streaming data.
-
-    Args:
-        app: The FastAPI application instance
-        flow: The Flow to create an endpoint for
-    """
-    flow_id = flow.id
-
-    async def handle_chat_data(request: ChatRequest) -> StreamingResponse:
-        """Handle chat requests for the specific flow."""
-
-        try:
-            # Convert AI SDK UI request to domain ChatMessages
-            messages = _ui_request_to_domain_type(request)
-            if not len(messages):
-                raise ValueError("No input messages received")
-
-            # Pop the last message as the current input
-            current_input = messages.pop()
-            if current_input.role != MessageRole.user:
-                raise ValueError(
-                    f"Unexpected input {current_input} from non user role: {current_input.role}"
-                )
-
-            flow_copy = flow.model_copy(deep=True)
-
-            input_variable = [
-                var for var in flow_copy.inputs if var.type == ChatMessage
-            ][0]
-            input_variable.value = current_input
-
-            # Pass conversation context to flow execution for memory population
-            execution_kwargs: Any = {
-                "session_id": request.id,  # Use request ID as session identifier
-                "conversation_history": messages,
-            }
-
-            # Create a streaming generator for the flow execution
-            stream_generator, result_future = create_streaming_generator(
-                execute_flow, flow_copy, **execution_kwargs
-            )
-        except Exception as e:
-            error_chunk = ErrorChunk(errorText=str(e))
-            response = StreamingResponse(
-                [
-                    f"data: {error_chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-                ],
-                media_type="text/plain; charset=utf-8",
-            )
-            response.headers["x-vercel-ai-ui-message-stream"] = "v1"
-            return response
-
-        # Create generator that formats messages according to AI SDK UI streaming protocol
-        def vercel_ai_formatter() -> Generator[str, None, None]:
-            """Format stream data according to AI SDK UI streaming protocol."""
-
-            # Send start chunk
-            start_chunk = StartChunk(messageId=str(uuid.uuid4()))  # type: ignore
-            yield f"data: {start_chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-
-            # Track text content for proper streaming
-            text_id = str(uuid.uuid4())
-            text_started = False
-
-            for step, message in stream_generator:
-                if isinstance(message, ChatMessage):
-                    # Convert ChatMessage to text content
-                    content = " ".join(
-                        [
-                            block.content
-                            for block in message.blocks
-                            if hasattr(block, "content") and block.content
-                        ]
-                    )
-                    if content.strip():
-                        # Start text block if not started
-                        if not text_started:
-                            text_start = TextStartChunk(id=text_id)
-                            yield f"data: {text_start.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-                            text_started = True
-
-                        # Send text delta
-                        text_delta = TextDeltaChunk(id=text_id, delta=content)
-                        yield f"data: {text_delta.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-                else:
-                    # Handle other message types as text deltas
-                    text_content = str(message)
-                    if text_content.strip():
-                        # Start text block if not started
-                        if not text_started:
-                            text_start = TextStartChunk(id=text_id)
-                            yield f"data: {text_start.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-                            text_started = True
-
-                        # Send text delta
-                        text_delta = TextDeltaChunk(
-                            id=text_id, delta=text_content
-                        )
-                        yield f"data: {text_delta.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-
-            # End text block if it was started
-            if text_started:
-                text_end = TextEndChunk(id=text_id)
-                yield f"data: {text_end.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-
-            # Send finish chunk
-            try:
-                result_future.result(timeout=5.0)
-                finish_chunk = FinishChunk()
-                yield f"data: {finish_chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-            except Exception as e:
-                # Send error
-                error_chunk = ErrorChunk(errorText=str(e))
-                logging.error(
-                    f"Error during flow execution: {e}", exc_info=True
-                )
-                yield f"data: {error_chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-
-        response = StreamingResponse(
-            vercel_ai_formatter(), media_type="text/plain; charset=utf-8"
-        )
-        response.headers["x-vercel-ai-ui-message-stream"] = "v1"
-        return response
-
-    # Add the endpoint to the FastAPI app
-    app.post(
-        f"/flows/{flow_id}/chat",
-        tags=["chat"],
-        summary=f"Chat with {flow_id} flow",
-        description=flow.description,
-        response_class=StreamingResponse,
-    )(handle_chat_data)
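
To make the deleted streaming protocol easier to follow: each chunk above is written to the response as a Server-Sent-Events-style "data:" line followed by a blank line. A minimal sketch of the resulting wire format, using plain json.dumps in place of the pydantic serialization and hypothetical ids ("msg-1", "txt-1"):

# Sketch only (not part of the package): the SSE-style framing the old
# vercel_ai_formatter produced. Chunk shapes follow the models in the
# (also deleted) qtype/interpreter/chat/vercel.py; ids are hypothetical.
import json


def sse_frame(chunk: dict) -> str:
    """Frame one chunk as the formatter did: a 'data: <json>' line plus a blank line."""
    return f"data: {json.dumps(chunk)}\n\n"


stream = [
    sse_frame({"type": "start", "messageId": "msg-1"}),
    sse_frame({"type": "text-start", "id": "txt-1"}),
    sse_frame({"type": "text-delta", "id": "txt-1", "delta": "Hello"}),
    sse_frame({"type": "text-end", "id": "txt-1"}),
    sse_frame({"type": "finish"}),
]
print("".join(stream))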
qtype/interpreter/chat/vercel.py
DELETED

@@ -1,314 +0,0 @@
-"""
-Pydantic models for Vercel AI SDK UI types.
-
-This module reproduces the exact TypeScript type shapes from the AI SDK UI
-as Pydantic models for use in Python implementations.
-"""
-
-from __future__ import annotations
-
-from typing import Any, Literal, Union
-
-from pydantic import BaseModel, Field
-
-
-# Provider metadata
-class ProviderMetadata(BaseModel):
-    """Provider-specific metadata.
-
-    Reproduces: ProviderMetadata from ui/ui-message-chunks.ts
-    """
-
-    model_config = {"extra": "allow"}
-
-
-# UI Message Parts
-class TextUIPart(BaseModel):
-    """A text part of a message.
-
-    Reproduces: TextUIPart from ui/ui-messages.ts
-    """
-
-    type: Literal["text"] = "text"
-    text: str
-    state: Literal["streaming", "done"] | None = None
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class ReasoningUIPart(BaseModel):
-    """A reasoning part of a message.
-
-    Reproduces: ReasoningUIPart from ui/ui-messages.ts
-    """
-
-    type: Literal["reasoning"] = "reasoning"
-    text: str
-    state: Literal["streaming", "done"] | None = None
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class SourceUrlUIPart(BaseModel):
-    """A source URL part of a message.
-
-    Reproduces: SourceUrlUIPart from ui/ui-messages.ts
-    """
-
-    type: Literal["source-url"] = "source-url"
-    source_id: str = Field(alias="sourceId")
-    url: str
-    title: str | None = None
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class SourceDocumentUIPart(BaseModel):
-    """A document source part of a message.
-
-    Reproduces: SourceDocumentUIPart from ui/ui-messages.ts
-    """
-
-    type: Literal["source-document"] = "source-document"
-    source_id: str = Field(alias="sourceId")
-    media_type: str = Field(alias="mediaType")
-    title: str
-    filename: str | None = None
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class FileUIPart(BaseModel):
-    """A file part of a message.
-
-    Reproduces: FileUIPart from ui/ui-messages.ts
-    """
-
-    type: Literal["file"] = "file"
-    media_type: str = Field(alias="mediaType")
-    filename: str | None = None
-    url: str
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class StepStartUIPart(BaseModel):
-    """A step boundary part of a message.
-
-    Reproduces: StepStartUIPart from ui/ui-messages.ts
-    """
-
-    type: Literal["step-start"] = "step-start"
-
-
-# Union type for UI message parts
-UIMessagePart = Union[
-    TextUIPart,
-    ReasoningUIPart,
-    SourceUrlUIPart,
-    SourceDocumentUIPart,
-    FileUIPart,
-    StepStartUIPart,
-]
-
-
-# UI Message
-class UIMessage(BaseModel):
-    """AI SDK UI Message.
-
-    Reproduces: UIMessage from ui/ui-messages.ts
-    """
-
-    id: str
-    role: Literal["system", "user", "assistant"]
-    metadata: dict[str, Any] | None = None
-    parts: list[UIMessagePart]
-
-
-# Chat Request (the request body sent from frontend)
-class ChatRequest(BaseModel):
-    """Chat request format sent from AI SDK UI/React.
-
-    Reproduces: ChatRequest from ui/chat-transport.ts
-    """
-
-    id: str  # chatId
-    messages: list[UIMessage]
-    trigger: Literal["submit-message", "regenerate-message"]
-    message_id: str | None = Field(default=None, alias="messageId")
-
-
-# UI Message Chunks (streaming events)
-class TextStartChunk(BaseModel):
-    """Text start chunk.
-
-    Reproduces: TextStartChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["text-start"] = "text-start"
-    id: str
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class TextDeltaChunk(BaseModel):
-    """Text delta chunk.
-
-    Reproduces: TextDeltaChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["text-delta"] = "text-delta"
-    id: str
-    delta: str
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class TextEndChunk(BaseModel):
-    """Text end chunk.
-
-    Reproduces: TextEndChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["text-end"] = "text-end"
-    id: str
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class ReasoningStartChunk(BaseModel):
-    """Reasoning start chunk.
-
-    Reproduces: ReasoningStartChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["reasoning-start"] = "reasoning-start"
-    id: str
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class ReasoningDeltaChunk(BaseModel):
-    """Reasoning delta chunk.
-
-    Reproduces: ReasoningDeltaChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["reasoning-delta"] = "reasoning-delta"
-    id: str
-    delta: str
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class ReasoningEndChunk(BaseModel):
-    """Reasoning end chunk.
-
-    Reproduces: ReasoningEndChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["reasoning-end"] = "reasoning-end"
-    id: str
-    provider_metadata: ProviderMetadata | None = Field(
-        default=None, alias="providerMetadata"
-    )
-
-
-class ErrorChunk(BaseModel):
-    """Error chunk.
-
-    Reproduces: ErrorChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["error"] = "error"
-    error_text: str = Field(alias="errorText")
-
-
-class StartStepChunk(BaseModel):
-    """Start step chunk.
-
-    Reproduces: StartStepChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["start-step"] = "start-step"
-
-
-class FinishStepChunk(BaseModel):
-    """Finish step chunk.
-
-    Reproduces: FinishStepChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["finish-step"] = "finish-step"
-
-
-class StartChunk(BaseModel):
-    """Start chunk.
-
-    Reproduces: StartChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["start"] = "start"
-    message_id: str | None = Field(default=None, alias="messageId")
-    message_metadata: dict[str, Any] | None = Field(
-        default=None, alias="messageMetadata"
-    )
-
-
-class FinishChunk(BaseModel):
-    """Finish chunk.
-
-    Reproduces: FinishChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["finish"] = "finish"
-    message_metadata: dict[str, Any] | None = Field(
-        default=None, alias="messageMetadata"
-    )
-
-
-class AbortChunk(BaseModel):
-    """Abort chunk.
-
-    Reproduces: AbortChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["abort"] = "abort"
-
-
-class MessageMetadataChunk(BaseModel):
-    """Message metadata chunk.
-
-    Reproduces: MessageMetadataChunk from ui/ui-message-chunks.ts
-    """
-
-    type: Literal["message-metadata"] = "message-metadata"
-    message_metadata: dict[str, Any] = Field(alias="messageMetadata")
-
-
-# Union type for all UI message chunks
-UIMessageChunk = Union[
-    TextStartChunk,
-    TextDeltaChunk,
-    TextEndChunk,
-    ReasoningStartChunk,
-    ReasoningDeltaChunk,
-    ReasoningEndChunk,
-    ErrorChunk,
-    StartStepChunk,
-    FinishStepChunk,
-    StartChunk,
-    FinishChunk,
-    AbortChunk,
-    MessageMetadataChunk,
-]
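
A quick note on the aliases in these models: the old endpoint serialized them with by_alias=True, which restores the camelCase keys the AI SDK UI expects on the wire. A minimal, self-contained sketch with pydantic v2, reproducing only ErrorChunk:

# Sketch: why the chunk models carry camelCase aliases (pydantic v2).
from typing import Literal

from pydantic import BaseModel, Field


class ErrorChunk(BaseModel):
    type: Literal["error"] = "error"
    error_text: str = Field(alias="errorText")


# Populate by alias (as the deleted chat_api.py did), then dump by alias
# to recover the exact camelCase shape the AI SDK UI consumes.
chunk = ErrorChunk(errorText="boom")
print(chunk.model_dump_json(by_alias=True))
# {"type":"error","errorText":"boom"}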
qtype/interpreter/exceptions.py
DELETED

@@ -1,10 +0,0 @@
-from typing import Any
-
-
-class InterpreterError(Exception):
-    """Base exception class for ProtoGen interpreter errors."""
-
-    def __init__(self, message: str, details: Any = None) -> None:
-        super().__init__(message)
-        self.message = message
-        self.details = details
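
In 0.1.1 this class is superseded by qtype/interpreter/base/exceptions.py (+50 lines in the list above). A hypothetical usage sketch of the deleted class, for reference; the message and details values are illustrative:

# Sketch: how callers typically raised the deleted InterpreterError,
# with a human-readable message and optional structured details.
from typing import Any


class InterpreterError(Exception):
    def __init__(self, message: str, details: Any = None) -> None:
        super().__init__(message)
        self.message = message
        self.details = details


try:
    raise InterpreterError("Unsupported step type: Search", details={"step_id": "s1"})
except InterpreterError as e:
    print(e.message, e.details)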
qtype/interpreter/step.py
DELETED

@@ -1,67 +0,0 @@
-from __future__ import annotations
-
-import logging
-from typing import Any
-
-from qtype.interpreter.exceptions import InterpreterError
-from qtype.interpreter.steps import (
-    agent,
-    condition,
-    decoder,
-    llm_inference,
-    prompt_template,
-    search,
-    tool,
-)
-from qtype.semantic.model import (
-    Agent,
-    Condition,
-    Decoder,
-    Flow,
-    Invoke,
-    LLMInference,
-    PromptTemplate,
-    Search,
-    Step,
-    Variable,
-)
-
-logger = logging.getLogger(__name__)
-
-
-def execute_step(step: Step, **kwargs: dict[str, Any]) -> list[Variable]:
-    """Execute a single step within a flow.
-
-    Args:
-        step: The step to execute.
-        **kwargs: Additional keyword arguments.
-    """
-    logger.debug(f"Executing step: {step.id} with kwargs: {kwargs}")
-
-    unset_inputs = [input for input in step.inputs if not input.is_set()]
-    if unset_inputs:
-        raise InterpreterError(
-            f"The following inputs are required but have no values: {', '.join([input.id for input in unset_inputs])}"
-        )
-
-    if isinstance(step, Agent):
-        return agent.execute(step=step, **kwargs)  # type: ignore[arg-type]
-    elif isinstance(step, Condition):
-        return condition.execute(condition=step, **kwargs)
-    elif isinstance(step, Decoder):
-        return decoder.execute(step=step, **kwargs)  # type: ignore[arg-type]
-    elif isinstance(step, Flow):
-        from .flow import execute_flow
-
-        return execute_flow(step, **kwargs)  # type: ignore[arg-type]
-    elif isinstance(step, LLMInference):
-        return llm_inference.execute(step, **kwargs)  # type: ignore[arg-type]
-    elif isinstance(step, PromptTemplate):
-        return prompt_template.execute(step, **kwargs)  # type: ignore[arg-type]
-    elif isinstance(step, Search):
-        return search.execute(step, **kwargs)  # type: ignore[arg-type]
-    elif isinstance(step, Invoke):
-        return tool.execute(step, **kwargs)  # type: ignore[arg-type]
-    else:
-        # Handle other step types if necessary
-        raise InterpreterError(f"Unsupported step type: {type(step).__name__}")
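
This isinstance chain is the dispatch that 0.1.1 replaces with per-step executors and a factory (qtype/interpreter/base/factory.py and qtype/interpreter/executors/* in the list above). A registry-based sketch of that general pattern; the class and function names below are illustrative, not the actual 0.1.1 API:

# Illustrative registry dispatch (not the real qtype 0.1.1 code): a mapping
# from step type to executor callable replaces the long isinstance chain.
from typing import Any, Callable, Dict, List, Type


class Step: ...
class Agent(Step): ...
class Decoder(Step): ...


EXECUTORS: Dict[Type[Step], Callable[..., List[str]]] = {
    Agent: lambda step, **kw: [f"agent ran with {kw}"],
    Decoder: lambda step, **kw: [f"decoder ran with {kw}"],
}


def execute_step(step: Step, **kwargs: Any) -> List[str]:
    # Look up the executor for the concrete step type; unknown types fail
    # loudly, mirroring the old "Unsupported step type" error.
    try:
        executor = EXECUTORS[type(step)]
    except KeyError:
        raise ValueError(f"Unsupported step type: {type(step).__name__}")
    return executor(step, **kwargs)


print(execute_step(Agent(), session_id="s1"))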