qtype 0.0.12__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. qtype/application/commons/tools.py +1 -1
  2. qtype/application/converters/tools_from_api.py +476 -11
  3. qtype/application/converters/tools_from_module.py +38 -14
  4. qtype/application/converters/types.py +15 -30
  5. qtype/application/documentation.py +1 -1
  6. qtype/application/facade.py +102 -85
  7. qtype/base/types.py +227 -7
  8. qtype/cli.py +5 -1
  9. qtype/commands/convert.py +52 -6
  10. qtype/commands/generate.py +44 -4
  11. qtype/commands/run.py +78 -36
  12. qtype/commands/serve.py +74 -44
  13. qtype/commands/validate.py +37 -14
  14. qtype/commands/visualize.py +46 -25
  15. qtype/dsl/__init__.py +6 -5
  16. qtype/dsl/custom_types.py +1 -1
  17. qtype/dsl/domain_types.py +86 -5
  18. qtype/dsl/linker.py +384 -0
  19. qtype/dsl/loader.py +315 -0
  20. qtype/dsl/model.py +751 -263
  21. qtype/dsl/parser.py +200 -0
  22. qtype/dsl/types.py +50 -0
  23. qtype/interpreter/api.py +63 -136
  24. qtype/interpreter/auth/aws.py +19 -9
  25. qtype/interpreter/auth/generic.py +93 -16
  26. qtype/interpreter/base/base_step_executor.py +436 -0
  27. qtype/interpreter/base/batch_step_executor.py +171 -0
  28. qtype/interpreter/base/exceptions.py +50 -0
  29. qtype/interpreter/base/executor_context.py +91 -0
  30. qtype/interpreter/base/factory.py +84 -0
  31. qtype/interpreter/base/progress_tracker.py +110 -0
  32. qtype/interpreter/base/secrets.py +339 -0
  33. qtype/interpreter/base/step_cache.py +74 -0
  34. qtype/interpreter/base/stream_emitter.py +469 -0
  35. qtype/interpreter/conversions.py +471 -22
  36. qtype/interpreter/converters.py +79 -0
  37. qtype/interpreter/endpoints.py +355 -0
  38. qtype/interpreter/executors/agent_executor.py +242 -0
  39. qtype/interpreter/executors/aggregate_executor.py +93 -0
  40. qtype/interpreter/executors/bedrock_reranker_executor.py +195 -0
  41. qtype/interpreter/executors/decoder_executor.py +163 -0
  42. qtype/interpreter/executors/doc_to_text_executor.py +112 -0
  43. qtype/interpreter/executors/document_embedder_executor.py +107 -0
  44. qtype/interpreter/executors/document_search_executor.py +113 -0
  45. qtype/interpreter/executors/document_source_executor.py +118 -0
  46. qtype/interpreter/executors/document_splitter_executor.py +105 -0
  47. qtype/interpreter/executors/echo_executor.py +63 -0
  48. qtype/interpreter/executors/field_extractor_executor.py +165 -0
  49. qtype/interpreter/executors/file_source_executor.py +101 -0
  50. qtype/interpreter/executors/file_writer_executor.py +110 -0
  51. qtype/interpreter/executors/index_upsert_executor.py +232 -0
  52. qtype/interpreter/executors/invoke_embedding_executor.py +92 -0
  53. qtype/interpreter/executors/invoke_flow_executor.py +51 -0
  54. qtype/interpreter/executors/invoke_tool_executor.py +358 -0
  55. qtype/interpreter/executors/llm_inference_executor.py +272 -0
  56. qtype/interpreter/executors/prompt_template_executor.py +78 -0
  57. qtype/interpreter/executors/sql_source_executor.py +106 -0
  58. qtype/interpreter/executors/vector_search_executor.py +91 -0
  59. qtype/interpreter/flow.py +173 -22
  60. qtype/interpreter/logging_progress.py +61 -0
  61. qtype/interpreter/metadata_api.py +115 -0
  62. qtype/interpreter/resource_cache.py +5 -4
  63. qtype/interpreter/rich_progress.py +225 -0
  64. qtype/interpreter/stream/chat/__init__.py +15 -0
  65. qtype/interpreter/stream/chat/converter.py +391 -0
  66. qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
  67. qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
  68. qtype/interpreter/stream/chat/vercel.py +609 -0
  69. qtype/interpreter/stream/utils/__init__.py +15 -0
  70. qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
  71. qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
  72. qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
  73. qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
  74. qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
  75. qtype/interpreter/telemetry.py +135 -8
  76. qtype/interpreter/tools/__init__.py +5 -0
  77. qtype/interpreter/tools/function_tool_helper.py +265 -0
  78. qtype/interpreter/types.py +330 -0
  79. qtype/interpreter/typing.py +83 -89
  80. qtype/interpreter/ui/404/index.html +1 -1
  81. qtype/interpreter/ui/404.html +1 -1
  82. qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
  83. qtype/interpreter/ui/_next/static/chunks/434-b2112d19f25c44ff.js +36 -0
  84. qtype/interpreter/ui/_next/static/chunks/{964-ed4ab073db645007.js → 964-2b041321a01cbf56.js} +1 -1
  85. qtype/interpreter/ui/_next/static/chunks/app/{layout-5ccbc44fd528d089.js → layout-a05273ead5de2c41.js} +1 -1
  86. qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
  87. qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
  88. qtype/interpreter/ui/_next/static/chunks/{main-6d261b6c5d6fb6c2.js → main-e26b9cb206da2cac.js} +1 -1
  89. qtype/interpreter/ui/_next/static/chunks/webpack-08642e441b39b6c2.js +1 -0
  90. qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
  91. qtype/interpreter/ui/_next/static/media/4cf2300e9c8272f7-s.p.woff2 +0 -0
  92. qtype/interpreter/ui/icon.png +0 -0
  93. qtype/interpreter/ui/index.html +1 -1
  94. qtype/interpreter/ui/index.txt +5 -5
  95. qtype/semantic/checker.py +643 -0
  96. qtype/semantic/generate.py +268 -85
  97. qtype/semantic/loader.py +95 -0
  98. qtype/semantic/model.py +535 -163
  99. qtype/semantic/resolver.py +63 -19
  100. qtype/semantic/visualize.py +50 -35
  101. {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/METADATA +21 -4
  102. qtype-0.1.3.dist-info/RECORD +137 -0
  103. qtype/dsl/base_types.py +0 -38
  104. qtype/dsl/validator.py +0 -464
  105. qtype/interpreter/batch/__init__.py +0 -0
  106. qtype/interpreter/batch/flow.py +0 -95
  107. qtype/interpreter/batch/sql_source.py +0 -95
  108. qtype/interpreter/batch/step.py +0 -63
  109. qtype/interpreter/batch/types.py +0 -41
  110. qtype/interpreter/batch/utils.py +0 -179
  111. qtype/interpreter/chat/chat_api.py +0 -237
  112. qtype/interpreter/chat/vercel.py +0 -314
  113. qtype/interpreter/exceptions.py +0 -10
  114. qtype/interpreter/step.py +0 -67
  115. qtype/interpreter/steps/__init__.py +0 -0
  116. qtype/interpreter/steps/agent.py +0 -114
  117. qtype/interpreter/steps/condition.py +0 -36
  118. qtype/interpreter/steps/decoder.py +0 -88
  119. qtype/interpreter/steps/llm_inference.py +0 -150
  120. qtype/interpreter/steps/prompt_template.py +0 -54
  121. qtype/interpreter/steps/search.py +0 -24
  122. qtype/interpreter/steps/tool.py +0 -53
  123. qtype/interpreter/streaming_helpers.py +0 -123
  124. qtype/interpreter/ui/_next/static/chunks/736-7fc606e244fedcb1.js +0 -36
  125. qtype/interpreter/ui/_next/static/chunks/app/page-c72e847e888e549d.js +0 -1
  126. qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
  127. qtype/interpreter/ui/_next/static/chunks/webpack-8289c17c67827f22.js +0 -1
  128. qtype/interpreter/ui/_next/static/css/a262c53826df929b.css +0 -3
  129. qtype/interpreter/ui/_next/static/media/569ce4b8f30dc480-s.p.woff2 +0 -0
  130. qtype/interpreter/ui/favicon.ico +0 -0
  131. qtype/loader.py +0 -389
  132. qtype-0.0.12.dist-info/RECORD +0 -105
  133. /qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
  134. {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/WHEEL +0 -0
  135. {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/entry_points.txt +0 -0
  136. {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/licenses/LICENSE +0 -0
  137. {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/top_level.txt +0 -0
@@ -1,314 +0,0 @@
1
- """
2
- Pydantic models for Vercel AI SDK UI types.
3
-
4
- This module reproduces the exact TypeScript type shapes from the AI SDK UI
5
- as Pydantic models for use in Python implementations.
6
- """
7
-
8
- from __future__ import annotations
9
-
10
- from typing import Any, Literal, Union
11
-
12
- from pydantic import BaseModel, Field
13
-
14
-
15
- # Provider metadata
16
- class ProviderMetadata(BaseModel):
17
- """Provider-specific metadata.
18
-
19
- Reproduces: ProviderMetadata from ui/ui-message-chunks.ts
20
- """
21
-
22
- model_config = {"extra": "allow"}
23
-
24
-
25
- # UI Message Parts
26
- class TextUIPart(BaseModel):
27
- """A text part of a message.
28
-
29
- Reproduces: TextUIPart from ui/ui-messages.ts
30
- """
31
-
32
- type: Literal["text"] = "text"
33
- text: str
34
- state: Literal["streaming", "done"] | None = None
35
- provider_metadata: ProviderMetadata | None = Field(
36
- default=None, alias="providerMetadata"
37
- )
38
-
39
-
40
- class ReasoningUIPart(BaseModel):
41
- """A reasoning part of a message.
42
-
43
- Reproduces: ReasoningUIPart from ui/ui-messages.ts
44
- """
45
-
46
- type: Literal["reasoning"] = "reasoning"
47
- text: str
48
- state: Literal["streaming", "done"] | None = None
49
- provider_metadata: ProviderMetadata | None = Field(
50
- default=None, alias="providerMetadata"
51
- )
52
-
53
-
54
- class SourceUrlUIPart(BaseModel):
55
- """A source URL part of a message.
56
-
57
- Reproduces: SourceUrlUIPart from ui/ui-messages.ts
58
- """
59
-
60
- type: Literal["source-url"] = "source-url"
61
- source_id: str = Field(alias="sourceId")
62
- url: str
63
- title: str | None = None
64
- provider_metadata: ProviderMetadata | None = Field(
65
- default=None, alias="providerMetadata"
66
- )
67
-
68
-
69
- class SourceDocumentUIPart(BaseModel):
70
- """A document source part of a message.
71
-
72
- Reproduces: SourceDocumentUIPart from ui/ui-messages.ts
73
- """
74
-
75
- type: Literal["source-document"] = "source-document"
76
- source_id: str = Field(alias="sourceId")
77
- media_type: str = Field(alias="mediaType")
78
- title: str
79
- filename: str | None = None
80
- provider_metadata: ProviderMetadata | None = Field(
81
- default=None, alias="providerMetadata"
82
- )
83
-
84
-
85
- class FileUIPart(BaseModel):
86
- """A file part of a message.
87
-
88
- Reproduces: FileUIPart from ui/ui-messages.ts
89
- """
90
-
91
- type: Literal["file"] = "file"
92
- media_type: str = Field(alias="mediaType")
93
- filename: str | None = None
94
- url: str
95
- provider_metadata: ProviderMetadata | None = Field(
96
- default=None, alias="providerMetadata"
97
- )
98
-
99
-
100
- class StepStartUIPart(BaseModel):
101
- """A step boundary part of a message.
102
-
103
- Reproduces: StepStartUIPart from ui/ui-messages.ts
104
- """
105
-
106
- type: Literal["step-start"] = "step-start"
107
-
108
-
109
- # Union type for UI message parts
110
- UIMessagePart = Union[
111
- TextUIPart,
112
- ReasoningUIPart,
113
- SourceUrlUIPart,
114
- SourceDocumentUIPart,
115
- FileUIPart,
116
- StepStartUIPart,
117
- ]
118
-
119
-
120
- # UI Message
121
- class UIMessage(BaseModel):
122
- """AI SDK UI Message.
123
-
124
- Reproduces: UIMessage from ui/ui-messages.ts
125
- """
126
-
127
- id: str
128
- role: Literal["system", "user", "assistant"]
129
- metadata: dict[str, Any] | None = None
130
- parts: list[UIMessagePart]
131
-
132
-
133
- # Chat Request (the request body sent from frontend)
134
- class ChatRequest(BaseModel):
135
- """Chat request format sent from AI SDK UI/React.
136
-
137
- Reproduces: ChatRequest from ui/chat-transport.ts
138
- """
139
-
140
- id: str # chatId
141
- messages: list[UIMessage]
142
- trigger: Literal["submit-message", "regenerate-message"]
143
- message_id: str | None = Field(default=None, alias="messageId")
144
-
145
-
146
- # UI Message Chunks (streaming events)
147
- class TextStartChunk(BaseModel):
148
- """Text start chunk.
149
-
150
- Reproduces: TextStartChunk from ui/ui-message-chunks.ts
151
- """
152
-
153
- type: Literal["text-start"] = "text-start"
154
- id: str
155
- provider_metadata: ProviderMetadata | None = Field(
156
- default=None, alias="providerMetadata"
157
- )
158
-
159
-
160
- class TextDeltaChunk(BaseModel):
161
- """Text delta chunk.
162
-
163
- Reproduces: TextDeltaChunk from ui/ui-message-chunks.ts
164
- """
165
-
166
- type: Literal["text-delta"] = "text-delta"
167
- id: str
168
- delta: str
169
- provider_metadata: ProviderMetadata | None = Field(
170
- default=None, alias="providerMetadata"
171
- )
172
-
173
-
174
- class TextEndChunk(BaseModel):
175
- """Text end chunk.
176
-
177
- Reproduces: TextEndChunk from ui/ui-message-chunks.ts
178
- """
179
-
180
- type: Literal["text-end"] = "text-end"
181
- id: str
182
- provider_metadata: ProviderMetadata | None = Field(
183
- default=None, alias="providerMetadata"
184
- )
185
-
186
-
187
- class ReasoningStartChunk(BaseModel):
188
- """Reasoning start chunk.
189
-
190
- Reproduces: ReasoningStartChunk from ui/ui-message-chunks.ts
191
- """
192
-
193
- type: Literal["reasoning-start"] = "reasoning-start"
194
- id: str
195
- provider_metadata: ProviderMetadata | None = Field(
196
- default=None, alias="providerMetadata"
197
- )
198
-
199
-
200
- class ReasoningDeltaChunk(BaseModel):
201
- """Reasoning delta chunk.
202
-
203
- Reproduces: ReasoningDeltaChunk from ui/ui-message-chunks.ts
204
- """
205
-
206
- type: Literal["reasoning-delta"] = "reasoning-delta"
207
- id: str
208
- delta: str
209
- provider_metadata: ProviderMetadata | None = Field(
210
- default=None, alias="providerMetadata"
211
- )
212
-
213
-
214
- class ReasoningEndChunk(BaseModel):
215
- """Reasoning end chunk.
216
-
217
- Reproduces: ReasoningEndChunk from ui/ui-message-chunks.ts
218
- """
219
-
220
- type: Literal["reasoning-end"] = "reasoning-end"
221
- id: str
222
- provider_metadata: ProviderMetadata | None = Field(
223
- default=None, alias="providerMetadata"
224
- )
225
-
226
-
227
- class ErrorChunk(BaseModel):
228
- """Error chunk.
229
-
230
- Reproduces: ErrorChunk from ui/ui-message-chunks.ts
231
- """
232
-
233
- type: Literal["error"] = "error"
234
- error_text: str = Field(alias="errorText")
235
-
236
-
237
- class StartStepChunk(BaseModel):
238
- """Start step chunk.
239
-
240
- Reproduces: StartStepChunk from ui/ui-message-chunks.ts
241
- """
242
-
243
- type: Literal["start-step"] = "start-step"
244
-
245
-
246
- class FinishStepChunk(BaseModel):
247
- """Finish step chunk.
248
-
249
- Reproduces: FinishStepChunk from ui/ui-message-chunks.ts
250
- """
251
-
252
- type: Literal["finish-step"] = "finish-step"
253
-
254
-
255
- class StartChunk(BaseModel):
256
- """Start chunk.
257
-
258
- Reproduces: StartChunk from ui/ui-message-chunks.ts
259
- """
260
-
261
- type: Literal["start"] = "start"
262
- message_id: str | None = Field(default=None, alias="messageId")
263
- message_metadata: dict[str, Any] | None = Field(
264
- default=None, alias="messageMetadata"
265
- )
266
-
267
-
268
- class FinishChunk(BaseModel):
269
- """Finish chunk.
270
-
271
- Reproduces: FinishChunk from ui/ui-message-chunks.ts
272
- """
273
-
274
- type: Literal["finish"] = "finish"
275
- message_metadata: dict[str, Any] | None = Field(
276
- default=None, alias="messageMetadata"
277
- )
278
-
279
-
280
- class AbortChunk(BaseModel):
281
- """Abort chunk.
282
-
283
- Reproduces: AbortChunk from ui/ui-message-chunks.ts
284
- """
285
-
286
- type: Literal["abort"] = "abort"
287
-
288
-
289
- class MessageMetadataChunk(BaseModel):
290
- """Message metadata chunk.
291
-
292
- Reproduces: MessageMetadataChunk from ui/ui-message-chunks.ts
293
- """
294
-
295
- type: Literal["message-metadata"] = "message-metadata"
296
- message_metadata: dict[str, Any] = Field(alias="messageMetadata")
297
-
298
-
299
- # Union type for all UI message chunks
300
- UIMessageChunk = Union[
301
- TextStartChunk,
302
- TextDeltaChunk,
303
- TextEndChunk,
304
- ReasoningStartChunk,
305
- ReasoningDeltaChunk,
306
- ReasoningEndChunk,
307
- ErrorChunk,
308
- StartStepChunk,
309
- FinishStepChunk,
310
- StartChunk,
311
- FinishChunk,
312
- AbortChunk,
313
- MessageMetadataChunk,
314
- ]
@@ -1,10 +0,0 @@
1
- from typing import Any
2
-
3
-
4
- class InterpreterError(Exception):
5
- """Base exception class for ProtoGen interpreter errors."""
6
-
7
- def __init__(self, message: str, details: Any = None) -> None:
8
- super().__init__(message)
9
- self.message = message
10
- self.details = details
qtype/interpreter/step.py DELETED
@@ -1,67 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import logging
4
- from typing import Any
5
-
6
- from qtype.interpreter.exceptions import InterpreterError
7
- from qtype.interpreter.steps import (
8
- agent,
9
- condition,
10
- decoder,
11
- llm_inference,
12
- prompt_template,
13
- search,
14
- tool,
15
- )
16
- from qtype.semantic.model import (
17
- Agent,
18
- Condition,
19
- Decoder,
20
- Flow,
21
- LLMInference,
22
- PromptTemplate,
23
- Search,
24
- Step,
25
- Tool,
26
- Variable,
27
- )
28
-
29
- logger = logging.getLogger(__name__)
30
-
31
-
32
- def execute_step(step: Step, **kwargs: dict[str, Any]) -> list[Variable]:
33
- """Execute a single step within a flow.
34
-
35
- Args:
36
- step: The step to execute.
37
- **kwargs: Additional keyword arguments.
38
- """
39
- logger.debug(f"Executing step: {step.id} with kwargs: {kwargs}")
40
-
41
- unset_inputs = [input for input in step.inputs if not input.is_set()]
42
- if unset_inputs:
43
- raise InterpreterError(
44
- f"The following inputs are required but have no values: {', '.join([input.id for input in unset_inputs])}"
45
- )
46
-
47
- if isinstance(step, Agent):
48
- return agent.execute(step=step, **kwargs) # type: ignore[arg-type]
49
- elif isinstance(step, Condition):
50
- return condition.execute(condition=step, **kwargs)
51
- elif isinstance(step, Decoder):
52
- return decoder.execute(step=step, **kwargs) # type: ignore[arg-type]
53
- elif isinstance(step, Flow):
54
- from .flow import execute_flow
55
-
56
- return execute_flow(step, **kwargs) # type: ignore[arg-type]
57
- elif isinstance(step, LLMInference):
58
- return llm_inference.execute(step, **kwargs) # type: ignore[arg-type]
59
- elif isinstance(step, PromptTemplate):
60
- return prompt_template.execute(step, **kwargs) # type: ignore[arg-type]
61
- elif isinstance(step, Search):
62
- return search.execute(step, **kwargs) # type: ignore[arg-type]
63
- elif isinstance(step, Tool):
64
- return tool.execute(step, **kwargs) # type: ignore[arg-type]
65
- else:
66
- # Handle other step types if necessary
67
- raise InterpreterError(f"Unsupported step type: {type(step).__name__}")
File without changes
@@ -1,114 +0,0 @@
1
- import asyncio
2
- import importlib
3
- import logging
4
- from typing import Any
5
-
6
- from llama_index.core.agent.workflow import ReActAgent
7
- from llama_index.core.base.llms.types import ChatMessage as LlamaChatMessage
8
- from llama_index.core.tools import AsyncBaseTool, FunctionTool
9
- from llama_index.core.workflow import Context
10
- from llama_index.core.workflow.handler import WorkflowHandler # type: ignore
11
-
12
- from qtype.dsl.domain_types import ChatMessage
13
- from qtype.interpreter.conversions import (
14
- from_chat_message,
15
- to_chat_message,
16
- to_llm,
17
- to_memory,
18
- )
19
- from qtype.interpreter.exceptions import InterpreterError
20
- from qtype.semantic.model import Agent, APITool, PythonFunctionTool, Variable
21
-
22
- logger = logging.getLogger(__name__)
23
-
24
-
25
- def to_llama_tool(tool: PythonFunctionTool) -> AsyncBaseTool:
26
- """Convert a qtype Tool to a LlamaIndex Tool."""
27
- # We want to get the function named by the tool -- get ".tools.<tool_name>"
28
- # This assumes the tool name matches a function in the .tools module
29
- module = importlib.import_module(tool.module_path)
30
- function = getattr(module, tool.function_name, None)
31
- if function is None:
32
- raise ValueError(
33
- f"Tool function '{tool.function_name}' not found in module '{tool.module_path}'."
34
- )
35
-
36
- return FunctionTool.from_defaults(
37
- fn=function, name=tool.name, description=tool.description
38
- )
39
-
40
-
41
- def execute(agent: Agent, **kwargs: dict[str, Any]) -> list[Variable]:
42
- """Execute an agent step.
43
-
44
- Args:
45
- agent: The agent step to execute.
46
- **kwargs: Additional keyword arguments.
47
- """
48
- logger.debug(f"Executing agent step: {agent.id}")
49
- if len(agent.outputs) != 1:
50
- raise InterpreterError(
51
- "LLMInference step must have exactly one output variable."
52
- )
53
- output_variable = agent.outputs[0]
54
-
55
- # prepare the input for the agent
56
- if len(agent.inputs) != 1:
57
- # TODO: Support multiple inputs by shoving it into the chat history?
58
- raise InterpreterError(
59
- "Agent step must have exactly one input variable."
60
- )
61
-
62
- input_variable = agent.inputs[0]
63
- if input_variable.type == ChatMessage:
64
- input: LlamaChatMessage | str = to_chat_message(input_variable.value) # type: ignore
65
- else:
66
- input: LlamaChatMessage | str = input_variable.value # type: ignore
67
-
68
- # Pepare the tools
69
- # TODO: support api tools
70
- if any(isinstance(tool, APITool) for tool in agent.tools):
71
- raise NotImplementedError(
72
- "APITool is not supported in the current implementation. Please use PythonFunctionTool."
73
- )
74
- tools = [
75
- to_llama_tool(tool) # type: ignore
76
- for tool in (agent.tools if agent.tools else [])
77
- ]
78
-
79
- # prep memory
80
- # Note to_memory is a cached resource so this will get existing memory if available
81
- memory = (
82
- to_memory(kwargs.get("session_id"), agent.memory)
83
- if agent.memory
84
- else None
85
- )
86
-
87
- # Run the agent
88
- async def run_agent() -> WorkflowHandler:
89
- logger.debug(
90
- f"Starting agent '{agent.id}' execution with input length: {len(str(input))} (ReAct mode)"
91
- )
92
- re_agent = ReActAgent(
93
- name=agent.id,
94
- tools=tools, # type: ignore
95
- system_prompt=agent.system_message,
96
- llm=to_llm(agent.model, agent.system_message), # type: ignore
97
- )
98
- ctx = Context(re_agent) # type: ignore
99
- # TODO: implement checkpoint_callback to call stream_fn?
100
- handler = re_agent.run(input, chat_memory=memory, ctx=ctx)
101
- result = await handler
102
- logger.debug(
103
- f"Agent '{agent.id}' execution completed successfully (ReAct mode)"
104
- )
105
- return result
106
-
107
- result = asyncio.run(run_agent())
108
-
109
- if output_variable.type == ChatMessage:
110
- output_variable.value = from_chat_message(result.response) # type: ignore
111
- else:
112
- output_variable.value = result.response.content # type: ignore
113
-
114
- return agent.outputs
@@ -1,36 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from typing import Any
4
-
5
- from qtype.semantic.model import Condition, Variable
6
-
7
-
8
- def execute(condition: Condition, **kwargs: dict[str, Any]) -> list[Variable]:
9
- """Execute a condition step.
10
-
11
- Args:
12
- condition: The condition step to execute.
13
-
14
- Returns:
15
- A list of variables that are set based on the condition evaluation.
16
- """
17
- from qtype.interpreter.step import execute_step
18
-
19
- if not condition.inputs:
20
- raise ValueError(
21
- "Condition step requires at least one input variable."
22
- )
23
-
24
- if len(condition.inputs) != 1:
25
- raise ValueError(
26
- f"Condition step {condition.id} must have exactly one input, found {len(condition.inputs)}."
27
- )
28
- input_var = condition.inputs[0]
29
- if condition.equals.value == input_var.value: # type: ignore
30
- # If the condition is met, return the outputs
31
- return execute_step(condition.then, **kwargs)
32
- elif condition.else_:
33
- return execute_step(condition.else_, **kwargs)
34
- else:
35
- # If no else branch is defined, return an empty list
36
- return []
@@ -1,88 +0,0 @@
1
- import json
2
- import xml.etree.ElementTree as ET
3
- from typing import Any
4
-
5
- from qtype.dsl.model import DecoderFormat
6
- from qtype.semantic.model import Decoder, Variable
7
-
8
-
9
- def parse_json(input: str) -> dict[str, Any]:
10
- """Parse a JSON string into a Python object."""
11
- try:
12
- cleaned_response = input.strip()
13
- if cleaned_response.startswith("```json"):
14
- cleaned_response = cleaned_response[7:]
15
- if cleaned_response.endswith("```"):
16
- cleaned_response = cleaned_response[:-3]
17
- cleaned_response = cleaned_response.strip()
18
-
19
- # Parse the JSON
20
- parsed = json.loads(cleaned_response)
21
- if not isinstance(parsed, dict):
22
- raise ValueError(f"Parsed JSON is not an object: {parsed}")
23
- return parsed
24
- except json.JSONDecodeError as e:
25
- raise ValueError(f"Invalid JSON input: {e}")
26
-
27
-
28
- def parse_xml(input: str) -> dict[str, Any]:
29
- """Parse an XML string into a Python object."""
30
- try:
31
- cleaned_response = input.strip()
32
- if cleaned_response.startswith("```xml"):
33
- cleaned_response = cleaned_response[6:]
34
- if cleaned_response.endswith("```"):
35
- cleaned_response = cleaned_response[:-3]
36
- cleaned_response = cleaned_response.strip()
37
-
38
- cleaned_response = cleaned_response.replace("&", "&amp;")
39
- tree = ET.fromstring(cleaned_response)
40
- result = {c.tag: c.text for c in tree}
41
-
42
- return result
43
- except Exception as e:
44
- raise ValueError(f"Invalid XML input: {e}")
45
-
46
-
47
- def parse(input: str, format: DecoderFormat) -> dict[str, Any]:
48
- if format == DecoderFormat.json:
49
- return parse_json(input)
50
- elif format == DecoderFormat.xml:
51
- return parse_xml(input)
52
- else:
53
- raise ValueError(
54
- f"Unsupported decoder format: {format}. Supported formats are: {DecoderFormat.json}, {DecoderFormat.xml}."
55
- )
56
-
57
-
58
- def execute(decoder: Decoder, **kwargs: dict[str, Any]) -> list[Variable]:
59
- """Execute a decoder step with the provided arguments.
60
-
61
- Args:
62
- decoder: The decoder step to execute.
63
- **kwargs: Additional keyword arguments.
64
- """
65
-
66
- if len(decoder.inputs) != 1:
67
- raise ValueError(
68
- f"Decoder step {decoder.id} must have exactly one input, found {len(decoder.inputs)}."
69
- )
70
-
71
- # get the string value to decode
72
- input = decoder.inputs[0].value
73
- if not isinstance(input, str):
74
- raise ValueError(
75
- f"Input to decoder step {decoder.id} must be a string, found {type(input).__name__}."
76
- )
77
-
78
- result_dict = parse(input, decoder.format)
79
-
80
- # Set the output variables with the parsed results
81
- for output in decoder.outputs:
82
- if output.id in result_dict:
83
- output.value = result_dict[output.id]
84
- else:
85
- raise ValueError(
86
- f"Output variable {output.id} not found in decoded result: {result_dict}"
87
- )
88
- return decoder.outputs # type: ignore[no-any-return]