qtype 0.0.12__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qtype/application/commons/tools.py +1 -1
- qtype/application/converters/tools_from_api.py +476 -11
- qtype/application/converters/tools_from_module.py +38 -14
- qtype/application/converters/types.py +15 -30
- qtype/application/documentation.py +1 -1
- qtype/application/facade.py +102 -85
- qtype/base/types.py +227 -7
- qtype/cli.py +5 -1
- qtype/commands/convert.py +52 -6
- qtype/commands/generate.py +44 -4
- qtype/commands/run.py +78 -36
- qtype/commands/serve.py +74 -44
- qtype/commands/validate.py +37 -14
- qtype/commands/visualize.py +46 -25
- qtype/dsl/__init__.py +6 -5
- qtype/dsl/custom_types.py +1 -1
- qtype/dsl/domain_types.py +86 -5
- qtype/dsl/linker.py +384 -0
- qtype/dsl/loader.py +315 -0
- qtype/dsl/model.py +751 -263
- qtype/dsl/parser.py +200 -0
- qtype/dsl/types.py +50 -0
- qtype/interpreter/api.py +63 -136
- qtype/interpreter/auth/aws.py +19 -9
- qtype/interpreter/auth/generic.py +93 -16
- qtype/interpreter/base/base_step_executor.py +436 -0
- qtype/interpreter/base/batch_step_executor.py +171 -0
- qtype/interpreter/base/exceptions.py +50 -0
- qtype/interpreter/base/executor_context.py +91 -0
- qtype/interpreter/base/factory.py +84 -0
- qtype/interpreter/base/progress_tracker.py +110 -0
- qtype/interpreter/base/secrets.py +339 -0
- qtype/interpreter/base/step_cache.py +74 -0
- qtype/interpreter/base/stream_emitter.py +469 -0
- qtype/interpreter/conversions.py +471 -22
- qtype/interpreter/converters.py +79 -0
- qtype/interpreter/endpoints.py +355 -0
- qtype/interpreter/executors/agent_executor.py +242 -0
- qtype/interpreter/executors/aggregate_executor.py +93 -0
- qtype/interpreter/executors/bedrock_reranker_executor.py +195 -0
- qtype/interpreter/executors/decoder_executor.py +163 -0
- qtype/interpreter/executors/doc_to_text_executor.py +112 -0
- qtype/interpreter/executors/document_embedder_executor.py +107 -0
- qtype/interpreter/executors/document_search_executor.py +113 -0
- qtype/interpreter/executors/document_source_executor.py +118 -0
- qtype/interpreter/executors/document_splitter_executor.py +105 -0
- qtype/interpreter/executors/echo_executor.py +63 -0
- qtype/interpreter/executors/field_extractor_executor.py +165 -0
- qtype/interpreter/executors/file_source_executor.py +101 -0
- qtype/interpreter/executors/file_writer_executor.py +110 -0
- qtype/interpreter/executors/index_upsert_executor.py +232 -0
- qtype/interpreter/executors/invoke_embedding_executor.py +92 -0
- qtype/interpreter/executors/invoke_flow_executor.py +51 -0
- qtype/interpreter/executors/invoke_tool_executor.py +358 -0
- qtype/interpreter/executors/llm_inference_executor.py +272 -0
- qtype/interpreter/executors/prompt_template_executor.py +78 -0
- qtype/interpreter/executors/sql_source_executor.py +106 -0
- qtype/interpreter/executors/vector_search_executor.py +91 -0
- qtype/interpreter/flow.py +173 -22
- qtype/interpreter/logging_progress.py +61 -0
- qtype/interpreter/metadata_api.py +115 -0
- qtype/interpreter/resource_cache.py +5 -4
- qtype/interpreter/rich_progress.py +225 -0
- qtype/interpreter/stream/chat/__init__.py +15 -0
- qtype/interpreter/stream/chat/converter.py +391 -0
- qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
- qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
- qtype/interpreter/stream/chat/vercel.py +609 -0
- qtype/interpreter/stream/utils/__init__.py +15 -0
- qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
- qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
- qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
- qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
- qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
- qtype/interpreter/telemetry.py +135 -8
- qtype/interpreter/tools/__init__.py +5 -0
- qtype/interpreter/tools/function_tool_helper.py +265 -0
- qtype/interpreter/types.py +330 -0
- qtype/interpreter/typing.py +83 -89
- qtype/interpreter/ui/404/index.html +1 -1
- qtype/interpreter/ui/404.html +1 -1
- qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
- qtype/interpreter/ui/_next/static/chunks/434-b2112d19f25c44ff.js +36 -0
- qtype/interpreter/ui/_next/static/chunks/{964-ed4ab073db645007.js → 964-2b041321a01cbf56.js} +1 -1
- qtype/interpreter/ui/_next/static/chunks/app/{layout-5ccbc44fd528d089.js → layout-a05273ead5de2c41.js} +1 -1
- qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
- qtype/interpreter/ui/_next/static/chunks/{main-6d261b6c5d6fb6c2.js → main-e26b9cb206da2cac.js} +1 -1
- qtype/interpreter/ui/_next/static/chunks/webpack-08642e441b39b6c2.js +1 -0
- qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
- qtype/interpreter/ui/_next/static/media/4cf2300e9c8272f7-s.p.woff2 +0 -0
- qtype/interpreter/ui/icon.png +0 -0
- qtype/interpreter/ui/index.html +1 -1
- qtype/interpreter/ui/index.txt +5 -5
- qtype/semantic/checker.py +643 -0
- qtype/semantic/generate.py +268 -85
- qtype/semantic/loader.py +95 -0
- qtype/semantic/model.py +535 -163
- qtype/semantic/resolver.py +63 -19
- qtype/semantic/visualize.py +50 -35
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/METADATA +21 -4
- qtype-0.1.3.dist-info/RECORD +137 -0
- qtype/dsl/base_types.py +0 -38
- qtype/dsl/validator.py +0 -464
- qtype/interpreter/batch/__init__.py +0 -0
- qtype/interpreter/batch/flow.py +0 -95
- qtype/interpreter/batch/sql_source.py +0 -95
- qtype/interpreter/batch/step.py +0 -63
- qtype/interpreter/batch/types.py +0 -41
- qtype/interpreter/batch/utils.py +0 -179
- qtype/interpreter/chat/chat_api.py +0 -237
- qtype/interpreter/chat/vercel.py +0 -314
- qtype/interpreter/exceptions.py +0 -10
- qtype/interpreter/step.py +0 -67
- qtype/interpreter/steps/__init__.py +0 -0
- qtype/interpreter/steps/agent.py +0 -114
- qtype/interpreter/steps/condition.py +0 -36
- qtype/interpreter/steps/decoder.py +0 -88
- qtype/interpreter/steps/llm_inference.py +0 -150
- qtype/interpreter/steps/prompt_template.py +0 -54
- qtype/interpreter/steps/search.py +0 -24
- qtype/interpreter/steps/tool.py +0 -53
- qtype/interpreter/streaming_helpers.py +0 -123
- qtype/interpreter/ui/_next/static/chunks/736-7fc606e244fedcb1.js +0 -36
- qtype/interpreter/ui/_next/static/chunks/app/page-c72e847e888e549d.js +0 -1
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
- qtype/interpreter/ui/_next/static/chunks/webpack-8289c17c67827f22.js +0 -1
- qtype/interpreter/ui/_next/static/css/a262c53826df929b.css +0 -3
- qtype/interpreter/ui/_next/static/media/569ce4b8f30dc480-s.p.woff2 +0 -0
- qtype/interpreter/ui/favicon.ico +0 -0
- qtype/loader.py +0 -389
- qtype-0.0.12.dist-info/RECORD +0 -105
- /qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/WHEEL +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/entry_points.txt +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/licenses/LICENSE +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/top_level.txt +0 -0
qtype/interpreter/executors/index_upsert_executor.py (new file, +232)

```diff
@@ -0,0 +1,232 @@
+"""Index upsert executor for inserting documents/chunks into indexes."""
+
+from __future__ import annotations
+
+import logging
+import uuid
+from typing import AsyncIterator
+
+from llama_index.core.schema import TextNode
+from opensearchpy import AsyncOpenSearch
+from pydantic import BaseModel
+
+from qtype.dsl.domain_types import RAGChunk, RAGDocument
+from qtype.interpreter.base.batch_step_executor import BatchedStepExecutor
+from qtype.interpreter.base.executor_context import ExecutorContext
+from qtype.interpreter.conversions import (
+    to_llama_vector_store_and_retriever,
+    to_opensearch_client,
+)
+from qtype.interpreter.types import FlowMessage
+from qtype.semantic.model import DocumentIndex, IndexUpsert, VectorIndex
+
+logger = logging.getLogger(__name__)
+
+
+class IndexUpsertExecutor(BatchedStepExecutor):
+    """Executor for IndexUpsert steps supporting both vector and document indexes."""
+
+    def __init__(
+        self, step: IndexUpsert, context: ExecutorContext, **dependencies
+    ):
+        super().__init__(step, context, **dependencies)
+        if not isinstance(step, IndexUpsert):
+            raise ValueError(
+                "IndexUpsertExecutor can only execute IndexUpsert steps."
+            )
+        self.step: IndexUpsert = step
+
+        # Determine index type and initialize appropriate client
+        if isinstance(self.step.index, VectorIndex):
+            # Vector index for RAGChunk embeddings
+            self._vector_store, _ = to_llama_vector_store_and_retriever(
+                self.step.index, self.context.secret_manager
+            )
+            self.index_type = "vector"
+        elif isinstance(self.step.index, DocumentIndex):
+            # Document index for text-based search
+            self._opensearch_client: AsyncOpenSearch = to_opensearch_client(
+                self.step.index, self.context.secret_manager
+            )
+            self._vector_store = None
+            self.index_type = "document"
+            self.index_name = self.step.index.name
+            self._document_index: DocumentIndex = self.step.index
+        else:
+            raise ValueError(
+                f"Unsupported index type: {type(self.step.index)}"
+            )
+
+    async def finalize(self) -> AsyncIterator[FlowMessage]:
+        """Clean up resources after all messages are processed."""
+        if hasattr(self, "_opensearch_client") and self._opensearch_client:
+            try:
+                await self._opensearch_client.close()
+            except Exception:
+                pass
+        # Make this an async generator
+        return
+        yield  # type: ignore[unreachable]
+
+    async def process_batch(
+        self, batch: list[FlowMessage]
+    ) -> AsyncIterator[FlowMessage]:
+        """Process a batch of FlowMessages for the IndexUpsert step.
+
+        Args:
+            batch: A list of FlowMessages to process.
+
+        Yields:
+            FlowMessages: Success messages after upserting to the index
+        """
+        logger.debug(
+            f"Executing IndexUpsert step: {self.step.id} with batch size: {len(batch)}"
+        )
+        if len(batch) == 0:
+            return
+
+        try:
+            if self.index_type == "vector":
+                result_iter = self._upsert_to_vector_store(batch)
+            else:
+                result_iter = self._upsert_to_document_index(batch)
+            async for message in result_iter:
+                yield message
+
+        except Exception as e:
+            logger.error(f"Error in IndexUpsert step {self.step.id}: {e}")
+            # Emit error event to stream so frontend can display it
+            await self.stream_emitter.error(str(e))
+
+            # Mark all messages with the error and yield them
+            for message in batch:
+                message.set_error(self.step.id, e)
+                yield message
+
+    async def _upsert_to_vector_store(
+        self, batch: list[FlowMessage]
+    ) -> AsyncIterator[FlowMessage]:
+        """Upsert items to vector store.
+
+        Args:
+            items: List of RAGChunk or RAGDocument objects
+        """
+        # safe since semantic validation checks input length
+        input_var = self.step.inputs[0]
+
+        # Collect all RAGChunks or RAGDocuments from the batch inputs
+        items = []
+        for message in batch:
+            input_data = message.variables.get(input_var.id)
+            if not isinstance(input_data, (RAGChunk, RAGDocument)):
+                raise ValueError(
+                    f"IndexUpsert only supports RAGChunk or RAGDocument "
+                    f"inputs. Got: {type(input_data)}"
+                )
+            items.append(input_data)
+
+        # Convert to LlamaIndex TextNode objects
+        nodes = []
+        for item in items:
+            if isinstance(item, RAGChunk):
+                node = TextNode(
+                    id_=item.chunk_id,
+                    text=str(item.content),
+                    metadata=item.metadata,
+                    embedding=item.vector,
+                )
+            else:  # RAGDocument
+                # For documents, use file_id and convert content to string
+                node = TextNode(
+                    id_=item.file_id,
+                    text=str(item.content),
+                    metadata=item.metadata,
+                    embedding=None,  # Documents don't have embeddings
+                )
+            nodes.append(node)
+
+        # Batch upsert all nodes to the vector store
+        await self._vector_store.async_add(nodes)
+        num_inserted = len(items)
+
+        # Emit status update
+        await self.stream_emitter.status(
+            f"Upserted {num_inserted} items to index {self.step.index.name}"
+        )
+        for message in batch:
+            yield message
+
+    async def _upsert_to_document_index(
+        self, batch: list[FlowMessage]
+    ) -> AsyncIterator[FlowMessage]:
+        """Upsert items to document index using bulk API.
+
+        Args:
+            batch: List of FlowMessages containing documents to upsert
+        """
+
+        bulk_body = []
+        message_by_id: dict[str, FlowMessage] = {}
+
+        for message in batch:
+            # Collect all input variables into a single document dict
+            doc_dict = {}
+            for input_var in self.step.inputs:
+                value = message.variables.get(input_var.id)
+
+                # Convert to dict if it's a Pydantic model
+                if isinstance(value, BaseModel):
+                    value = value.model_dump()
+
+                # Merge into document dict
+                if isinstance(value, dict):
+                    doc_dict.update(value)
+                else:
+                    # Primitive types - use variable name as field name
+                    doc_dict[input_var.id] = value
+
+            # Determine the document id field
+            id_field = None
+            if self._document_index.id_field is not None:
+                id_field = self._document_index.id_field
+                if id_field not in doc_dict:
+                    raise ValueError(
+                        f"Specified id_field '{id_field}' not found in inputs"
+                    )
+            else:
+                # Auto-detect with fallback
+                for field in ["_id", "id", "doc_id", "document_id"]:
+                    if field in doc_dict:
+                        id_field = field
+                        break
+            if id_field is not None:
+                doc_id = str(doc_dict[id_field])
+            else:
+                # Generate a UUID if no id field found
+                doc_id = str(uuid.uuid4())
+
+            # Add bulk action and document
+            bulk_body.append(
+                {"index": {"_index": self.index_name, "_id": doc_id}}
+            )
+            bulk_body.append(doc_dict)
+            message_by_id[doc_id] = message
+
+        # Execute bulk request asynchronously
+        response = await self._opensearch_client.bulk(body=bulk_body)
+
+        num_inserted = 0
+        for item in response["items"]:
+            doc_id = item["index"]["_id"]
+            message = message_by_id[doc_id]
+            if "error" in item.get("index", {}):
+                message.set_error(
+                    self.step.id,
+                    Exception(item["index"]["error"]),
+                )
+            else:
+                num_inserted += 1
+            yield message
+        await self.stream_emitter.status(
+            f"Upserted {num_inserted} items to index {self.step.index.name}, {len(batch) - num_inserted} errors occurred."
+        )
```
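In the document-index path above, `bulk_body` is assembled as alternating action/metadata and source dicts, which is the standard OpenSearch bulk format, and per-document failures are read back from `response["items"]`. Below is a minimal standalone sketch of that shape, assuming the `opensearch-py` client and a hypothetical local `products` index (the index name and documents are illustrative, not from the package):

```python
# Minimal sketch of the bulk request/response shape the executor relies on.
# Index name, documents, and connection settings here are hypothetical.
from opensearchpy import OpenSearch

client = OpenSearch(hosts=[{"host": "localhost", "port": 9200}])

bulk_body = [
    {"index": {"_index": "products", "_id": "doc-1"}},  # action + metadata
    {"title": "First document", "price": 10},           # source document
    {"index": {"_index": "products", "_id": "doc-2"}},
    {"title": "Second document", "price": 20},
]

response = client.bulk(body=bulk_body)

# Each entry in response["items"] mirrors one action, in order; failed
# actions carry an "error" key, which is what the executor checks per message.
for item in response["items"]:
    status = "error" if "error" in item.get("index", {}) else "ok"
    print(item["index"]["_id"], status)
```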
qtype/interpreter/executors/invoke_embedding_executor.py (new file, +92)

```diff
@@ -0,0 +1,92 @@
+from typing import AsyncIterator
+
+from openinference.semconv.trace import OpenInferenceSpanKindValues
+
+from qtype.base.types import PrimitiveTypeEnum
+from qtype.dsl.domain_types import Embedding
+from qtype.interpreter.base.base_step_executor import StepExecutor
+from qtype.interpreter.base.executor_context import ExecutorContext
+from qtype.interpreter.conversions import to_embedding_model
+from qtype.interpreter.types import FlowMessage
+from qtype.semantic.model import InvokeEmbedding
+
+
+class InvokeEmbeddingExecutor(StepExecutor):
+    """Executor for InvokeEmbedding steps."""
+
+    # Embedding operations should be marked as EMBEDDING type
+    span_kind = OpenInferenceSpanKindValues.EMBEDDING
+
+    def __init__(
+        self, step: InvokeEmbedding, context: ExecutorContext, **dependencies
+    ):
+        super().__init__(step, context, **dependencies)
+        if not isinstance(step, InvokeEmbedding):
+            raise ValueError(
+                (
+                    "InvokeEmbeddingExecutor can only execute "
+                    "InvokeEmbedding steps."
+                )
+            )
+        self.step: InvokeEmbedding = step
+        # Initialize the embedding model once for the executor
+        self.embedding_model = to_embedding_model(self.step.model)
+
+    async def process_message(
+        self,
+        message: FlowMessage,
+    ) -> AsyncIterator[FlowMessage]:
+        """Process a single FlowMessage for the InvokeEmbedding step.
+
+        Args:
+            message: The FlowMessage to process.
+        Yields:
+            FlowMessage with embedding.
+        """
+        input_id = self.step.inputs[0].id
+        input_type = self.step.inputs[0].type
+        output_id = self.step.outputs[0].id
+
+        try:
+            # Get the input value
+            input_value = message.variables.get(input_id)
+
+            if input_value is None:
+                raise ValueError(f"Input variable '{input_id}' is missing")
+
+            # Generate embedding based on input type
+            if input_type == PrimitiveTypeEnum.text:
+                if not isinstance(input_value, str):
+                    input_value = str(input_value)
+                vector = await self.embedding_model.aget_text_embedding(
+                    text=input_value
+                )
+                content = input_value
+            elif input_type == PrimitiveTypeEnum.image:
+                # For image embeddings
+                vector = await self.embedding_model.aget_image_embedding(
+                    image_path=input_value
+                )
+                content = input_value
+            else:
+                raise ValueError(
+                    (
+                        f"Unsupported input type for embedding: "
+                        f"{input_type}. Must be 'text' or 'image'."
+                    )
+                )
+
+            # Create the Embedding object
+            embedding = Embedding(
+                vector=vector,
+                content=content,
+            )
+
+            # Yield the result
+            yield message.copy_with_variables({output_id: embedding})
+
+        except Exception as e:
+            # Emit error event to stream so frontend can display it
+            await self.stream_emitter.error(str(e))
+            message.set_error(self.step.id, e)
+            yield message
```
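The executor above delegates the actual embedding call to whatever LlamaIndex model `to_embedding_model(step.model)` returns. A minimal sketch of the async text-embedding call it relies on, using an OpenAI embedding model purely as an assumed stand-in (the concrete model class in qtype depends on the step's `model` definition):

```python
# Sketch only: exercises the LlamaIndex aget_text_embedding coroutine the
# executor awaits. Assumes llama-index-embeddings-openai is installed and
# OPENAI_API_KEY is set; the model name is an illustrative choice.
import asyncio

from llama_index.embeddings.openai import OpenAIEmbedding


async def main() -> None:
    model = OpenAIEmbedding(model="text-embedding-3-small")
    vector = await model.aget_text_embedding("What does IndexUpsert do?")
    # The executor wraps this list[float] in a qtype Embedding domain object.
    print(f"dimensions: {len(vector)}")


asyncio.run(main())
```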
qtype/interpreter/executors/invoke_flow_executor.py (new file, +51)

```diff
@@ -0,0 +1,51 @@
+from typing import AsyncIterator
+
+from qtype.interpreter.base.base_step_executor import StepExecutor
+from qtype.interpreter.base.executor_context import ExecutorContext
+from qtype.interpreter.types import FlowMessage
+from qtype.semantic.model import InvokeFlow
+
+
+class InvokeFlowExecutor(StepExecutor):
+    """Executor for InvokeFlow steps."""
+
+    def __init__(
+        self, step: InvokeFlow, context: ExecutorContext, **dependencies
+    ):
+        super().__init__(step, context, **dependencies)
+        if not isinstance(step, InvokeFlow):
+            raise ValueError(
+                ("InvokeFlowExecutor can only execute InvokeFlow steps.")
+            )
+        self.step: InvokeFlow = step
+
+    async def process_message(
+        self, message: FlowMessage
+    ) -> AsyncIterator[FlowMessage]:
+        """Process a single FlowMessage for the InvokeFlow step.
+
+        Args:
+            message: The FlowMessage to process.
+        Yields:
+            FlowMessage with results from the invoked flow.
+        """
+        from qtype.interpreter.flow import run_flow
+
+        initial = message.copy_with_variables(
+            {
+                id: message.variables.get(var.id)
+                for var, id in self.step.input_bindings.items()
+            }
+        )
+        # Pass through context (already available as self.context)
+        result = await run_flow(
+            self.step.flow, [initial], context=self.context
+        )
+
+        for msg in result:
+            yield msg.copy_with_variables(
+                {
+                    var.id: msg.variables.get(id)
+                    for var, id in self.step.output_bindings.items()
+                }
+            )
```
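`InvokeFlowExecutor` is essentially a variable-remapping wrapper around `run_flow`: parent variables are copied onto the child flow's input ids before the call, and the child flow's outputs are copied back under the parent's variable ids afterwards. A standalone sketch of that remapping, with plain dicts standing in for `FlowMessage.variables` and hypothetical binding maps in place of `step.input_bindings` / `step.output_bindings`:

```python
# Sketch of the two dict comprehensions in InvokeFlowExecutor.process_message.
# Strings stand in for the semantic Variable objects; all names are illustrative.
parent_message = {"user_question": "What is qtype?"}

# parent variable id -> child-flow input id (mirrors input_bindings)
input_bindings = {"user_question": "question"}
child_inputs = {
    child_id: parent_message.get(parent_id)
    for parent_id, child_id in input_bindings.items()
}
# {'question': 'What is qtype?'} -- what the invoked flow receives

child_result = {"answer": "A DSL for building AI applications."}

# parent variable id -> child-flow output id (mirrors output_bindings)
output_bindings = {"flow_answer": "answer"}
parent_updates = {
    parent_id: child_result.get(child_id)
    for parent_id, child_id in output_bindings.items()
}
# {'flow_answer': 'A DSL for building AI applications.'} -- merged back onto the parent
print(child_inputs, parent_updates)
```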