qtype 0.0.12__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qtype/application/commons/tools.py +1 -1
- qtype/application/converters/tools_from_api.py +476 -11
- qtype/application/converters/tools_from_module.py +38 -14
- qtype/application/converters/types.py +15 -30
- qtype/application/documentation.py +1 -1
- qtype/application/facade.py +102 -85
- qtype/base/types.py +227 -7
- qtype/cli.py +5 -1
- qtype/commands/convert.py +52 -6
- qtype/commands/generate.py +44 -4
- qtype/commands/run.py +78 -36
- qtype/commands/serve.py +74 -44
- qtype/commands/validate.py +37 -14
- qtype/commands/visualize.py +46 -25
- qtype/dsl/__init__.py +6 -5
- qtype/dsl/custom_types.py +1 -1
- qtype/dsl/domain_types.py +86 -5
- qtype/dsl/linker.py +384 -0
- qtype/dsl/loader.py +315 -0
- qtype/dsl/model.py +751 -263
- qtype/dsl/parser.py +200 -0
- qtype/dsl/types.py +50 -0
- qtype/interpreter/api.py +63 -136
- qtype/interpreter/auth/aws.py +19 -9
- qtype/interpreter/auth/generic.py +93 -16
- qtype/interpreter/base/base_step_executor.py +436 -0
- qtype/interpreter/base/batch_step_executor.py +171 -0
- qtype/interpreter/base/exceptions.py +50 -0
- qtype/interpreter/base/executor_context.py +91 -0
- qtype/interpreter/base/factory.py +84 -0
- qtype/interpreter/base/progress_tracker.py +110 -0
- qtype/interpreter/base/secrets.py +339 -0
- qtype/interpreter/base/step_cache.py +74 -0
- qtype/interpreter/base/stream_emitter.py +469 -0
- qtype/interpreter/conversions.py +471 -22
- qtype/interpreter/converters.py +79 -0
- qtype/interpreter/endpoints.py +355 -0
- qtype/interpreter/executors/agent_executor.py +242 -0
- qtype/interpreter/executors/aggregate_executor.py +93 -0
- qtype/interpreter/executors/bedrock_reranker_executor.py +195 -0
- qtype/interpreter/executors/decoder_executor.py +163 -0
- qtype/interpreter/executors/doc_to_text_executor.py +112 -0
- qtype/interpreter/executors/document_embedder_executor.py +107 -0
- qtype/interpreter/executors/document_search_executor.py +113 -0
- qtype/interpreter/executors/document_source_executor.py +118 -0
- qtype/interpreter/executors/document_splitter_executor.py +105 -0
- qtype/interpreter/executors/echo_executor.py +63 -0
- qtype/interpreter/executors/field_extractor_executor.py +165 -0
- qtype/interpreter/executors/file_source_executor.py +101 -0
- qtype/interpreter/executors/file_writer_executor.py +110 -0
- qtype/interpreter/executors/index_upsert_executor.py +232 -0
- qtype/interpreter/executors/invoke_embedding_executor.py +92 -0
- qtype/interpreter/executors/invoke_flow_executor.py +51 -0
- qtype/interpreter/executors/invoke_tool_executor.py +358 -0
- qtype/interpreter/executors/llm_inference_executor.py +272 -0
- qtype/interpreter/executors/prompt_template_executor.py +78 -0
- qtype/interpreter/executors/sql_source_executor.py +106 -0
- qtype/interpreter/executors/vector_search_executor.py +91 -0
- qtype/interpreter/flow.py +173 -22
- qtype/interpreter/logging_progress.py +61 -0
- qtype/interpreter/metadata_api.py +115 -0
- qtype/interpreter/resource_cache.py +5 -4
- qtype/interpreter/rich_progress.py +225 -0
- qtype/interpreter/stream/chat/__init__.py +15 -0
- qtype/interpreter/stream/chat/converter.py +391 -0
- qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
- qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
- qtype/interpreter/stream/chat/vercel.py +609 -0
- qtype/interpreter/stream/utils/__init__.py +15 -0
- qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
- qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
- qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
- qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
- qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
- qtype/interpreter/telemetry.py +135 -8
- qtype/interpreter/tools/__init__.py +5 -0
- qtype/interpreter/tools/function_tool_helper.py +265 -0
- qtype/interpreter/types.py +330 -0
- qtype/interpreter/typing.py +83 -89
- qtype/interpreter/ui/404/index.html +1 -1
- qtype/interpreter/ui/404.html +1 -1
- qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
- qtype/interpreter/ui/_next/static/chunks/434-b2112d19f25c44ff.js +36 -0
- qtype/interpreter/ui/_next/static/chunks/{964-ed4ab073db645007.js → 964-2b041321a01cbf56.js} +1 -1
- qtype/interpreter/ui/_next/static/chunks/app/{layout-5ccbc44fd528d089.js → layout-a05273ead5de2c41.js} +1 -1
- qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
- qtype/interpreter/ui/_next/static/chunks/{main-6d261b6c5d6fb6c2.js → main-e26b9cb206da2cac.js} +1 -1
- qtype/interpreter/ui/_next/static/chunks/webpack-08642e441b39b6c2.js +1 -0
- qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
- qtype/interpreter/ui/_next/static/media/4cf2300e9c8272f7-s.p.woff2 +0 -0
- qtype/interpreter/ui/icon.png +0 -0
- qtype/interpreter/ui/index.html +1 -1
- qtype/interpreter/ui/index.txt +5 -5
- qtype/semantic/checker.py +643 -0
- qtype/semantic/generate.py +268 -85
- qtype/semantic/loader.py +95 -0
- qtype/semantic/model.py +535 -163
- qtype/semantic/resolver.py +63 -19
- qtype/semantic/visualize.py +50 -35
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/METADATA +21 -4
- qtype-0.1.3.dist-info/RECORD +137 -0
- qtype/dsl/base_types.py +0 -38
- qtype/dsl/validator.py +0 -464
- qtype/interpreter/batch/__init__.py +0 -0
- qtype/interpreter/batch/flow.py +0 -95
- qtype/interpreter/batch/sql_source.py +0 -95
- qtype/interpreter/batch/step.py +0 -63
- qtype/interpreter/batch/types.py +0 -41
- qtype/interpreter/batch/utils.py +0 -179
- qtype/interpreter/chat/chat_api.py +0 -237
- qtype/interpreter/chat/vercel.py +0 -314
- qtype/interpreter/exceptions.py +0 -10
- qtype/interpreter/step.py +0 -67
- qtype/interpreter/steps/__init__.py +0 -0
- qtype/interpreter/steps/agent.py +0 -114
- qtype/interpreter/steps/condition.py +0 -36
- qtype/interpreter/steps/decoder.py +0 -88
- qtype/interpreter/steps/llm_inference.py +0 -150
- qtype/interpreter/steps/prompt_template.py +0 -54
- qtype/interpreter/steps/search.py +0 -24
- qtype/interpreter/steps/tool.py +0 -53
- qtype/interpreter/streaming_helpers.py +0 -123
- qtype/interpreter/ui/_next/static/chunks/736-7fc606e244fedcb1.js +0 -36
- qtype/interpreter/ui/_next/static/chunks/app/page-c72e847e888e549d.js +0 -1
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
- qtype/interpreter/ui/_next/static/chunks/webpack-8289c17c67827f22.js +0 -1
- qtype/interpreter/ui/_next/static/css/a262c53826df929b.css +0 -3
- qtype/interpreter/ui/_next/static/media/569ce4b8f30dc480-s.p.woff2 +0 -0
- qtype/interpreter/ui/favicon.ico +0 -0
- qtype/loader.py +0 -389
- qtype-0.0.12.dist-info/RECORD +0 -105
- /qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/WHEEL +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/entry_points.txt +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/licenses/LICENSE +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/top_level.txt +0 -0
qtype/interpreter/steps/llm_inference.py
DELETED
@@ -1,150 +0,0 @@
-import logging
-from typing import Any, Callable
-
-from llama_index.core.base.llms.types import ChatResponse, CompletionResponse
-
-from qtype.dsl.domain_types import ChatMessage, Embedding
-from qtype.interpreter.conversions import (
-    from_chat_message,
-    to_chat_message,
-    to_embedding_model,
-    to_llm,
-    to_memory,
-)
-from qtype.interpreter.exceptions import InterpreterError
-from qtype.semantic.model import EmbeddingModel, LLMInference, Variable
-
-logger = logging.getLogger(__name__)
-
-
-def execute(
-    li: LLMInference,
-    stream_fn: Callable | None = None,
-    **kwargs: dict[Any, Any],
-) -> list[Variable]:
-    """Execute a LLM inference step.
-
-    Args:
-        li: The LLM inference step to execute.
-        stream_fn: Optional streaming callback function.
-        **kwargs: Additional keyword arguments including conversation_history.
-    """
-    logger.debug(f"Executing LLM inference step: {li.id}")
-
-    # Ensure we only have one output variable set.
-    if len(li.outputs) != 1:
-        raise InterpreterError(
-            "LLMInference step must have exactly one output variable."
-        )
-    output_variable = li.outputs[0]
-
-    # Determine if this is a chat session, completion, or embedding inference
-    if output_variable.type == Embedding:
-        if not isinstance(li.model, EmbeddingModel):
-            raise InterpreterError(
-                f"LLMInference step with Embedding output must use an embedding model, got {type(li.model)}"
-            )
-        if len(li.inputs) != 1:
-            raise InterpreterError(
-                "LLMInference step for completion must have exactly one input variable."
-            )
-
-        input = li.inputs[0].value
-        model = to_embedding_model(li.model)
-        result = model.get_text_embedding(text=input)
-        output_variable.value = Embedding(
-            vector=result,
-            source_text=input if isinstance(input, str) else None,
-            metadata=None,
-        )
-    elif output_variable.type == ChatMessage:
-        model = to_llm(li.model, li.system_message)
-        if not all(
-            isinstance(input.value, ChatMessage) for input in li.inputs
-        ):
-            raise InterpreterError(
-                f"LLMInference step with ChatMessage output must have ChatMessage inputs. Got {li.inputs}"
-            )
-
-        # Current user input
-        inputs = [
-            to_chat_message(input.value)  # type: ignore
-            for input in li.inputs
-        ]
-
-        # The session id is used to isolate the memory from other "users"
-        session_id = kwargs.get("session_id")
-
-        # If memory is defined, use it.
-        if li.memory:
-            memory = to_memory(session_id, li.memory)
-
-            from llama_index.core.async_utils import asyncio_run
-
-            # add the inputs to the memory
-            asyncio_run(memory.aput_messages(inputs))
-            # Use the whole memory state as inputs to the llm
-            inputs = memory.get_all()
-        else:
-            # If memory is not defined, see if a conversation history was provided.
-            # This is the list of messages from the front end
-            conversation_history = kwargs.get("conversation_history", [])  # type: ignore
-            if not isinstance(conversation_history, list):
-                raise ValueError(
-                    "Unexpected error: conversation history is not a list."
-                )
-            history: list[ChatMessage] = conversation_history
-            inputs = [to_chat_message(msg) for msg in history] + inputs
-
-        # If the stream function is set, we'll stream the results
-        chat_result: ChatResponse
-        if stream_fn:
-            generator = model.stream_chat(
-                messages=inputs,
-                **(
-                    li.model.inference_params
-                    if li.model.inference_params
-                    else {}
-                ),
-            )
-            for chat_response in generator:
-                stream_fn(li, chat_response.delta)
-            # Get the final result for processing
-            chat_result = chat_response  # Use the last result from streaming
-        else:
-            chat_result = model.chat(
-                messages=inputs,
-                **(
-                    li.model.inference_params
-                    if li.model.inference_params
-                    else {}
-                ),
-            )
-        output_variable.value = from_chat_message(chat_result.message)
-        if li.memory:
-            memory.put(chat_result.message)
-    else:
-        model = to_llm(li.model, li.system_message)
-
-        if len(li.inputs) != 1:
-            raise InterpreterError(
-                "LLMInference step for completion must have exactly one input variable."
-            )
-
-        input = li.inputs[0].value
-        if not isinstance(input, str):
-            logger.warning(
-                f"Input to LLMInference step {li.id} is not a string, converting: {input}"
-            )
-            input = str(input)
-
-        complete_result: CompletionResponse
-        if stream_fn:
-            generator = model.stream_complete(prompt=input)
-            for complete_result in generator:
-                stream_fn(li, complete_result.delta)
-        else:
-            complete_result = model.complete(prompt=input)
-        output_variable.value = complete_result.text
-
-    return li.outputs  # type: ignore[return-value]
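
The deleted executor above dispatched on the output variable's type (Embedding, ChatMessage, or plain completion) and, when a `stream_fn` callback was supplied, forwarded each response delta to it while keeping the last response as the final result. A minimal, self-contained sketch of that stream-or-block control flow; the `Chunk` and `fake_stream` names are illustrative stand-ins, not qtype or llama-index APIs:

```python
from dataclasses import dataclass
from typing import Callable, Iterator


@dataclass
class Chunk:
    delta: str  # incremental text produced by this chunk
    text: str   # full text accumulated so far


def fake_stream(prompt: str) -> Iterator[Chunk]:
    # Stand-in for a model's stream_chat()/stream_complete() generator.
    acc = ""
    for word in prompt.split():
        acc += word + " "
        yield Chunk(delta=word + " ", text=acc)


def run_inference(prompt: str, stream_fn: Callable[[str], None] | None = None) -> str:
    # Mirrors the deleted control flow: push deltas through the callback when
    # one is given, otherwise block for the full result; either way return the
    # final text (the last chunk of a stream carries the full response).
    if stream_fn is None:
        return "".join(chunk.delta for chunk in fake_stream(prompt))
    final = ""
    for chunk in fake_stream(prompt):
        stream_fn(chunk.delta)
        final = chunk.text
    return final


print(run_inference("hello streaming world", stream_fn=print))
```

In 0.1.3 this per-step function appears to be replaced by the class-based `qtype/interpreter/executors/llm_inference_executor.py` listed above.
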
qtype/interpreter/steps/prompt_template.py
DELETED
@@ -1,54 +0,0 @@
-import logging
-import string
-from typing import Any
-
-from qtype.interpreter.exceptions import InterpreterError
-from qtype.semantic.model import PromptTemplate, Variable
-
-logger = logging.getLogger(__name__)
-
-
-def get_format_arguments(format_string: str) -> set[str]:
-    formatter = string.Formatter()
-    arguments = []
-    for literal_text, field_name, format_spec, conversion in formatter.parse(
-        format_string
-    ):
-        if field_name:
-            arguments.append(field_name)
-    return set(arguments)
-
-
-def execute(step: PromptTemplate, **kwargs: dict[str, Any]) -> list[Variable]:
-    """Execute a prompt template step.
-
-    Args:
-        step: The prompt template step to execute.
-        **kwargs: Additional keyword arguments.
-    """
-
-    logger.debug(
-        f"Executing prompt template step: {step.id} with kwargs: {kwargs}"
-    )
-
-    format_args = get_format_arguments(step.template)
-    input_map = {
-        var.id: var.value
-        for var in step.inputs
-        if var.is_set() and var.id in format_args
-    }
-    missing = format_args - input_map.keys()
-    if missing:
-        raise InterpreterError(
-            f"The following fields are in the prompt template but not in the inputs: {missing}"
-        )
-    # Drop inputs that are not in format_args
-    result = step.template.format(**input_map)
-
-    if len(step.outputs) != 1:
-        raise InterpreterError(
-            f"PromptTemplate step {step.id} must have exactly one output variable."
-        )
-    step.outputs[0].value = result
-
-    return step.outputs  # type: ignore[return-value, no-any-return]
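
The `get_format_arguments` helper above is built on the stdlib `string.Formatter.parse`, which yields `(literal_text, field_name, format_spec, conversion)` tuples for each segment of a format string; `field_name` is `None` for literal-only segments. A standalone illustration of that behavior:

```python
import string


def format_fields(template: str) -> set[str]:
    # Collect every replacement-field name; skip literal-only segments,
    # for which field_name is None.
    return {
        field_name
        for _literal, field_name, _spec, _conv in string.Formatter().parse(template)
        if field_name
    }


print(format_fields("Hello {name}, you have {count} new {kind} messages."))
# -> {'name', 'count', 'kind'} (a set, so print order varies)
```

This is why the executor could validate that every template placeholder had a matching, set input variable before calling `str.format`.
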
qtype/interpreter/steps/search.py
DELETED
@@ -1,24 +0,0 @@
-import logging
-from typing import Any
-
-from qtype.semantic.model import Search, Variable
-
-logger = logging.getLogger(__name__)
-
-
-def execute(search: Search, **kwargs: dict[str, Any]) -> list[Variable]:
-    """Execute a search step.
-
-    Args:
-        search: The search step to execute.
-
-    Returns:
-        A list of variables that are set based on the search results.
-    """
-    logger.info("Executing Search on: %s", search.index.id)
-    # TODO: implement search execution logic
-    raise NotImplementedError(
-        "Search execution is not yet implemented. This will be handled in a future update."
-    )
-
-    return []  # Return an empty list for now

qtype/interpreter/steps/tool.py
DELETED
@@ -1,53 +0,0 @@
-import importlib
-import logging
-
-from qtype.interpreter.exceptions import InterpreterError
-from qtype.semantic.model import PythonFunctionTool, Tool, Variable
-
-logger = logging.getLogger(__name__)
-
-
-def execute(tool: Tool, **kwargs: dict) -> list[Variable]:
-    """Execute a tool step.
-
-    Args:
-        tool: The tool step to execute.
-        **kwargs: Additional keyword arguments.
-    """
-    logger.debug(f"Executing tool step: {tool.id} with kwargs: {kwargs}")
-
-    if isinstance(tool, PythonFunctionTool):
-        # import the function dynamically
-        module = importlib.import_module(tool.module_path)
-        function = getattr(module, tool.function_name, None)
-        if function is None:
-            raise InterpreterError(
-                f"Function {tool.function_name} not found in {tool.module_path}"
-            )
-        # Call the function with the provided arguments
-        if any(not inputs.is_set() for inputs in tool.inputs):
-            raise InterpreterError(
-                f"Tool {tool.id} requires all inputs to be set. Missing inputs: {[var.id for var in tool.inputs if not var.is_set()]}"
-            )
-        inputs = {var.id: var.value for var in tool.inputs if var.is_set()}
-        results = function(**inputs)
-    else:
-        # TODO: support api tools
-        raise InterpreterError(f"Unsupported tool type: {type(tool).__name__}")
-
-    if isinstance(results, dict) and len(tool.outputs) > 1:
-        for var in tool.outputs:
-            if var.id in results:
-                var.value = results[var.id]
-            else:
-                raise InterpreterError(
-                    f"Output variable {var.id} not found in function results."
-                )
-    elif len(tool.outputs) == 1:
-        tool.outputs[0].value = results
-    else:
-        raise InterpreterError(
-            f"The returned value {results} could not be assigned to outputs {[var.id for var in tool.outputs]}."
-        )
-
-    return tool.outputs  # type: ignore[return-value]
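
The `PythonFunctionTool` branch above follows the usual dynamic-dispatch recipe: resolve a module path and an attribute name at runtime, then call the resolved function with keyword arguments. A minimal stdlib-only sketch of that recipe; the `json.dumps` target is just an example, not something qtype dispatches to:

```python
import importlib
from typing import Any


def call_by_name(module_path: str, function_name: str, /, **inputs: Any) -> Any:
    # Resolve the module and function at runtime; fail loudly if either is missing.
    module = importlib.import_module(module_path)
    function = getattr(module, function_name, None)
    if function is None:
        raise RuntimeError(f"Function {function_name} not found in {module_path}")
    return function(**inputs)


# e.g. dispatch to json.dumps without importing json statically
print(call_by_name("json", "dumps", obj={"a": 1}, indent=2))
```

The deleted code added one qtype-specific wrinkle on top of this: a dict result is fanned out across multiple output variables by key, while a single output variable receives the raw return value.
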
qtype/interpreter/streaming_helpers.py
DELETED
@@ -1,123 +0,0 @@
-"""
-Streaming helpers for bridging callback-based execution with generator patterns.
-
-This module provides utilities to convert callback-based streaming functions
-into generators that can be used with FastAPI's StreamingResponse.
-"""
-
-from __future__ import annotations
-
-import queue
-import threading
-from collections.abc import Callable, Generator
-from concurrent.futures import Future
-from typing import Any, TypeVar
-
-from qtype.dsl.domain_types import ChatMessage
-from qtype.semantic.model import Step
-
-T = TypeVar("T")
-
-
-def create_streaming_generator(
-    execution_func: Callable[..., T],
-    *args: Any,
-    timeout: float = 30.0,
-    join_timeout: float = 5.0,
-    **kwargs: Any,
-) -> tuple[Generator[tuple[Step, ChatMessage | str], None, None], Future[T]]:
-    """
-    Convert a callback-based streaming function into a generator with result future.
-
-    This function executes the provided function in a separate thread and
-    converts its stream_fn callback pattern into a generator that yields
-    (step, message) tuples. Additionally returns a Future that will contain
-    the execution function's return value.
-
-    Args:
-        execution_func: Function to execute that accepts a stream_fn parameter
-        *args: Positional arguments to pass to execution_func
-        timeout: Timeout in seconds for queue.get() operations
-        join_timeout: Timeout in seconds for thread.join()
-        **kwargs: Keyword arguments to pass to execution_func
-
-    Returns:
-        Tuple of (generator, future) where:
-        - generator yields (Step, ChatMessage | str) tuples from streaming callback
-        - future will contain the return value of execution_func
-
-    Example:
-        ```python
-        def my_flow_execution(flow: Flow, stream_fn: Callable | None = None):
-            # Some execution logic that calls stream_fn(step, message)
-            return {"status": "completed", "steps_executed": 3}
-
-        # Convert to generator with result
-        stream_gen, result_future = create_streaming_generator(
-            my_flow_execution,
-            flow_copy,
-            some_other_arg="value"
-        )
-
-        # Process streaming data
-        for step, message in stream_gen:
-            print(f"Step {step.id}: {message}")
-
-        # Get final result (blocks until execution completes)
-        final_result = result_future.result(timeout=10.0)
-        print(f"Execution result: {final_result}")
-        ```
-    """
-    # Create thread-safe queue for communication
-    stream_queue: queue.Queue[tuple[Step, ChatMessage | str] | None] = (
-        queue.Queue()
-    )
-
-    # Create future for the return value
-    result_future: Future[T] = Future()
-
-    def stream_callback(step: Step, msg: ChatMessage | str) -> None:
-        """Callback function that pushes data to the queue."""
-        stream_queue.put((step, msg))
-
-    def execution_task() -> None:
-        """Execute the function in a separate thread."""
-        try:
-            # Add the stream_fn callback to kwargs
-            kwargs_with_callback = kwargs.copy()
-            kwargs_with_callback["stream_fn"] = stream_callback
-
-            # Execute the function with the callback and capture result
-            result = execution_func(*args, **kwargs_with_callback)
-            result_future.set_result(result)
-        except Exception as e:
-            # Set exception on future if execution fails
-            result_future.set_exception(e)
-        finally:
-            # Signal end of stream
-            stream_queue.put(None)
-
-    # Start execution in separate thread
-    execution_thread = threading.Thread(target=execution_task)
-    execution_thread.start()
-
-    def generator() -> Generator[tuple[Step, ChatMessage | str], None, None]:
-        """Generator that yields streaming data from the queue."""
-        try:
-            # Yield data as it becomes available
-            while True:
-                try:
-                    # Wait for data with timeout to avoid hanging
-                    data = stream_queue.get(timeout=timeout)
-                    if data is None:
-                        # End of stream signal
-                        break
-                    yield data
-                except queue.Empty:
-                    # Handle timeout - break and let thread cleanup
-                    break
-        finally:
-            # Ensure thread cleanup
-            execution_thread.join(timeout=join_timeout)
-
-    return generator(), result_future
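
The queue-and-thread bridge this module implemented (its role appears to have moved to `qtype/interpreter/stream/utils/callback_to_stream.py` in 0.1.3, per the file list above) can be demonstrated without any qtype types. A stripped-down, runnable sketch of the same idea; all names here are illustrative:

```python
import queue
import threading
from concurrent.futures import Future
from typing import Any, Callable, Iterator


def bridge(fn: Callable[..., Any]) -> tuple[Iterator[str], Future]:
    """Run fn(stream_fn=...) in a worker thread; yield its callback payloads."""
    q: queue.Queue[str | None] = queue.Queue()
    fut: Future = Future()

    def task() -> None:
        try:
            # q.put doubles as the stream_fn callback.
            fut.set_result(fn(stream_fn=q.put))
        except Exception as exc:
            fut.set_exception(exc)
        finally:
            q.put(None)  # sentinel: end of stream

    threading.Thread(target=task, daemon=True).start()

    def gen() -> Iterator[str]:
        # Drain the queue until the sentinel arrives.
        while (item := q.get()) is not None:
            yield item

    return gen(), fut


def produce(stream_fn: Callable[[str], None]) -> int:
    for word in ("streamed", "in", "order"):
        stream_fn(word)
    return 3


chunks, fut = bridge(produce)
print(list(chunks), fut.result())  # ['streamed', 'in', 'order'] 3
```

The sentinel-in-`finally` detail is the load-bearing part: it guarantees the consuming generator terminates even when the producer raises, with the exception surfaced via the `Future` rather than lost in the worker thread.
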