qtype-0.0.12-py3-none-any.whl → qtype-0.1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qtype/application/commons/tools.py +1 -1
- qtype/application/converters/tools_from_api.py +476 -11
- qtype/application/converters/tools_from_module.py +38 -14
- qtype/application/converters/types.py +15 -30
- qtype/application/documentation.py +1 -1
- qtype/application/facade.py +102 -85
- qtype/base/types.py +227 -7
- qtype/cli.py +5 -1
- qtype/commands/convert.py +52 -6
- qtype/commands/generate.py +44 -4
- qtype/commands/run.py +78 -36
- qtype/commands/serve.py +74 -44
- qtype/commands/validate.py +37 -14
- qtype/commands/visualize.py +46 -25
- qtype/dsl/__init__.py +6 -5
- qtype/dsl/custom_types.py +1 -1
- qtype/dsl/domain_types.py +86 -5
- qtype/dsl/linker.py +384 -0
- qtype/dsl/loader.py +315 -0
- qtype/dsl/model.py +751 -263
- qtype/dsl/parser.py +200 -0
- qtype/dsl/types.py +50 -0
- qtype/interpreter/api.py +63 -136
- qtype/interpreter/auth/aws.py +19 -9
- qtype/interpreter/auth/generic.py +93 -16
- qtype/interpreter/base/base_step_executor.py +436 -0
- qtype/interpreter/base/batch_step_executor.py +171 -0
- qtype/interpreter/base/exceptions.py +50 -0
- qtype/interpreter/base/executor_context.py +91 -0
- qtype/interpreter/base/factory.py +84 -0
- qtype/interpreter/base/progress_tracker.py +110 -0
- qtype/interpreter/base/secrets.py +339 -0
- qtype/interpreter/base/step_cache.py +74 -0
- qtype/interpreter/base/stream_emitter.py +469 -0
- qtype/interpreter/conversions.py +471 -22
- qtype/interpreter/converters.py +79 -0
- qtype/interpreter/endpoints.py +355 -0
- qtype/interpreter/executors/agent_executor.py +242 -0
- qtype/interpreter/executors/aggregate_executor.py +93 -0
- qtype/interpreter/executors/bedrock_reranker_executor.py +195 -0
- qtype/interpreter/executors/decoder_executor.py +163 -0
- qtype/interpreter/executors/doc_to_text_executor.py +112 -0
- qtype/interpreter/executors/document_embedder_executor.py +107 -0
- qtype/interpreter/executors/document_search_executor.py +113 -0
- qtype/interpreter/executors/document_source_executor.py +118 -0
- qtype/interpreter/executors/document_splitter_executor.py +105 -0
- qtype/interpreter/executors/echo_executor.py +63 -0
- qtype/interpreter/executors/field_extractor_executor.py +165 -0
- qtype/interpreter/executors/file_source_executor.py +101 -0
- qtype/interpreter/executors/file_writer_executor.py +110 -0
- qtype/interpreter/executors/index_upsert_executor.py +232 -0
- qtype/interpreter/executors/invoke_embedding_executor.py +92 -0
- qtype/interpreter/executors/invoke_flow_executor.py +51 -0
- qtype/interpreter/executors/invoke_tool_executor.py +358 -0
- qtype/interpreter/executors/llm_inference_executor.py +272 -0
- qtype/interpreter/executors/prompt_template_executor.py +78 -0
- qtype/interpreter/executors/sql_source_executor.py +106 -0
- qtype/interpreter/executors/vector_search_executor.py +91 -0
- qtype/interpreter/flow.py +173 -22
- qtype/interpreter/logging_progress.py +61 -0
- qtype/interpreter/metadata_api.py +115 -0
- qtype/interpreter/resource_cache.py +5 -4
- qtype/interpreter/rich_progress.py +225 -0
- qtype/interpreter/stream/chat/__init__.py +15 -0
- qtype/interpreter/stream/chat/converter.py +391 -0
- qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
- qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
- qtype/interpreter/stream/chat/vercel.py +609 -0
- qtype/interpreter/stream/utils/__init__.py +15 -0
- qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
- qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
- qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
- qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
- qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
- qtype/interpreter/telemetry.py +135 -8
- qtype/interpreter/tools/__init__.py +5 -0
- qtype/interpreter/tools/function_tool_helper.py +265 -0
- qtype/interpreter/types.py +330 -0
- qtype/interpreter/typing.py +83 -89
- qtype/interpreter/ui/404/index.html +1 -1
- qtype/interpreter/ui/404.html +1 -1
- qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
- qtype/interpreter/ui/_next/static/chunks/434-b2112d19f25c44ff.js +36 -0
- qtype/interpreter/ui/_next/static/chunks/{964-ed4ab073db645007.js → 964-2b041321a01cbf56.js} +1 -1
- qtype/interpreter/ui/_next/static/chunks/app/{layout-5ccbc44fd528d089.js → layout-a05273ead5de2c41.js} +1 -1
- qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
- qtype/interpreter/ui/_next/static/chunks/{main-6d261b6c5d6fb6c2.js → main-e26b9cb206da2cac.js} +1 -1
- qtype/interpreter/ui/_next/static/chunks/webpack-08642e441b39b6c2.js +1 -0
- qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
- qtype/interpreter/ui/_next/static/media/4cf2300e9c8272f7-s.p.woff2 +0 -0
- qtype/interpreter/ui/icon.png +0 -0
- qtype/interpreter/ui/index.html +1 -1
- qtype/interpreter/ui/index.txt +5 -5
- qtype/semantic/checker.py +643 -0
- qtype/semantic/generate.py +268 -85
- qtype/semantic/loader.py +95 -0
- qtype/semantic/model.py +535 -163
- qtype/semantic/resolver.py +63 -19
- qtype/semantic/visualize.py +50 -35
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/METADATA +21 -4
- qtype-0.1.3.dist-info/RECORD +137 -0
- qtype/dsl/base_types.py +0 -38
- qtype/dsl/validator.py +0 -464
- qtype/interpreter/batch/__init__.py +0 -0
- qtype/interpreter/batch/flow.py +0 -95
- qtype/interpreter/batch/sql_source.py +0 -95
- qtype/interpreter/batch/step.py +0 -63
- qtype/interpreter/batch/types.py +0 -41
- qtype/interpreter/batch/utils.py +0 -179
- qtype/interpreter/chat/chat_api.py +0 -237
- qtype/interpreter/chat/vercel.py +0 -314
- qtype/interpreter/exceptions.py +0 -10
- qtype/interpreter/step.py +0 -67
- qtype/interpreter/steps/__init__.py +0 -0
- qtype/interpreter/steps/agent.py +0 -114
- qtype/interpreter/steps/condition.py +0 -36
- qtype/interpreter/steps/decoder.py +0 -88
- qtype/interpreter/steps/llm_inference.py +0 -150
- qtype/interpreter/steps/prompt_template.py +0 -54
- qtype/interpreter/steps/search.py +0 -24
- qtype/interpreter/steps/tool.py +0 -53
- qtype/interpreter/streaming_helpers.py +0 -123
- qtype/interpreter/ui/_next/static/chunks/736-7fc606e244fedcb1.js +0 -36
- qtype/interpreter/ui/_next/static/chunks/app/page-c72e847e888e549d.js +0 -1
- qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
- qtype/interpreter/ui/_next/static/chunks/webpack-8289c17c67827f22.js +0 -1
- qtype/interpreter/ui/_next/static/css/a262c53826df929b.css +0 -3
- qtype/interpreter/ui/_next/static/media/569ce4b8f30dc480-s.p.woff2 +0 -0
- qtype/interpreter/ui/favicon.ico +0 -0
- qtype/loader.py +0 -389
- qtype-0.0.12.dist-info/RECORD +0 -105
- /qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/WHEEL +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/entry_points.txt +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/licenses/LICENSE +0 -0
- {qtype-0.0.12.dist-info → qtype-0.1.3.dist-info}/top_level.txt +0 -0
qtype/interpreter/batch/step.py
DELETED
@@ -1,63 +0,0 @@
-from functools import partial
-from typing import Any, Tuple
-
-import pandas as pd
-
-from qtype.interpreter.batch.sql_source import execute_sql_source
-from qtype.interpreter.batch.types import BatchConfig
-from qtype.interpreter.batch.utils import (
-    batch_iterator,
-    single_step_adapter,
-    validate_inputs,
-)
-from qtype.interpreter.exceptions import InterpreterError
-from qtype.semantic.model import (
-    Condition,
-    Decoder,
-    Flow,
-    PromptTemplate,
-    Search,
-    SQLSource,
-    Step,
-    Tool,
-)
-
-SINGLE_WRAP_STEPS = {Decoder, Condition, PromptTemplate, Search, Tool}
-
-
-def batch_execute_step(
-    step: Step,
-    inputs: pd.DataFrame,
-    batch_config: BatchConfig,
-    **kwargs: dict[str, Any],
-) -> Tuple[pd.DataFrame, pd.DataFrame]:
-    """
-    Executes a given step in a batch processing pipeline.
-
-    Args:
-        step (Step): The step to be executed.
-        inputs (pd.DataFrame): The input data for the step.
-        batch_config (BatchConfig): Configuration for batch processing.
-        **kwargs: Additional keyword arguments.
-
-    Returns:
-        Tuple[pd.DataFrame, pd.DataFrame]: A tuple containing the output results and any rows that returned errors.
-    """
-
-    validate_inputs(inputs, step)
-
-    if isinstance(step, Flow):
-        from qtype.interpreter.batch.flow import batch_execute_flow
-
-        return batch_execute_flow(step, inputs, batch_config, **kwargs)
-    elif isinstance(step, SQLSource):
-        return execute_sql_source(step, inputs, batch_config, **kwargs)
-    elif step in SINGLE_WRAP_STEPS:
-        return batch_iterator(
-            f=partial(single_step_adapter, step=step),
-            batch=inputs,
-            batch_config=batch_config,
-        )
-    # TODO: implement batching for multi-row steps. For example, llm inference can be sped up in batch...
-    else:
-        raise InterpreterError(f"Unsupported step type: {type(step).__name__}")
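For context on the dispatch above: single-row steps were wrapped with `functools.partial` so that `batch_iterator` could call them with one row's columns as keyword arguments. A minimal, self-contained sketch of that binding pattern (the `adapter` function and the `"decoder-1"` id are hypothetical stand-ins, not qtype APIs):

```python
from functools import partial

def adapter(step: str, **inputs) -> dict:
    # Stand-in for single_step_adapter: the step is bound up front,
    # row values arrive as keyword arguments, outputs come back as a dict.
    return {"echo": f"{step}:{inputs['x']}"}

f = partial(adapter, step="decoder-1")
print(f(x=42))  # {'echo': 'decoder-1:42'}
```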
qtype/interpreter/batch/types.py
DELETED
@@ -1,41 +0,0 @@
-from __future__ import annotations
-
-from enum import Enum
-
-from pydantic import BaseModel, Field
-
-
-class ErrorMode(str, Enum):
-    """Error handling mode for batch processing."""
-
-    FAIL = "fail"
-    DROP = "drop"
-
-
-class BatchConfig(BaseModel):
-    """Configuration for batch execution.
-
-    Attributes:
-        num_workers: Number of async workers for batch operations.
-        batch_size: Maximum number of rows to send to a step at a time.
-        error_mode: Error handling mode for batch processing.
-    """
-
-    num_workers: int = Field(
-        default=4,
-        description="Number of async workers for batch operations",
-        gt=0,
-    )
-    batch_size: int = Field(
-        default=512,
-        description="Max number of rows to send to a step at a time",
-        gt=0,
-    )
-    error_mode: ErrorMode = Field(
-        default=ErrorMode.FAIL,
-        description="Error handling mode for batch processing",
-    )
-    write_errors_to: str | None = Field(
-        default=None,
-        description="If error mode is DROP, the errors for any step are saved to this directory",
-    )
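As a usage note, the removed config was an ordinary Pydantic model; a minimal sketch against qtype 0.0.12 (the field values here are arbitrary examples):

```python
from qtype.interpreter.batch.types import BatchConfig, ErrorMode

# Drop failing rows instead of raising, and persist them for later inspection.
config = BatchConfig(
    num_workers=8,
    batch_size=256,
    error_mode=ErrorMode.DROP,
    write_errors_to="errors/",
)
```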
qtype/interpreter/batch/utils.py
DELETED
@@ -1,179 +0,0 @@
-from __future__ import annotations
-
-import copy
-from typing import Any, Callable, Tuple
-
-import pandas as pd
-
-from qtype.interpreter.batch.types import BatchConfig, ErrorMode
-from qtype.semantic.model import Step
-
-
-class InputMissingError(Exception):
-    """Raised when a required input variable is missing from the DataFrame."""
-
-    pass
-
-
-def validate_inputs(batch: pd.DataFrame, step: Step) -> pd.DataFrame:
-    """Ensures all input variables for the step are columns in the DataFrame.
-    If not, an Exception is raised.
-
-    Args:
-        batch: The input DataFrame to decode.
-        decoder: The decoder step to validate.
-
-    Returns:
-        A view of the dataframe with only the input columns for the step.
-
-    Raises:
-        InputMissingError: If the decoder step does not have exactly one input or if the input column is not in the DataFrame.
-    """
-    input_ids = [input_var.id for input_var in step.inputs]
-    for input_var in input_ids:
-        if input_var not in batch.columns:
-            raise InputMissingError(
-                f"Input DataFrame must contain column '{input_var}' for step {step.id}."
-            )
-    return batch[input_ids]
-
-
-def fail_mode_wrapper(
-    f: Callable[..., dict[str, Any]],
-    row: pd.Series,
-    batch_config: BatchConfig,
-    **kwargs: dict[str, Any],
-) -> dict | Exception:
-    """Executes a function with error handling based on the batch configuration.
-
-    Args:
-        f: The function to execute that can take any arguments and returns a dict of results.
-        row: The input row as a dictionary.
-        batch_config: Configuration for error handling.
-        **kwargs: Additional keyword arguments.
-
-    Returns:
-        The result of the function or an Exception if an error occurs and the error mode is set to CONTINUE.
-    """
-    try:
-        # turn row into a dict and merge with kwargs
-        merged_kwargs = {**row.to_dict(), **kwargs}
-        return f(**merged_kwargs)
-    except Exception as e:
-        if batch_config.error_mode == ErrorMode.FAIL:
-            raise e
-        else:
-            return e
-
-
-def single_step_adapter(
-    step: Step, **inputs: dict[str, Any]
-) -> dict[str, Any]:
-    """A batch adapter for steps that have no side effects or access shared resources."""
-    from qtype.interpreter.step import execute_step
-
-    step_clone = copy.deepcopy(step)
-    for input_var in step_clone.inputs:
-        if input_var.id in inputs:
-            input_var.value = inputs[input_var.id]
-        else:
-            raise ValueError(
-                f"Input variable '{input_var.id}' not found in inputs."
-            )
-    execute_step(step_clone)
-    return {
-        output_var.id: output_var.value for output_var in step_clone.outputs
-    }
-
-
-def to_series(
-    rv: dict | Exception, error_col_name: str = "error"
-) -> pd.Series:
-    # If rv is an exception, return a series with index "error"
-    if isinstance(rv, Exception):
-        return pd.Series({error_col_name: str(rv)})
-    return pd.Series(rv)  # type: ignore[no-any-return]
-
-
-def batch_iterator(
-    f: Callable[..., dict[str, Any]],
-    batch: pd.DataFrame,
-    batch_config: BatchConfig,
-    **kwargs: Any,
-) -> Tuple[pd.DataFrame, pd.DataFrame]:
-    """Executes a step over a batch of inputs with error handling.
-
-    Args:
-        step: The step to execute.
-        batch: The input DataFrame to process.
-        batch_config: Configuration for error handling.
-        **kwargs: Additional keyword arguments to pass to the step.
-
-    Returns:
-        A tuple containing two DataFrames:
-        - The first DataFrame contains successful results with output columns.
-        - The second DataFrame contains rows that encountered errors with an 'error' column.
-    """
-
-    # Use a unique column name for errors
-    error_col = "error_" + str(id(f))
-
-    # If error_col is already in the dataframe, throw an exception
-    if error_col in batch.columns:
-        raise ValueError(
-            f"Error column name '{error_col}' already exists in the batch DataFrame."
-        )
-
-    def the_pipe(row: pd.Series) -> pd.Series:
-        return to_series(
-            fail_mode_wrapper(f, row, batch_config=batch_config, **kwargs),
-            error_col_name=error_col,
-        )
-
-    results = batch.apply(the_pipe, axis=1)
-
-    # Split the results into two dataframes, one where error_col is not defined, and one where it is.
-    success_mask = ~results[error_col].notna()
-    failed_mask = results[error_col].notna()
-
-    # Create success DataFrame (drop the error column)
-    success_df = results[success_mask].drop(columns=[error_col])
-
-    # Create failed DataFrame (keep only original columns plus error)
-    original_columns = batch.columns.tolist()
-
-    if failed_mask.any():
-        failed_df = results[failed_mask]
-        # Drop all the output columns from failed_df, keep only original input columns + error
-        failed_df = failed_df[original_columns + [error_col]]
-    else:
-        # No failed rows, create empty DataFrame with expected structure
-        failed_df = pd.DataFrame(columns=original_columns + [error_col])
-
-    return success_df, failed_df
-
-
-def reconcile_results_and_errors(
-    results: list[pd.DataFrame], errors: list[pd.DataFrame]
-) -> Tuple[pd.DataFrame, pd.DataFrame]:
-    """
-    Concatenates lists of pandas DataFrames containing results and errors into single DataFrames.
-
-    If the input lists are empty, creates empty DataFrames as placeholders.
-
-    Args:
-        results (list[pd.DataFrame]): List of DataFrames containing results.
-        errors (list[pd.DataFrame]): List of DataFrames containing errors.
-
-    Returns:
-        Tuple[pd.DataFrame, pd.DataFrame]: A tuple containing:
-        - A single DataFrame with all results concatenated.
-        - A single DataFrame with all errors concatenated.
-    """
-    if not results:
-        results = [pd.DataFrame({})]
-    if not errors:
-        errors = [pd.DataFrame({})]
-    return pd.concat(results, ignore_index=True), pd.concat(
-        errors, ignore_index=True
-    )
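The error-mode semantics above are easiest to see in isolation: with any mode other than `FAIL`, `fail_mode_wrapper` returns the exception as a value instead of raising it. A minimal sketch against qtype 0.0.12, with a hypothetical `parse` function:

```python
import pandas as pd

from qtype.interpreter.batch.types import BatchConfig, ErrorMode
from qtype.interpreter.batch.utils import fail_mode_wrapper

def parse(x: str) -> dict:
    # Hypothetical row function: one column in, one column out.
    return {"n": int(x)}

row = pd.Series({"x": "not-a-number"})
config = BatchConfig(error_mode=ErrorMode.DROP)

# int("not-a-number") raises ValueError; DROP mode returns it as a value.
rv = fail_mode_wrapper(parse, row, batch_config=config)
assert isinstance(rv, ValueError)
```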
qtype/interpreter/chat/chat_api.py
DELETED
@@ -1,237 +0,0 @@
-from __future__ import annotations
-
-import logging
-import uuid
-from collections.abc import Generator
-from typing import Any
-
-from fastapi import FastAPI
-from fastapi.responses import StreamingResponse
-
-from qtype.dsl.base_types import PrimitiveTypeEnum
-from qtype.dsl.domain_types import ChatContent, ChatMessage, MessageRole
-from qtype.interpreter.chat.file_conversions import file_to_content
-from qtype.interpreter.chat.vercel import (
-    ChatRequest,
-    ErrorChunk,
-    FinishChunk,
-    StartChunk,
-    TextDeltaChunk,
-    TextEndChunk,
-    TextStartChunk,
-    UIMessage,
-)
-from qtype.interpreter.flow import execute_flow
-from qtype.interpreter.streaming_helpers import create_streaming_generator
-from qtype.semantic.model import Flow
-
-
-def _ui_request_to_domain_type(request: ChatRequest) -> list[ChatMessage]:
-    """
-    Convert a ChatRequest to domain-specific ChatMessages.
-
-    Processes all UI messages from the AI SDK UI/React request format.
-    Returns the full conversation history for context.
-    """
-    if not request.messages:
-        raise ValueError("No messages provided in request.")
-
-    # Convert each UIMessage to a domain-specific ChatMessage
-    return [
-        _ui_message_to_domain_type(message) for message in request.messages
-    ]
-
-
-def _ui_message_to_domain_type(message: UIMessage) -> ChatMessage:
-    """
-    Convert a UIMessage to a domain-specific ChatMessage.
-
-    Creates one block for each part in the message content.
-    """
-    blocks = []
-
-    for part in message.parts:
-        if part.type == "text":
-            blocks.append(
-                ChatContent(type=PrimitiveTypeEnum.text, content=part.text)
-            )
-        elif part.type == "reasoning":
-            blocks.append(
-                ChatContent(type=PrimitiveTypeEnum.text, content=part.text)
-            )
-        elif part.type == "file":
-            blocks.append(
-                file_to_content(part.url)  # type: ignore
-            )
-        elif part.type.startswith("tool-"):
-            raise NotImplementedError(
-                "Tool call part handling is not implemented yet."
-            )
-        elif part.type == "dynamic-tool":
-            raise NotImplementedError(
-                "Dynamic tool part handling is not implemented yet."
-            )
-        elif part.type == "step-start":
-            # Step boundaries might not need content blocks
-            continue
-        elif part.type in ["source-url", "source-document"]:
-            raise NotImplementedError(
-                "Source part handling is not implemented yet."
-            )
-        elif part.type.startswith("data-"):
-            raise NotImplementedError(
-                "Data part handling is not implemented yet."
-            )
-        else:
-            # Log unknown part types for debugging
-            raise ValueError(f"Unknown part type: {part.type}")
-
-    # If no blocks were created, raise an error
-    if not blocks:
-        raise ValueError(
-            "No valid content blocks created from UIMessage parts."
-        )
-
-    return ChatMessage(
-        role=MessageRole(message.role),
-        blocks=blocks,
-    )
-
-
-def create_chat_flow_endpoint(app: FastAPI, flow: Flow) -> None:
-    """
-    Create a chat endpoint for the given Flow.
-
-    This creates an endpoint at /flows/{flow_id}/chat that follows the
-    AI SDK UI/React request format and responds with streaming data.
-
-    Args:
-        app: The FastAPI application instance
-        flow: The Flow to create an endpoint for
-    """
-    flow_id = flow.id
-
-    async def handle_chat_data(request: ChatRequest) -> StreamingResponse:
-        """Handle chat requests for the specific flow."""
-
-        try:
-            # Convert AI SDK UI request to domain ChatMessages
-            messages = _ui_request_to_domain_type(request)
-            if not len(messages):
-                raise ValueError("No input messages received")
-
-            # Pop the last message as the current input
-            current_input = messages.pop()
-            if current_input.role != MessageRole.user:
-                raise ValueError(
-                    f"Unexpected input {current_input} from non user role: {current_input.role}"
-                )
-
-            flow_copy = flow.model_copy(deep=True)
-
-            input_variable = [
-                var for var in flow_copy.inputs if var.type == ChatMessage
-            ][0]
-            input_variable.value = current_input
-
-            # Pass conversation context to flow execution for memory population
-            execution_kwargs: Any = {
-                "session_id": request.id,  # Use request ID as session identifier
-                "conversation_history": messages,
-            }
-
-            # Create a streaming generator for the flow execution
-            stream_generator, result_future = create_streaming_generator(
-                execute_flow, flow_copy, **execution_kwargs
-            )
-        except Exception as e:
-            error_chunk = ErrorChunk(errorText=str(e))
-            response = StreamingResponse(
-                [
-                    f"data: {error_chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-                ],
-                media_type="text/plain; charset=utf-8",
-            )
-            response.headers["x-vercel-ai-ui-message-stream"] = "v1"
-            return response
-
-        # Create generator that formats messages according to AI SDK UI streaming protocol
-        def vercel_ai_formatter() -> Generator[str, None, None]:
-            """Format stream data according to AI SDK UI streaming protocol."""
-
-            # Send start chunk
-            start_chunk = StartChunk(messageId=str(uuid.uuid4()))  # type: ignore
-            yield f"data: {start_chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-
-            # Track text content for proper streaming
-            text_id = str(uuid.uuid4())
-            text_started = False
-
-            for step, message in stream_generator:
-                if isinstance(message, ChatMessage):
-                    # Convert ChatMessage to text content
-                    content = " ".join(
-                        [
-                            block.content
-                            for block in message.blocks
-                            if hasattr(block, "content") and block.content
-                        ]
-                    )
-                    if content.strip():
-                        # Start text block if not started
-                        if not text_started:
-                            text_start = TextStartChunk(id=text_id)
-                            yield f"data: {text_start.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-                            text_started = True
-
-                        # Send text delta
-                        text_delta = TextDeltaChunk(id=text_id, delta=content)
-                        yield f"data: {text_delta.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-                else:
-                    # Handle other message types as text deltas
-                    text_content = str(message)
-                    if text_content.strip():
-                        # Start text block if not started
-                        if not text_started:
-                            text_start = TextStartChunk(id=text_id)
-                            yield f"data: {text_start.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-                            text_started = True
-
-                        # Send text delta
-                        text_delta = TextDeltaChunk(
-                            id=text_id, delta=text_content
-                        )
-                        yield f"data: {text_delta.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-
-            # End text block if it was started
-            if text_started:
-                text_end = TextEndChunk(id=text_id)
-                yield f"data: {text_end.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-
-            # Send finish chunk
-            try:
-                result_future.result(timeout=5.0)
-                finish_chunk = FinishChunk()
-                yield f"data: {finish_chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-            except Exception as e:
-                # Send error
-                error_chunk = ErrorChunk(errorText=str(e))
-                logging.error(
-                    f"Error during flow execution: {e}", exc_info=True
-                )
-                yield f"data: {error_chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n"
-
-        response = StreamingResponse(
-            vercel_ai_formatter(), media_type="text/plain; charset=utf-8"
-        )
-        response.headers["x-vercel-ai-ui-message-stream"] = "v1"
-        return response
-
-    # Add the endpoint to the FastAPI app
-    app.post(
-        f"/flows/{flow_id}/chat",
-        tags=["chat"],
-        summary=f"Chat with {flow_id} flow",
-        description=flow.description,
-        response_class=StreamingResponse,
-    )(handle_chat_data)