qtype 0.0.12-py3-none-any.whl → 0.1.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. qtype/application/commons/tools.py +1 -1
  2. qtype/application/converters/tools_from_api.py +476 -11
  3. qtype/application/converters/tools_from_module.py +38 -14
  4. qtype/application/converters/types.py +15 -30
  5. qtype/application/documentation.py +1 -1
  6. qtype/application/facade.py +102 -85
  7. qtype/base/types.py +227 -7
  8. qtype/cli.py +5 -1
  9. qtype/commands/convert.py +52 -6
  10. qtype/commands/generate.py +44 -4
  11. qtype/commands/run.py +78 -36
  12. qtype/commands/serve.py +74 -44
  13. qtype/commands/validate.py +37 -14
  14. qtype/commands/visualize.py +46 -25
  15. qtype/dsl/__init__.py +6 -5
  16. qtype/dsl/custom_types.py +1 -1
  17. qtype/dsl/domain_types.py +86 -5
  18. qtype/dsl/linker.py +384 -0
  19. qtype/dsl/loader.py +315 -0
  20. qtype/dsl/model.py +753 -264
  21. qtype/dsl/parser.py +200 -0
  22. qtype/dsl/types.py +50 -0
  23. qtype/interpreter/api.py +63 -136
  24. qtype/interpreter/auth/aws.py +19 -9
  25. qtype/interpreter/auth/generic.py +93 -16
  26. qtype/interpreter/base/base_step_executor.py +436 -0
  27. qtype/interpreter/base/batch_step_executor.py +171 -0
  28. qtype/interpreter/base/exceptions.py +50 -0
  29. qtype/interpreter/base/executor_context.py +91 -0
  30. qtype/interpreter/base/factory.py +84 -0
  31. qtype/interpreter/base/progress_tracker.py +110 -0
  32. qtype/interpreter/base/secrets.py +339 -0
  33. qtype/interpreter/base/step_cache.py +74 -0
  34. qtype/interpreter/base/stream_emitter.py +469 -0
  35. qtype/interpreter/conversions.py +495 -24
  36. qtype/interpreter/converters.py +79 -0
  37. qtype/interpreter/endpoints.py +355 -0
  38. qtype/interpreter/executors/agent_executor.py +242 -0
  39. qtype/interpreter/executors/aggregate_executor.py +93 -0
  40. qtype/interpreter/executors/bedrock_reranker_executor.py +195 -0
  41. qtype/interpreter/executors/decoder_executor.py +163 -0
  42. qtype/interpreter/executors/doc_to_text_executor.py +112 -0
  43. qtype/interpreter/executors/document_embedder_executor.py +123 -0
  44. qtype/interpreter/executors/document_search_executor.py +113 -0
  45. qtype/interpreter/executors/document_source_executor.py +118 -0
  46. qtype/interpreter/executors/document_splitter_executor.py +105 -0
  47. qtype/interpreter/executors/echo_executor.py +63 -0
  48. qtype/interpreter/executors/field_extractor_executor.py +165 -0
  49. qtype/interpreter/executors/file_source_executor.py +101 -0
  50. qtype/interpreter/executors/file_writer_executor.py +110 -0
  51. qtype/interpreter/executors/index_upsert_executor.py +232 -0
  52. qtype/interpreter/executors/invoke_embedding_executor.py +104 -0
  53. qtype/interpreter/executors/invoke_flow_executor.py +51 -0
  54. qtype/interpreter/executors/invoke_tool_executor.py +358 -0
  55. qtype/interpreter/executors/llm_inference_executor.py +272 -0
  56. qtype/interpreter/executors/prompt_template_executor.py +78 -0
  57. qtype/interpreter/executors/sql_source_executor.py +106 -0
  58. qtype/interpreter/executors/vector_search_executor.py +91 -0
  59. qtype/interpreter/flow.py +172 -22
  60. qtype/interpreter/logging_progress.py +61 -0
  61. qtype/interpreter/metadata_api.py +115 -0
  62. qtype/interpreter/resource_cache.py +5 -4
  63. qtype/interpreter/rich_progress.py +225 -0
  64. qtype/interpreter/stream/chat/__init__.py +15 -0
  65. qtype/interpreter/stream/chat/converter.py +391 -0
  66. qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
  67. qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
  68. qtype/interpreter/stream/chat/vercel.py +609 -0
  69. qtype/interpreter/stream/utils/__init__.py +15 -0
  70. qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
  71. qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
  72. qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
  73. qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
  74. qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
  75. qtype/interpreter/telemetry.py +135 -8
  76. qtype/interpreter/tools/__init__.py +5 -0
  77. qtype/interpreter/tools/function_tool_helper.py +265 -0
  78. qtype/interpreter/types.py +330 -0
  79. qtype/interpreter/typing.py +83 -89
  80. qtype/interpreter/ui/404/index.html +1 -1
  81. qtype/interpreter/ui/404.html +1 -1
  82. qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
  83. qtype/interpreter/ui/_next/static/chunks/434-b2112d19f25c44ff.js +36 -0
  84. qtype/interpreter/ui/_next/static/chunks/{964-ed4ab073db645007.js → 964-2b041321a01cbf56.js} +1 -1
  85. qtype/interpreter/ui/_next/static/chunks/app/{layout-5ccbc44fd528d089.js → layout-a05273ead5de2c41.js} +1 -1
  86. qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
  87. qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
  88. qtype/interpreter/ui/_next/static/chunks/{main-6d261b6c5d6fb6c2.js → main-e26b9cb206da2cac.js} +1 -1
  89. qtype/interpreter/ui/_next/static/chunks/webpack-08642e441b39b6c2.js +1 -0
  90. qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
  91. qtype/interpreter/ui/_next/static/media/4cf2300e9c8272f7-s.p.woff2 +0 -0
  92. qtype/interpreter/ui/icon.png +0 -0
  93. qtype/interpreter/ui/index.html +1 -1
  94. qtype/interpreter/ui/index.txt +5 -5
  95. qtype/semantic/checker.py +643 -0
  96. qtype/semantic/generate.py +268 -85
  97. qtype/semantic/loader.py +95 -0
  98. qtype/semantic/model.py +535 -163
  99. qtype/semantic/resolver.py +63 -19
  100. qtype/semantic/visualize.py +50 -35
  101. {qtype-0.0.12.dist-info → qtype-0.1.7.dist-info}/METADATA +22 -5
  102. qtype-0.1.7.dist-info/RECORD +137 -0
  103. qtype/dsl/base_types.py +0 -38
  104. qtype/dsl/validator.py +0 -464
  105. qtype/interpreter/batch/__init__.py +0 -0
  106. qtype/interpreter/batch/flow.py +0 -95
  107. qtype/interpreter/batch/sql_source.py +0 -95
  108. qtype/interpreter/batch/step.py +0 -63
  109. qtype/interpreter/batch/types.py +0 -41
  110. qtype/interpreter/batch/utils.py +0 -179
  111. qtype/interpreter/chat/chat_api.py +0 -237
  112. qtype/interpreter/chat/vercel.py +0 -314
  113. qtype/interpreter/exceptions.py +0 -10
  114. qtype/interpreter/step.py +0 -67
  115. qtype/interpreter/steps/__init__.py +0 -0
  116. qtype/interpreter/steps/agent.py +0 -114
  117. qtype/interpreter/steps/condition.py +0 -36
  118. qtype/interpreter/steps/decoder.py +0 -88
  119. qtype/interpreter/steps/llm_inference.py +0 -150
  120. qtype/interpreter/steps/prompt_template.py +0 -54
  121. qtype/interpreter/steps/search.py +0 -24
  122. qtype/interpreter/steps/tool.py +0 -53
  123. qtype/interpreter/streaming_helpers.py +0 -123
  124. qtype/interpreter/ui/_next/static/chunks/736-7fc606e244fedcb1.js +0 -36
  125. qtype/interpreter/ui/_next/static/chunks/app/page-c72e847e888e549d.js +0 -1
  126. qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
  127. qtype/interpreter/ui/_next/static/chunks/webpack-8289c17c67827f22.js +0 -1
  128. qtype/interpreter/ui/_next/static/css/a262c53826df929b.css +0 -3
  129. qtype/interpreter/ui/_next/static/media/569ce4b8f30dc480-s.p.woff2 +0 -0
  130. qtype/interpreter/ui/favicon.ico +0 -0
  131. qtype/loader.py +0 -389
  132. qtype-0.0.12.dist-info/RECORD +0 -105
  133. /qtype/interpreter/ui/_next/static/{OT8QJQW3J70VbDWWfrEMT → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
  134. {qtype-0.0.12.dist-info → qtype-0.1.7.dist-info}/WHEEL +0 -0
  135. {qtype-0.0.12.dist-info → qtype-0.1.7.dist-info}/entry_points.txt +0 -0
  136. {qtype-0.0.12.dist-info → qtype-0.1.7.dist-info}/licenses/LICENSE +0 -0
  137. {qtype-0.0.12.dist-info → qtype-0.1.7.dist-info}/top_level.txt +0 -0
qtype/interpreter/executors/sql_source_executor.py ADDED
@@ -0,0 +1,106 @@
+ from typing import AsyncIterator
+
+ import boto3  # type: ignore[import-untyped]
+ import pandas as pd
+ import sqlalchemy
+ from sqlalchemy import create_engine
+ from sqlalchemy.exc import SQLAlchemyError
+
+ from qtype.interpreter.auth.generic import auth
+ from qtype.interpreter.base.base_step_executor import StepExecutor
+ from qtype.interpreter.base.executor_context import ExecutorContext
+ from qtype.interpreter.types import FlowMessage
+ from qtype.semantic.model import SQLSource
+
+
+ class SQLSourceExecutor(StepExecutor):
+     """Executor for SQLSource steps."""
+
+     def __init__(
+         self, step: SQLSource, context: ExecutorContext, **dependencies
+     ):
+         super().__init__(step, context, **dependencies)
+         if not isinstance(step, SQLSource):
+             raise ValueError(
+                 "SQLSourceExecutor can only execute SQLSource steps."
+             )
+         self.step: SQLSource = step
+
+     async def process_message(
+         self,
+         message: FlowMessage,
+     ) -> AsyncIterator[FlowMessage]:
+         """Process a single FlowMessage for the SQLSource step.
+
+         Args:
+             message: The FlowMessage to process.
+         Yields:
+             FlowMessages with the results of SQL query execution.
+         """
+         # Create a database engine - resolve connection string if it's a SecretReference
+         connection_string = self._resolve_secret(self.step.connection)
+         connect_args = {}
+         if self.step.auth:
+             with auth(self.step.auth) as creds:
+                 if isinstance(creds, boto3.Session):
+                     connect_args["session"] = creds
+         engine = create_engine(connection_string, connect_args=connect_args)
+
+         output_columns = {output.id for output in self.step.outputs}
+         step_inputs = {i.id for i in self.step.inputs}
+
+         try:
+             # Make a dictionary of column_name: value from message variables
+             params = {
+                 col: message.variables.get(col)
+                 for col in step_inputs
+                 if col in message.variables
+             }
+
+             await self.stream_emitter.status(
+                 f"Executing SQL query with params: {params}",
+             )
+
+             # Execute the query and fetch the results into a DataFrame
+             with engine.connect() as connection:
+                 result = connection.execute(
+                     sqlalchemy.text(self.step.query),
+                     parameters=params if params else None,
+                 )
+                 df = pd.DataFrame(
+                     result.fetchall(), columns=list(result.keys())
+                 )
+
+             # Confirm the outputs exist in the dataframe
+             columns = set(df.columns)
+             missing_columns = output_columns - columns
+             if missing_columns:
+                 raise ValueError(
+                     (
+                         f"SQL Result was missing expected columns: "
+                         f"{', '.join(missing_columns)}, it has columns: "
+                         f"{', '.join(columns)}"
+                     )
+                 )
+
+             # Emit one message per result row
+             for _, row in df.iterrows():
+                 # Create a dict with only the output columns
+                 row_dict = {
+                     str(k): v
+                     for k, v in row.to_dict().items()
+                     if str(k) in output_columns
+                 }
+                 # Merge with original message variables
+                 yield message.copy_with_variables(new_variables=row_dict)
+
+             await self.stream_emitter.status(
+                 f"Emitted {len(df)} rows from SQL query"
+             )
+
+         except SQLAlchemyError as e:
+             # Emit error event to stream so frontend can display it
+             await self.stream_emitter.error(str(e))
+             # Set error on the message and yield it
+             message.set_error(self.step.id, e)
+             yield message
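
Note: the executor above binds declared step inputs as named parameters via `sqlalchemy.text`, so a step input with id `user_id` can be referenced as `:user_id` in the query, and each result row is emitted as its own FlowMessage. A minimal standalone sketch of that binding pattern (the table, columns, and parameter names here are hypothetical, not taken from the package):

import pandas as pd
import sqlalchemy
from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:")
with engine.connect() as conn:
    # Hypothetical schema standing in for a real source table
    conn.execute(sqlalchemy.text("CREATE TABLE orders (user_id INT, total REAL)"))
    conn.execute(sqlalchemy.text("INSERT INTO orders VALUES (1, 9.99), (2, 5.00)"))
    # Named parameters are bound the same way the executor binds
    # message.variables entries whose keys match step input ids
    result = conn.execute(
        sqlalchemy.text("SELECT user_id, total FROM orders WHERE user_id = :user_id"),
        parameters={"user_id": 1},
    )
    df = pd.DataFrame(result.fetchall(), columns=list(result.keys()))
print(df)  # one matching row; the executor would yield one FlowMessage per row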
qtype/interpreter/executors/vector_search_executor.py ADDED
@@ -0,0 +1,91 @@
+ """Vector search executor for retrieving relevant chunks from vector stores."""
+
+ from __future__ import annotations
+
+ import logging
+ from typing import AsyncIterator
+
+ from qtype.interpreter.base.base_step_executor import StepExecutor
+ from qtype.interpreter.base.executor_context import ExecutorContext
+ from qtype.interpreter.conversions import (
+     from_node_with_score,
+     to_llama_vector_store_and_retriever,
+ )
+ from qtype.interpreter.types import FlowMessage
+ from qtype.semantic.model import VectorIndex, VectorSearch
+
+ logger = logging.getLogger(__name__)
+
+
+ class VectorSearchExecutor(StepExecutor):
+     """Executor for VectorSearch steps using LlamaIndex vector stores."""
+
+     def __init__(
+         self, step: VectorSearch, context: ExecutorContext, **dependencies
+     ):
+         super().__init__(step, context, **dependencies)
+         if not isinstance(step, VectorSearch):
+             raise ValueError(
+                 "VectorSearchExecutor can only execute VectorSearch steps."
+             )
+         self.step: VectorSearch = step
+
+         if not isinstance(self.step.index, VectorIndex):
+             raise ValueError(
+                 f"VectorSearch step {self.step.id} must reference a VectorIndex."
+             )
+         self.index: VectorIndex = self.step.index
+
+         # Get the vector store and retriever
+         self._vector_store, self._retriever = (
+             to_llama_vector_store_and_retriever(
+                 self.step.index, self.context.secret_manager
+             )
+         )
+
+     async def process_message(
+         self,
+         message: FlowMessage,
+     ) -> AsyncIterator[FlowMessage]:
+         """Process a single FlowMessage for the VectorSearch step.
+
+         Args:
+             message: The FlowMessage to process.
+
+         Yields:
+             FlowMessage with search results.
+         """
+         try:
+             # Get the query from the input variable
+             # (validated to be exactly one text input)
+             input_var = self.step.inputs[0]
+             query = message.variables.get(input_var.id)
+
+             if not isinstance(query, str):
+                 raise ValueError(
+                     f"VectorSearch input must be text, got {type(query)}"
+                 )
+
+             # Perform the vector search
+             logger.debug(f"Performing vector search with query: {query}")
+             nodes_with_scores = await self._retriever.aretrieve(query)
+
+             # Convert results to RAGSearchResult objects
+             search_results = [
+                 from_node_with_score(node_with_score)
+                 for node_with_score in nodes_with_scores
+             ]
+
+             # Set the output variable (validated to be exactly one output
+             # of type list[RAGSearchResult])
+             output_var = self.step.outputs[0]
+             output_vars = {output_var.id: search_results}
+
+             yield message.copy_with_variables(output_vars)
+
+         except Exception as e:
+             logger.error(f"Vector search failed: {e}", exc_info=True)
+             # Emit error event to stream so frontend can display it
+             await self.stream_emitter.error(str(e))
+             message.set_error(self.step.id, e)
+             yield message
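
Note: retrieval here is delegated to a LlamaIndex retriever through `aretrieve`. A hedged sketch of the same retrieval pattern using llama-index-core directly (assumes an embedding model is configured; LlamaIndex defaults to OpenAI, which needs an API key, and the document text is made up):

import asyncio

from llama_index.core import Document, VectorStoreIndex

async def main() -> None:
    index = VectorStoreIndex.from_documents(
        [Document(text="QType flows are graphs of steps.")]
    )
    retriever = index.as_retriever(similarity_top_k=1)
    # aretrieve() is the same async call used in process_message above
    for nws in await retriever.aretrieve("What is a flow?"):
        print(nws.score, nws.node.get_content())

asyncio.run(main())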
qtype/interpreter/flow.py CHANGED
@@ -1,37 +1,187 @@
  from __future__ import annotations
 
+ import json
  import logging
- from typing import Any
+ from collections.abc import AsyncIterator
 
- from qtype.interpreter.exceptions import InterpreterError
- from qtype.interpreter.step import execute_step
- from qtype.semantic.model import Flow, Variable
+ from openinference.semconv.trace import (
+     OpenInferenceSpanKindValues,
+     SpanAttributes,
+ )
+ from opentelemetry import context as otel_context
+ from opentelemetry import trace
+ from opentelemetry.trace import Status, StatusCode
+ from rich.console import Console
+
+ from qtype.interpreter.base import factory
+ from qtype.interpreter.base.executor_context import ExecutorContext
+ from qtype.interpreter.logging_progress import LoggingProgressCallback
+ from qtype.interpreter.rich_progress import RichProgressCallback
+ from qtype.interpreter.types import FlowMessage, ProgressCallback
+ from qtype.semantic.model import Flow
 
  logger = logging.getLogger(__name__)
 
 
- def execute_flow(flow: Flow, **kwargs: dict[Any, Any]) -> list[Variable]:
-     """Execute a flow based on the provided arguments.
+ async def run_flow(
+     flow: Flow,
+     initial: list[FlowMessage] | AsyncIterator[FlowMessage] | FlowMessage,
+     show_progress: bool = False,
+     **kwargs,
+ ) -> list[FlowMessage]:
+     """
+     Main entrypoint for executing a flow.
 
      Args:
-         flow: The flow to execute.
-         inputs: The input variables for the flow.
-         **kwargs: Additional keyword arguments.
+         flow: The flow to execute
+         initial: Initial FlowMessage(s) to start execution
+         **kwargs: Dependencies including:
+             - context: ExecutorContext with cross-cutting concerns (optional)
+             - Other executor-specific dependencies
+
+     Returns:
+         List of final FlowMessages after execution
      """
-     logger.debug(f"Executing step: {flow.id} with kwargs: {kwargs}")
+     from qtype.interpreter.base.secrets import NoOpSecretManager
 
-     unset_inputs = [input for input in flow.inputs if not input.is_set()]
-     if unset_inputs:
-         raise InterpreterError(
-             f"The following inputs are required but have no values: {', '.join([input.id for input in unset_inputs])}"
+     # Wire up progress callback if requested
+     progress_callback: ProgressCallback | None = None
+     if show_progress:
+         console = Console()
+         if console.is_terminal:
+             progress_callback = RichProgressCallback()
+         else:
+             progress_callback = LoggingProgressCallback(log_every_seconds=120)
+
+     # Extract or create ExecutorContext
+     exec_context = kwargs.pop("context", None)
+     if exec_context is None:
+         exec_context = ExecutorContext(
+             secret_manager=NoOpSecretManager(),
+             tracer=trace.get_tracer(__name__),
+             on_progress=progress_callback,
          )
+     else:
+         if exec_context.on_progress is None and show_progress:
+             exec_context.on_progress = progress_callback
 
-     for step in flow.steps:
-         execute_step(step, **kwargs)
+     # Use tracer from context
+     tracer = exec_context.tracer or trace.get_tracer(__name__)
 
-     unset_outputs = [output for output in flow.outputs if not output.is_set()]
-     if unset_outputs:
-         raise InterpreterError(
-             f"The following outputs are required but have no values: {', '.join([output.id for output in unset_outputs])}"
-         )
-     return flow.outputs
+     # Start a span for the entire flow execution
+     span = tracer.start_span(
+         f"flow.{flow.id}",
+         attributes={
+             "flow.id": flow.id,
+             "flow.step_count": len(flow.steps),
+             SpanAttributes.OPENINFERENCE_SPAN_KIND: (
+                 OpenInferenceSpanKindValues.CHAIN.value
+             ),
+         },
+     )
+
+     # Make this span the active context so step spans will nest under it
+     # Only attach if span is recording (i.e., real tracer is configured)
+     ctx = trace.set_span_in_context(span)
+     token = otel_context.attach(ctx) if span.is_recording() else None
+
+     try:
+         # 1. The execution plan is just the steps in order
+         execution_plan = flow.steps
+
+         # 2. Convert the initial input to an iterable of some kind. Record telemetry if possible.
+         if isinstance(initial, FlowMessage):
+             span.set_attribute("flow.input_count", 1)
+             input_vars = {k: v for k, v in initial.variables.items()}
+             span.set_attribute(
+                 SpanAttributes.INPUT_VALUE,
+                 json.dumps(input_vars, default=str),
+             )
+             span.set_attribute(
+                 SpanAttributes.INPUT_MIME_TYPE, "application/json"
+             )
+             initial = [initial]
+
+         if isinstance(initial, list):
+             span.set_attribute("flow.input_count", len(initial))
+
+             # convert to async iterator
+             async def list_stream():
+                 for message in initial:
+                     yield message
+
+             current_stream = list_stream()
+         elif isinstance(initial, AsyncIterator):
+             # We can't know the count ahead of time
+             current_stream = initial
+         else:
+             raise ValueError(
+                 "Initial input must be a FlowMessage, list of FlowMessages, "
+                 "or AsyncIterator of FlowMessages"
+             )
+
+         # 4. Chain executors together in the main loop
+         for step in execution_plan:
+             executor = factory.create_executor(step, exec_context, **kwargs)
+             output_stream = executor.execute(
+                 current_stream,
+             )
+             current_stream = output_stream
+
+         # 5. Collect the final results from the last stream
+         final_results = [state async for state in current_stream]
+
+         # Close the progress bars if any
+         if progress_callback is not None:
+             progress_callback.close()
+         # Record flow completion metrics
+         span.set_attribute("flow.output_count", len(final_results))
+         error_count = sum(1 for msg in final_results if msg.is_failed())
+         span.set_attribute("flow.error_count", error_count)
+
+         # Record output variables for observability
+         if len(final_results) == 1 and span.is_recording():
+             try:
+                 output_vars = {
+                     k: v
+                     for msg in final_results
+                     if not msg.is_failed()
+                     for k, v in msg.variables.items()
+                 }
+                 span.set_attribute(
+                     SpanAttributes.OUTPUT_VALUE,
+                     json.dumps(output_vars, default=str),
+                 )
+                 span.set_attribute(
+                     SpanAttributes.OUTPUT_MIME_TYPE, "application/json"
+                 )
+             except Exception:
+                 # If serialization fails, skip it
+                 pass
+
+         if error_count > 0:
+             span.set_status(
+                 Status(
+                     StatusCode.ERROR,
+                     f"{error_count} of {len(final_results)} messages failed",
+                 )
+             )
+         else:
+             span.set_status(Status(StatusCode.OK))
+
+         return final_results
+
+     except Exception as e:
+         # Record the exception and set error status
+         span.record_exception(e)
+         span.set_status(Status(StatusCode.ERROR, f"Flow failed: {e}"))
+         raise
+     finally:
+         # Clean up context resources if we created it
+         if kwargs.get("context") is None:
+             exec_context.cleanup()
+         # Detach the context and end the span
+         # Only detach if we successfully attached (span was recording)
+         if token is not None:
+             otel_context.detach(token)
+         span.end()
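
Note: a hedged sketch of driving the new `run_flow` entrypoint. The `FlowMessage` constructor signature is an assumption (the diff only shows its usage), and `flow` must be a `qtype.semantic.model.Flow` loaded from a real QType application:

import asyncio

from qtype.interpreter.flow import run_flow
from qtype.interpreter.types import FlowMessage

async def main(flow) -> None:
    # Assumed constructor: a mapping of variable ids to values
    message = FlowMessage(variables={"question": "hello"})
    results = await run_flow(flow, message, show_progress=True)
    for result in results:
        print(result.variables)

# asyncio.run(main(flow))  # with a loaded Flow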
qtype/interpreter/logging_progress.py ADDED
@@ -0,0 +1,61 @@
+ import logging
+ import time
+
+ from qtype.interpreter.types import ProgressCallback
+
+
+ class LoggingProgressCallback(ProgressCallback):
+     def __init__(self, log_every_seconds: float = 120.0) -> None:
+         super().__init__()
+         self.log_every_seconds = log_every_seconds
+         self._last_log: dict[str, float] = {}
+         self._totals: dict[str, int | None] = {}
+
+     def __call__(
+         self,
+         step_id: str,
+         items_processed: int,
+         items_in_error: int,
+         items_succeeded: int,
+         total_items: int | None,
+         cache_hits: int | None,
+         cache_misses: int | None,
+     ) -> None:
+         logger = logging.getLogger(__name__)
+         now = time.monotonic()
+         last = self._last_log.get(step_id, 0.0)
+
+         self._totals[step_id] = total_items
+
+         if now - last < self.log_every_seconds:
+             return
+
+         self._last_log[step_id] = now
+         total_str = (
+             f"{items_processed}/{total_items}"
+             if total_items is not None
+             else f"{items_processed}"
+         )
+         if cache_hits is not None or cache_misses is not None:
+             logger.info(
+                 "Step %s: processed=%s, succeeded=%s, errors=%s, "
+                 "cache_hits=%s, cache_misses=%s",
+                 step_id,
+                 total_str,
+                 items_succeeded,
+                 items_in_error,
+                 cache_hits if cache_hits is not None else "-",
+                 cache_misses if cache_misses is not None else "-",
+             )
+         else:
+             logger.info(
+                 "Step %s: processed=%s, succeeded=%s, errors=%s",
+                 step_id,
+                 total_str,
+                 items_succeeded,
+                 items_in_error,
+             )
+
+     def close(self) -> None:
+         # optional: final summary logging
+         pass
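
Note: usage sketch for the throttled progress logger. It emits at most one log line per step every `log_every_seconds`; the step id and counts below are made up:

import logging

from qtype.interpreter.logging_progress import LoggingProgressCallback

logging.basicConfig(level=logging.INFO)
callback = LoggingProgressCallback(log_every_seconds=0.0)  # log on every call
callback(
    step_id="sql_source",
    items_processed=10,
    items_in_error=1,
    items_succeeded=9,
    total_items=100,
    cache_hits=None,
    cache_misses=None,
)
# -> Step sql_source: processed=10/100, succeeded=9, errors=1
callback.close()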
qtype/interpreter/metadata_api.py ADDED
@@ -0,0 +1,115 @@
+ """Metadata API endpoints for flow discovery."""
+
+ from __future__ import annotations
+
+ from typing import Any
+
+ from fastapi import FastAPI
+ from pydantic import BaseModel, Field
+
+ from qtype.interpreter.typing import create_input_shape, create_output_shape
+ from qtype.semantic.model import Application, Flow
+
+
+ class FlowEndpoints(BaseModel):
+     """Available endpoints for a flow."""
+
+     rest: str = Field(..., description="REST execution endpoint")
+     stream: str | None = Field(
+         None,
+         description="Streaming endpoint (SSE) if flow has an interface",
+     )
+
+
+ class FlowMetadata(BaseModel):
+     """Metadata about a flow for frontend discovery."""
+
+     id: str = Field(..., description="Flow ID")
+     description: str | None = Field(None, description="Flow description")
+     interface_type: str | None = Field(
+         None,
+         description="Interface type: 'Complete' or 'Conversational'",
+     )
+     session_inputs: list[str] = Field(
+         default_factory=list,
+         description="Input variables that persist across session",
+     )
+     endpoints: FlowEndpoints = Field(
+         ..., description="Available API endpoints"
+     )
+     input_schema: dict[str, Any] = Field(
+         ..., description="JSON schema for input"
+     )
+     output_schema: dict[str, Any] = Field(
+         ..., description="JSON schema for output"
+     )
+
+
+ def create_metadata_endpoints(app: FastAPI, application: Application) -> None:
+     """
+     Create metadata endpoints for flow discovery.
+
+     Args:
+         app: FastAPI application instance
+         application: QType Application with flows
+     """
+
+     @app.get(
+         "/flows",
+         tags=["flows"],
+         summary="List all flows",
+         description="Get metadata for all available flows",
+         response_model=list[FlowMetadata],
+     )
+     async def list_flows() -> list[FlowMetadata]:
+         """List all flows with their metadata."""
+         flows_metadata = []
+
+         for flow in application.flows:
+             metadata = _create_flow_metadata(flow)
+             flows_metadata.append(metadata)
+
+         return flows_metadata
+
+
+ def _create_flow_metadata(flow: Flow) -> FlowMetadata:
+     """
+     Create metadata for a single flow.
+
+     Args:
+         flow: Flow to create metadata for
+
+     Returns:
+         FlowMetadata with all information
+     """
+     # Determine interface type
+     interface_type = None
+     session_inputs = []
+     if flow.interface:
+         interface_type = flow.interface.type
+         session_inputs = [
+             var.id if hasattr(var, "id") else str(var)
+             for var in flow.interface.session_inputs
+         ]
+
+     # Create schemas
+     input_model = create_input_shape(flow)
+     output_model = create_output_shape(flow)
+
+     # Determine streaming endpoint availability
+     stream_endpoint = (
+         f"/flows/{flow.id}/stream" if flow.interface is not None else None
+     )
+
+     return FlowMetadata(
+         id=flow.id,
+         description=flow.description,
+         interface_type=interface_type,
+         session_inputs=session_inputs,
+         endpoints=FlowEndpoints(
+             rest=f"/flows/{flow.id}",
+             stream=stream_endpoint,
+         ),
+         input_schema=input_model.model_json_schema(),
+         output_schema=output_model.model_json_schema(),
+     )
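
Note: a sketch of exercising the new discovery endpoint with FastAPI's test client; `application` is assumed to be an already-loaded `qtype.semantic.model.Application`:

from fastapi import FastAPI
from fastapi.testclient import TestClient

from qtype.interpreter.metadata_api import create_metadata_endpoints

app = FastAPI()
create_metadata_endpoints(app, application)  # application loaded elsewhere
client = TestClient(app)
for flow_meta in client.get("/flows").json():
    print(flow_meta["id"], flow_meta["endpoints"]["rest"], flow_meta["endpoints"]["stream"])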
qtype/interpreter/resource_cache.py CHANGED
@@ -2,12 +2,13 @@ import functools
  import os
  from typing import Any, Callable
 
- from cachetools import LRUCache  # type: ignore[import-untyped]
+ from cachetools import TTLCache  # type: ignore[import-untyped]
 
- # Global LRU cache with a reasonable default size
+ # Global TTL cache with a reasonable default size and 55-minute TTL
  _RESOURCE_CACHE_MAX_SIZE = int(os.environ.get("RESOURCE_CACHE_MAX_SIZE", 128))
- _GLOBAL_RESOURCE_CACHE: LRUCache[Any, Any] = LRUCache(
-     maxsize=_RESOURCE_CACHE_MAX_SIZE
+ _RESOURCE_CACHE_TTL = int(os.environ.get("RESOURCE_CACHE_TTL", 55 * 60))
+ _GLOBAL_RESOURCE_CACHE: TTLCache[Any, Any] = TTLCache(
+     maxsize=_RESOURCE_CACHE_MAX_SIZE, ttl=_RESOURCE_CACHE_TTL
  )
 
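
Note: the LRUCache to TTLCache switch means cached resources now also expire after `RESOURCE_CACHE_TTL` seconds (55 minutes by default) rather than living until size-based eviction, plausibly so that clients built from roughly hour-long credentials are rebuilt instead of reused stale. A minimal demonstration of the expiry behavior:

import time

from cachetools import TTLCache

cache: TTLCache[str, str] = TTLCache(maxsize=128, ttl=1)  # 1s TTL for the demo
cache["conn"] = "resource"
assert "conn" in cache
time.sleep(1.1)
assert "conn" not in cache  # expired; would be recreated on next use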