qtype 0.0.16-py3-none-any.whl → 0.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. qtype/application/commons/tools.py +1 -1
  2. qtype/application/converters/tools_from_api.py +5 -5
  3. qtype/application/converters/tools_from_module.py +2 -2
  4. qtype/application/converters/types.py +14 -43
  5. qtype/application/documentation.py +1 -1
  6. qtype/application/facade.py +94 -73
  7. qtype/base/types.py +227 -7
  8. qtype/cli.py +4 -0
  9. qtype/commands/convert.py +20 -8
  10. qtype/commands/generate.py +19 -27
  11. qtype/commands/run.py +73 -36
  12. qtype/commands/serve.py +74 -54
  13. qtype/commands/validate.py +34 -8
  14. qtype/commands/visualize.py +46 -22
  15. qtype/dsl/__init__.py +6 -5
  16. qtype/dsl/custom_types.py +1 -1
  17. qtype/dsl/domain_types.py +65 -5
  18. qtype/dsl/linker.py +384 -0
  19. qtype/dsl/loader.py +315 -0
  20. qtype/dsl/model.py +612 -363
  21. qtype/dsl/parser.py +200 -0
  22. qtype/dsl/types.py +50 -0
  23. qtype/interpreter/api.py +57 -136
  24. qtype/interpreter/auth/aws.py +19 -9
  25. qtype/interpreter/auth/generic.py +93 -16
  26. qtype/interpreter/base/base_step_executor.py +436 -0
  27. qtype/interpreter/base/batch_step_executor.py +171 -0
  28. qtype/interpreter/base/exceptions.py +50 -0
  29. qtype/interpreter/base/executor_context.py +74 -0
  30. qtype/interpreter/base/factory.py +117 -0
  31. qtype/interpreter/base/progress_tracker.py +110 -0
  32. qtype/interpreter/base/secrets.py +339 -0
  33. qtype/interpreter/base/step_cache.py +74 -0
  34. qtype/interpreter/base/stream_emitter.py +469 -0
  35. qtype/interpreter/conversions.py +462 -22
  36. qtype/interpreter/converters.py +77 -0
  37. qtype/interpreter/endpoints.py +355 -0
  38. qtype/interpreter/executors/agent_executor.py +242 -0
  39. qtype/interpreter/executors/aggregate_executor.py +93 -0
  40. qtype/interpreter/executors/decoder_executor.py +163 -0
  41. qtype/interpreter/executors/doc_to_text_executor.py +112 -0
  42. qtype/interpreter/executors/document_embedder_executor.py +107 -0
  43. qtype/interpreter/executors/document_search_executor.py +122 -0
  44. qtype/interpreter/executors/document_source_executor.py +118 -0
  45. qtype/interpreter/executors/document_splitter_executor.py +105 -0
  46. qtype/interpreter/executors/echo_executor.py +63 -0
  47. qtype/interpreter/executors/field_extractor_executor.py +160 -0
  48. qtype/interpreter/executors/file_source_executor.py +101 -0
  49. qtype/interpreter/executors/file_writer_executor.py +110 -0
  50. qtype/interpreter/executors/index_upsert_executor.py +228 -0
  51. qtype/interpreter/executors/invoke_embedding_executor.py +92 -0
  52. qtype/interpreter/executors/invoke_flow_executor.py +51 -0
  53. qtype/interpreter/executors/invoke_tool_executor.py +358 -0
  54. qtype/interpreter/executors/llm_inference_executor.py +272 -0
  55. qtype/interpreter/executors/prompt_template_executor.py +78 -0
  56. qtype/interpreter/executors/sql_source_executor.py +106 -0
  57. qtype/interpreter/executors/vector_search_executor.py +91 -0
  58. qtype/interpreter/flow.py +159 -22
  59. qtype/interpreter/metadata_api.py +115 -0
  60. qtype/interpreter/resource_cache.py +5 -4
  61. qtype/interpreter/rich_progress.py +225 -0
  62. qtype/interpreter/stream/chat/__init__.py +15 -0
  63. qtype/interpreter/stream/chat/converter.py +391 -0
  64. qtype/interpreter/{chat → stream/chat}/file_conversions.py +2 -2
  65. qtype/interpreter/stream/chat/ui_request_to_domain_type.py +140 -0
  66. qtype/interpreter/stream/chat/vercel.py +609 -0
  67. qtype/interpreter/stream/utils/__init__.py +15 -0
  68. qtype/interpreter/stream/utils/build_vercel_ai_formatter.py +74 -0
  69. qtype/interpreter/stream/utils/callback_to_stream.py +66 -0
  70. qtype/interpreter/stream/utils/create_streaming_response.py +18 -0
  71. qtype/interpreter/stream/utils/default_chat_extract_text.py +20 -0
  72. qtype/interpreter/stream/utils/error_streaming_response.py +20 -0
  73. qtype/interpreter/telemetry.py +135 -8
  74. qtype/interpreter/tools/__init__.py +5 -0
  75. qtype/interpreter/tools/function_tool_helper.py +265 -0
  76. qtype/interpreter/types.py +330 -0
  77. qtype/interpreter/typing.py +83 -89
  78. qtype/interpreter/ui/404/index.html +1 -1
  79. qtype/interpreter/ui/404.html +1 -1
  80. qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_buildManifest.js +1 -1
  81. qtype/interpreter/ui/_next/static/chunks/{393-8fd474427f8e19ce.js → 434-b2112d19f25c44ff.js} +3 -3
  82. qtype/interpreter/ui/_next/static/chunks/app/page-8c67d16ac90d23cb.js +1 -0
  83. qtype/interpreter/ui/_next/static/chunks/ba12c10f-546f2714ff8abc66.js +1 -0
  84. qtype/interpreter/ui/_next/static/css/8a8d1269e362fef7.css +3 -0
  85. qtype/interpreter/ui/icon.png +0 -0
  86. qtype/interpreter/ui/index.html +1 -1
  87. qtype/interpreter/ui/index.txt +4 -4
  88. qtype/semantic/checker.py +583 -0
  89. qtype/semantic/generate.py +262 -83
  90. qtype/semantic/loader.py +95 -0
  91. qtype/semantic/model.py +436 -159
  92. qtype/semantic/resolver.py +63 -19
  93. qtype/semantic/visualize.py +28 -31
  94. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/METADATA +16 -3
  95. qtype-0.1.1.dist-info/RECORD +135 -0
  96. qtype/dsl/base_types.py +0 -38
  97. qtype/dsl/validator.py +0 -465
  98. qtype/interpreter/batch/__init__.py +0 -0
  99. qtype/interpreter/batch/file_sink_source.py +0 -162
  100. qtype/interpreter/batch/flow.py +0 -95
  101. qtype/interpreter/batch/sql_source.py +0 -92
  102. qtype/interpreter/batch/step.py +0 -74
  103. qtype/interpreter/batch/types.py +0 -41
  104. qtype/interpreter/batch/utils.py +0 -178
  105. qtype/interpreter/chat/chat_api.py +0 -237
  106. qtype/interpreter/chat/vercel.py +0 -314
  107. qtype/interpreter/exceptions.py +0 -10
  108. qtype/interpreter/step.py +0 -67
  109. qtype/interpreter/steps/__init__.py +0 -0
  110. qtype/interpreter/steps/agent.py +0 -114
  111. qtype/interpreter/steps/condition.py +0 -36
  112. qtype/interpreter/steps/decoder.py +0 -88
  113. qtype/interpreter/steps/llm_inference.py +0 -171
  114. qtype/interpreter/steps/prompt_template.py +0 -54
  115. qtype/interpreter/steps/search.py +0 -24
  116. qtype/interpreter/steps/tool.py +0 -219
  117. qtype/interpreter/streaming_helpers.py +0 -123
  118. qtype/interpreter/ui/_next/static/chunks/app/page-7e26b6156cfb55d3.js +0 -1
  119. qtype/interpreter/ui/_next/static/chunks/ba12c10f-22556063851a6df2.js +0 -1
  120. qtype/interpreter/ui/_next/static/css/b40532b0db09cce3.css +0 -3
  121. qtype/interpreter/ui/favicon.ico +0 -0
  122. qtype/loader.py +0 -390
  123. qtype-0.0.16.dist-info/RECORD +0 -106
  124. /qtype/interpreter/ui/_next/static/{nUaw6_IwRwPqkzwe5s725 → 20HoJN6otZ_LyHLHpCPE6}/_ssgManifest.js +0 -0
  125. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/WHEEL +0 -0
  126. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/entry_points.txt +0 -0
  127. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/licenses/LICENSE +0 -0
  128. {qtype-0.0.16.dist-info → qtype-0.1.1.dist-info}/top_level.txt +0 -0
qtype/interpreter/executors/sql_source_executor.py ADDED
@@ -0,0 +1,106 @@
+ from typing import AsyncIterator
+
+ import boto3  # type: ignore[import-untyped]
+ import pandas as pd
+ import sqlalchemy
+ from sqlalchemy import create_engine
+ from sqlalchemy.exc import SQLAlchemyError
+
+ from qtype.interpreter.auth.generic import auth
+ from qtype.interpreter.base.base_step_executor import StepExecutor
+ from qtype.interpreter.base.executor_context import ExecutorContext
+ from qtype.interpreter.types import FlowMessage
+ from qtype.semantic.model import SQLSource
+
+
+ class SQLSourceExecutor(StepExecutor):
+     """Executor for SQLSource steps."""
+
+     def __init__(
+         self, step: SQLSource, context: ExecutorContext, **dependencies
+     ):
+         super().__init__(step, context, **dependencies)
+         if not isinstance(step, SQLSource):
+             raise ValueError(
+                 "SQLSourceExecutor can only execute SQLSource steps."
+             )
+         self.step: SQLSource = step
+
+     async def process_message(
+         self,
+         message: FlowMessage,
+     ) -> AsyncIterator[FlowMessage]:
+         """Process a single FlowMessage for the SQLSource step.
+
+         Args:
+             message: The FlowMessage to process.
+         Yields:
+             FlowMessages with the results of SQL query execution.
+         """
+         # Create a database engine - resolve connection string if it's a SecretReference
+         connection_string = self._resolve_secret(self.step.connection)
+         connect_args = {}
+         if self.step.auth:
+             with auth(self.step.auth) as creds:
+                 if isinstance(creds, boto3.Session):
+                     connect_args["session"] = creds
+         engine = create_engine(connection_string, connect_args=connect_args)
+
+         output_columns = {output.id for output in self.step.outputs}
+         step_inputs = {i.id for i in self.step.inputs}
+
+         try:
+             # Make a dictionary of column_name: value from message variables
+             params = {
+                 col: message.variables.get(col)
+                 for col in step_inputs
+                 if col in message.variables
+             }
+
+             await self.stream_emitter.status(
+                 f"Executing SQL query with params: {params}",
+             )
+
+             # Execute the query and fetch the results into a DataFrame
+             with engine.connect() as connection:
+                 result = connection.execute(
+                     sqlalchemy.text(self.step.query),
+                     parameters=params if params else None,
+                 )
+                 df = pd.DataFrame(
+                     result.fetchall(), columns=list(result.keys())
+                 )
+
+             # Confirm the outputs exist in the dataframe
+             columns = set(df.columns)
+             missing_columns = output_columns - columns
+             if missing_columns:
+                 raise ValueError(
+                     (
+                         f"SQL Result was missing expected columns: "
+                         f"{', '.join(missing_columns)}, it has columns: "
+                         f"{', '.join(columns)}"
+                     )
+                 )
+
+             # Emit one message per result row
+             for _, row in df.iterrows():
+                 # Create a dict with only the output columns
+                 row_dict = {
+                     str(k): v
+                     for k, v in row.to_dict().items()
+                     if str(k) in output_columns
+                 }
+                 # Merge with original message variables
+                 yield message.copy_with_variables(new_variables=row_dict)
+
+             await self.stream_emitter.status(
+                 f"Emitted {len(df)} rows from SQL query"
+             )
+
+         except SQLAlchemyError as e:
+             # Emit error event to stream so frontend can display it
+             await self.stream_emitter.error(str(e))
+             # Set error on the message and yield it
+             message.set_error(self.step.id, e)
+             yield message
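
Since the executor passes message variables to `sqlalchemy.text`, a step's `query` can reference its inputs as named `:param` placeholders, and each result row becomes one emitted message. A minimal standalone sketch of that binding behavior (the SQLite table and `user_id` input are hypothetical, not from this package):

    import pandas as pd
    import sqlalchemy
    from sqlalchemy import create_engine

    # Hypothetical in-memory database standing in for step.connection
    engine = create_engine("sqlite:///:memory:")
    with engine.connect() as conn:
        conn.execute(sqlalchemy.text("CREATE TABLE users (id INTEGER, name TEXT)"))
        conn.execute(sqlalchemy.text("INSERT INTO users VALUES (1, 'ada'), (2, 'bob')"))

        # Mirrors the executor: message variables become named bind parameters
        result = conn.execute(
            sqlalchemy.text("SELECT id, name FROM users WHERE id = :user_id"),
            parameters={"user_id": 1},
        )
        df = pd.DataFrame(result.fetchall(), columns=list(result.keys()))
        print(df)  # one row here; the executor yields one FlowMessage per row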
qtype/interpreter/executors/vector_search_executor.py ADDED
@@ -0,0 +1,91 @@
+ """Vector search executor for retrieving relevant chunks from vector stores."""
+
+ from __future__ import annotations
+
+ import logging
+ from typing import AsyncIterator
+
+ from qtype.interpreter.base.base_step_executor import StepExecutor
+ from qtype.interpreter.base.executor_context import ExecutorContext
+ from qtype.interpreter.conversions import (
+     from_node_with_score,
+     to_llama_vector_store_and_retriever,
+ )
+ from qtype.interpreter.types import FlowMessage
+ from qtype.semantic.model import VectorIndex, VectorSearch
+
+ logger = logging.getLogger(__name__)
+
+
+ class VectorSearchExecutor(StepExecutor):
+     """Executor for VectorSearch steps using LlamaIndex vector stores."""
+
+     def __init__(
+         self, step: VectorSearch, context: ExecutorContext, **dependencies
+     ):
+         super().__init__(step, context, **dependencies)
+         if not isinstance(step, VectorSearch):
+             raise ValueError(
+                 "VectorSearchExecutor can only execute VectorSearch steps."
+             )
+         self.step: VectorSearch = step
+
+         if not isinstance(self.step.index, VectorIndex):
+             raise ValueError(
+                 f"VectorSearch step {self.step.id} must reference a VectorIndex."
+             )
+         self.index: VectorIndex = self.step.index
+
+         # Get the vector store and retriever
+         self._vector_store, self._retriever = (
+             to_llama_vector_store_and_retriever(
+                 self.step.index, self.context.secret_manager
+             )
+         )
+
+     async def process_message(
+         self,
+         message: FlowMessage,
+     ) -> AsyncIterator[FlowMessage]:
+         """Process a single FlowMessage for the VectorSearch step.
+
+         Args:
+             message: The FlowMessage to process.
+
+         Yields:
+             FlowMessage with search results.
+         """
+         try:
+             # Get the query from the input variable
+             # (validated to be exactly one text input)
+             input_var = self.step.inputs[0]
+             query = message.variables.get(input_var.id)
+
+             if not isinstance(query, str):
+                 raise ValueError(
+                     f"VectorSearch input must be text, got {type(query)}"
+                 )
+
+             # Perform the vector search
+             logger.debug(f"Performing vector search with query: {query}")
+             nodes_with_scores = await self._retriever.aretrieve(query)
+
+             # Convert results to RAGSearchResult objects
+             search_results = [
+                 from_node_with_score(node_with_score)
+                 for node_with_score in nodes_with_scores
+             ]
+
+             # Set the output variable (validated to be exactly one output
+             # of type list[RAGSearchResult])
+             output_var = self.step.outputs[0]
+             output_vars = {output_var.id: search_results}
+
+             yield message.copy_with_variables(output_vars)
+
+         except Exception as e:
+             logger.error(f"Vector search failed: {e}", exc_info=True)
+             # Emit error event to stream so frontend can display it
+             await self.stream_emitter.error(str(e))
+             message.set_error(self.step.id, e)
+             yield message
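
The `aretrieve` call is LlamaIndex's standard retriever protocol: it takes a query string and returns `NodeWithScore` objects, which the executor then converts via `from_node_with_score`. A minimal sketch of that protocol outside QType, assuming an embedding model is configured (LlamaIndex defaults to OpenAI) and using hypothetical document text:

    import asyncio

    from llama_index.core import Document, VectorStoreIndex

    async def main() -> None:
        # Hypothetical corpus; the executor instead builds a retriever
        # from the step's configured VectorIndex
        index = VectorStoreIndex.from_documents(
            [Document(text="QType executes flows as streams of messages.")]
        )
        retriever = index.as_retriever(similarity_top_k=2)

        # Same call shape the executor uses
        nodes_with_scores = await retriever.aretrieve("What does QType do?")
        for nws in nodes_with_scores:
            print(nws.score, nws.node.get_content())

    asyncio.run(main())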
qtype/interpreter/flow.py CHANGED
@@ -1,37 +1,174 @@
  from __future__ import annotations
 
  import logging
- from typing import Any
 
- from qtype.interpreter.exceptions import InterpreterError
- from qtype.interpreter.step import execute_step
- from qtype.semantic.model import Flow, Variable
+ from openinference.semconv.trace import (
+     OpenInferenceSpanKindValues,
+     SpanAttributes,
+ )
+ from opentelemetry import context as otel_context
+ from opentelemetry import trace
+ from opentelemetry.trace import Status, StatusCode
+
+ from qtype.interpreter.base import factory
+ from qtype.interpreter.base.executor_context import ExecutorContext
+ from qtype.interpreter.rich_progress import RichProgressCallback
+ from qtype.interpreter.types import FlowMessage
+ from qtype.semantic.model import Flow
 
  logger = logging.getLogger(__name__)
 
 
- def execute_flow(flow: Flow, **kwargs: dict[Any, Any]) -> list[Variable]:
-     """Execute a flow based on the provided arguments.
+ async def run_flow(
+     flow: Flow,
+     initial: list[FlowMessage] | FlowMessage,
+     show_progress: bool = False,
+     **kwargs,
+ ) -> list[FlowMessage]:
+     """
+     Main entrypoint for executing a flow.
 
      Args:
-         flow: The flow to execute.
-         inputs: The input variables for the flow.
-         **kwargs: Additional keyword arguments.
+         flow: The flow to execute
+         initial: Initial FlowMessage(s) to start execution
+         **kwargs: Dependencies including:
+             - context: ExecutorContext with cross-cutting concerns (optional)
+             - Other executor-specific dependencies
+
+     Returns:
+         List of final FlowMessages after execution
      """
-     logger.debug(f"Executing step: {flow.id} with kwargs: {kwargs}")
+     from qtype.interpreter.base.secrets import NoOpSecretManager
 
-     unset_inputs = [input for input in flow.inputs if not input.is_set()]
-     if unset_inputs:
-         raise InterpreterError(
-             f"The following inputs are required but have no values: {', '.join([input.id for input in unset_inputs])}"
+     # Extract or create ExecutorContext
+     exec_context = kwargs.pop("context", None)
+     progress_callback = RichProgressCallback() if show_progress else None
+     if exec_context is None:
+         exec_context = ExecutorContext(
+             secret_manager=NoOpSecretManager(),
+             tracer=trace.get_tracer(__name__),
+             on_progress=progress_callback,
          )
+     else:
+         if exec_context.on_progress is None and show_progress:
+             exec_context.on_progress = progress_callback
 
-     for step in flow.steps:
-         execute_step(step, **kwargs)
+     # Use tracer from context
+     tracer = exec_context.tracer or trace.get_tracer(__name__)
 
-     unset_outputs = [output for output in flow.outputs if not output.is_set()]
-     if unset_outputs:
-         raise InterpreterError(
-             f"The following outputs are required but have no values: {', '.join([output.id for output in unset_outputs])}"
-         )
-     return flow.outputs
+     # Start a span for the entire flow execution
+     span = tracer.start_span(
+         f"flow.{flow.id}",
+         attributes={
+             "flow.id": flow.id,
+             "flow.step_count": len(flow.steps),
+             SpanAttributes.OPENINFERENCE_SPAN_KIND: (
+                 OpenInferenceSpanKindValues.CHAIN.value
+             ),
+         },
+     )
+
+     # Make this span the active context so step spans will nest under it
+     # Only attach if span is recording (i.e., real tracer is configured)
+     ctx = trace.set_span_in_context(span)
+     token = otel_context.attach(ctx) if span.is_recording() else None
+
+     try:
+         # 1. The execution plan is just the steps in order
+         execution_plan = flow.steps
+
+         # 2. Initialize the stream
+         if not isinstance(initial, list):
+             initial = [initial]
+
+         span.set_attribute("flow.input_count", len(initial))
+
+         # Record input variables for observability
+         if initial:
+             import json
+
+             try:
+                 input_vars = {
+                     k: v for msg in initial for k, v in msg.variables.items()
+                 }
+                 span.set_attribute(
+                     SpanAttributes.INPUT_VALUE,
+                     json.dumps(input_vars, default=str),
+                 )
+                 span.set_attribute(
+                     SpanAttributes.INPUT_MIME_TYPE, "application/json"
+                 )
+             except Exception:
+                 # If serialization fails, skip it
+                 pass
+
+         async def initial_stream():
+             for message in initial:
+                 yield message
+
+         current_stream = initial_stream()
+
+         # 3. Chain executors together in the main loop
+         for step in execution_plan:
+             executor = factory.create_executor(step, exec_context, **kwargs)
+             output_stream = executor.execute(
+                 current_stream,
+             )
+             current_stream = output_stream
+
+         # 4. Collect the final results from the last stream
+         final_results = [state async for state in current_stream]
+
+         # Close the progress bars if any
+         if progress_callback is not None:
+             progress_callback.close()
+         # Record flow completion metrics
+         span.set_attribute("flow.output_count", len(final_results))
+         error_count = sum(1 for msg in final_results if msg.is_failed())
+         span.set_attribute("flow.error_count", error_count)
+
+         # Record output variables for observability
+         if final_results:
+             import json
+
+             try:
+                 output_vars = {
+                     k: v
+                     for msg in final_results
+                     if not msg.is_failed()
+                     for k, v in msg.variables.items()
+                 }
+                 span.set_attribute(
+                     SpanAttributes.OUTPUT_VALUE,
+                     json.dumps(output_vars, default=str),
+                 )
+                 span.set_attribute(
+                     SpanAttributes.OUTPUT_MIME_TYPE, "application/json"
+                 )
+             except Exception:
+                 # If serialization fails, skip it
+                 pass
+
+         if error_count > 0:
+             span.set_status(
+                 Status(
+                     StatusCode.ERROR,
+                     f"{error_count} of {len(final_results)} messages failed",
+                 )
+             )
+         else:
+             span.set_status(Status(StatusCode.OK))
+
+         return final_results
+
+     except Exception as e:
+         # Record the exception and set error status
+         span.record_exception(e)
+         span.set_status(Status(StatusCode.ERROR, f"Flow failed: {e}"))
+         raise
+     finally:
+         # Detach the context and end the span
+         # Only detach if we successfully attached (span was recording)
+         if token is not None:
+             otel_context.detach(token)
+         span.end()
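
Because `run_flow` is async and accepts either a single `FlowMessage` or a list, a caller drives it roughly as below. This is a sketch: the `variables=` constructor argument for `FlowMessage` is an assumption not confirmed by this diff, and `flow` must come from loading a QType application:

    import asyncio

    from qtype.interpreter.flow import run_flow
    from qtype.interpreter.types import FlowMessage

    async def main(flow) -> None:
        # Hypothetical constructor shape for FlowMessage
        message = FlowMessage(variables={"question": "What is QType?"})
        results = await run_flow(flow, message, show_progress=True)
        for msg in results:
            if msg.is_failed():  # per the error accounting above
                print("failed:", msg)
            else:
                print(msg.variables)

    # asyncio.run(main(flow))  # with `flow` resolved from an Application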
qtype/interpreter/metadata_api.py ADDED
@@ -0,0 +1,115 @@
+ """Metadata API endpoints for flow discovery."""
+
+ from __future__ import annotations
+
+ from typing import Any
+
+ from fastapi import FastAPI
+ from pydantic import BaseModel, Field
+
+ from qtype.interpreter.typing import create_input_shape, create_output_shape
+ from qtype.semantic.model import Application, Flow
+
+
+ class FlowEndpoints(BaseModel):
+     """Available endpoints for a flow."""
+
+     rest: str = Field(..., description="REST execution endpoint")
+     stream: str | None = Field(
+         None,
+         description="Streaming endpoint (SSE) if flow has an interface",
+     )
+
+
+ class FlowMetadata(BaseModel):
+     """Metadata about a flow for frontend discovery."""
+
+     id: str = Field(..., description="Flow ID")
+     description: str | None = Field(None, description="Flow description")
+     interface_type: str | None = Field(
+         None,
+         description="Interface type: 'Complete' or 'Conversational'",
+     )
+     session_inputs: list[str] = Field(
+         default_factory=list,
+         description="Input variables that persist across session",
+     )
+     endpoints: FlowEndpoints = Field(
+         ..., description="Available API endpoints"
+     )
+     input_schema: dict[str, Any] = Field(
+         ..., description="JSON schema for input"
+     )
+     output_schema: dict[str, Any] = Field(
+         ..., description="JSON schema for output"
+     )
+
+
+ def create_metadata_endpoints(app: FastAPI, application: Application) -> None:
+     """
+     Create metadata endpoints for flow discovery.
+
+     Args:
+         app: FastAPI application instance
+         application: QType Application with flows
+     """
+
+     @app.get(
+         "/flows",
+         tags=["flows"],
+         summary="List all flows",
+         description="Get metadata for all available flows",
+         response_model=list[FlowMetadata],
+     )
+     async def list_flows() -> list[FlowMetadata]:
+         """List all flows with their metadata."""
+         flows_metadata = []
+
+         for flow in application.flows:
+             metadata = _create_flow_metadata(flow)
+             flows_metadata.append(metadata)
+
+         return flows_metadata
+
+
+ def _create_flow_metadata(flow: Flow) -> FlowMetadata:
+     """
+     Create metadata for a single flow.
+
+     Args:
+         flow: Flow to create metadata for
+
+     Returns:
+         FlowMetadata with all information
+     """
+     # Determine interface type
+     interface_type = None
+     session_inputs = []
+     if flow.interface:
+         interface_type = flow.interface.type
+         session_inputs = [
+             var.id if hasattr(var, "id") else str(var)
+             for var in flow.interface.session_inputs
+         ]
+
+     # Create schemas
+     input_model = create_input_shape(flow)
+     output_model = create_output_shape(flow)
+
+     # Determine streaming endpoint availability
+     stream_endpoint = (
+         f"/flows/{flow.id}/stream" if flow.interface is not None else None
+     )
+
+     return FlowMetadata(
+         id=flow.id,
+         description=flow.description,
+         interface_type=interface_type,
+         session_inputs=session_inputs,
+         endpoints=FlowEndpoints(
+             rest=f"/flows/{flow.id}",
+             stream=stream_endpoint,
+         ),
+         input_schema=input_model.model_json_schema(),
+         output_schema=output_model.model_json_schema(),
+     )
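
Once these endpoints are registered, a client can discover flows by fetching `/flows`; each entry carries the REST and streaming endpoints plus input/output JSON schemas. A minimal client sketch (the host and port are assumptions about how the server is run):

    import requests

    # Assumes a locally running QType server; adjust host/port as needed
    for flow in requests.get("http://localhost:8000/flows", timeout=10).json():
        print(flow["id"], flow["endpoints"]["rest"], flow["endpoints"]["stream"])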
qtype/interpreter/resource_cache.py CHANGED
@@ -2,12 +2,13 @@ import functools
  import os
  from typing import Any, Callable
 
- from cachetools import LRUCache  # type: ignore[import-untyped]
+ from cachetools import TTLCache  # type: ignore[import-untyped]
 
- # Global LRU cache with a reasonable default size
+ # Global TTL cache with a reasonable default size and 55-minute TTL
  _RESOURCE_CACHE_MAX_SIZE = int(os.environ.get("RESOURCE_CACHE_MAX_SIZE", 128))
- _GLOBAL_RESOURCE_CACHE: LRUCache[Any, Any] = LRUCache(
-     maxsize=_RESOURCE_CACHE_MAX_SIZE
+ _RESOURCE_CACHE_TTL = int(os.environ.get("RESOURCE_CACHE_TTL", 55 * 60))
+ _GLOBAL_RESOURCE_CACHE: TTLCache[Any, Any] = TTLCache(
+     maxsize=_RESOURCE_CACHE_MAX_SIZE, ttl=_RESOURCE_CACHE_TTL
  )
 
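
The switch from `LRUCache` to `TTLCache` means cached resources are now evicted by age as well as by size, so entries built from short-lived credentials (such as session-based database clients) get rebuilt after the 55-minute default expiry. A small standalone illustration of cachetools' TTL semantics (demo values only):

    import time

    from cachetools import TTLCache

    cache: TTLCache[str, str] = TTLCache(maxsize=128, ttl=1)  # 1-second TTL for the demo
    cache["engine"] = "resource"
    assert "engine" in cache
    time.sleep(1.1)
    assert "engine" not in cache  # expired entries are dropped on access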