qtype 0.1.0-py3-none-any.whl → 0.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qtype/application/facade.py +2 -2
- qtype/cli.py +4 -0
- qtype/commands/run.py +22 -3
- qtype/interpreter/base/base_step_executor.py +8 -1
- qtype/interpreter/base/progress_tracker.py +35 -0
- qtype/interpreter/base/step_cache.py +3 -2
- qtype/interpreter/conversions.py +19 -13
- qtype/interpreter/converters.py +5 -1
- qtype/interpreter/executors/document_embedder_executor.py +36 -4
- qtype/interpreter/executors/document_splitter_executor.py +1 -1
- qtype/interpreter/executors/index_upsert_executor.py +2 -2
- qtype/interpreter/executors/invoke_embedding_executor.py +2 -2
- qtype/interpreter/executors/invoke_tool_executor.py +6 -1
- qtype/interpreter/flow.py +13 -1
- qtype/interpreter/rich_progress.py +225 -0
- qtype/interpreter/types.py +2 -0
- qtype/semantic/resolver.py +4 -2
- {qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/METADATA +1 -1
- {qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/RECORD +23 -22
- {qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/WHEEL +0 -0
- {qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/entry_points.txt +0 -0
- {qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/licenses/LICENSE +0 -0
- {qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/top_level.txt +0 -0
qtype/application/facade.py
CHANGED
@@ -2,10 +2,10 @@
 
 from __future__ import annotations
 
+import logging
 from pathlib import Path
 from typing import Any
 
-from qtype.base.logging import get_logger
 from qtype.base.types import PathLike
 from qtype.semantic.model import Application as SemanticApplication
 from qtype.semantic.model import DocumentType as SemanticDocumentType
@@ -14,7 +14,7 @@ from qtype.semantic.model import DocumentType as SemanticDocumentType
 # That's the whole point of this facade - to avoid importing optional
 # dependencies unless these methods are called.
 
-logger =
+logger = logging.getLogger(__name__)
 
 
 class QTypeFacade:
qtype/cli.py
CHANGED
qtype/commands/run.py
CHANGED
@@ -7,10 +7,12 @@ from __future__ import annotations
 import argparse
 import json
 import logging
+import warnings
 from pathlib import Path
 from typing import Any
 
 import pandas as pd
+from pydantic.warnings import UnsupportedFieldAttributeWarning
 
 from qtype.application.facade import QTypeFacade
 from qtype.base.exceptions import InterpreterError, LoadError, ValidationError
@@ -18,6 +20,15 @@ from qtype.base.exceptions import InterpreterError, LoadError, ValidationError
 logger = logging.getLogger(__name__)
 
 
+# Supress specific pydantic warnings that llamaindex needs to fix
+warnings.filterwarnings("ignore", category=UnsupportedFieldAttributeWarning)
+
+
+# supress qdrant logging
+for name in ["httpx", "urllib3", "qdrant_client"]:
+    logging.getLogger(name).setLevel(logging.WARNING)
+
+
 def read_data_from_file(file_path: str) -> pd.DataFrame:
     """
     Reads a file into a pandas DataFrame based on its MIME type.
@@ -87,7 +98,10 @@ def run_flow(args: Any) -> None:
     # Execute the workflow using the facade (now async, returns DataFrame)
     result_df = asyncio.run(
         facade.execute_workflow(
-            spec_path,
+            spec_path,
+            flow_name=args.flow,
+            inputs=input,
+            show_progress=args.progress,
         )
     )
 
@@ -95,7 +109,7 @@ def run_flow(args: Any) -> None:
 
     # Display results
     if len(result_df) > 0:
-        logger.info(f"Processed {len(result_df)}
+        logger.info(f"Processed {len(result_df)} em")
 
         # Remove 'row' and 'error' columns for display if all errors are None
         display_df = result_df.copy()
@@ -108,7 +122,7 @@ def run_flow(args: Any) -> None:
             display_df = display_df.drop(columns=["row"])
 
         if len(display_df) > 1:
-            logger.info(f"\nResults:\n{display_df.to_string()}")
+            logger.info(f"\nResults:\n{display_df[0:10].to_string()}\n...")
        else:
             # Print the first row with column_name: value one per line
             fmt_str = []
@@ -172,6 +186,11 @@ def parser(subparsers: argparse._SubParsersAction) -> None:
         default=None,
         help="Path to save output data. If input is a DataFrame, output will be saved as parquet. If single result, saved as JSON.",
     )
+    cmd_parser.add_argument(
+        "--progress",
+        action="store_true",
+        help="Show progress bars during flow execution.",
+    )
 
     cmd_parser.add_argument(
         "spec", type=str, help="Path to the QType YAML spec file."
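The run command now quiets noisy third-party output at import time and grows a --progress flag. A minimal sketch of the same silencing pattern using only the standard library; the warning category below (DeprecationWarning) is a stand-in for pydantic's UnsupportedFieldAttributeWarning used in the package, while the logger names match the diff above:

    import logging
    import warnings

    # Ignore an entire warning category so it never reaches the console.
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Raise the level of chatty client libraries so only warnings and errors pass.
    for name in ["httpx", "urllib3", "qdrant_client"]:
        logging.getLogger(name).setLevel(logging.WARNING)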
qtype/interpreter/base/base_step_executor.py
CHANGED
@@ -212,7 +212,6 @@ class StepExecutor(ABC):
             num_workers = (
                 self.step.concurrency_config.num_workers  # type: ignore[attr-defined]
             )
-
             span.set_attribute("step.concurrency", num_workers)
 
             # Prepare messages for processing (batching hook)
@@ -331,6 +330,11 @@ class StepExecutor(ABC):
         cached_result = self.cache.get(key)
         if cached_result is not None:
             result = [from_cache_value(d, message) for d in cached_result]  # type: ignore
+            self.progress.increment_cache(
+                self.context.on_progress,
+                hit_delta=len(result),
+                miss_delta=0,
+            )
             # cache hit
             for msg in result:
                 yield msg
@@ -341,6 +345,9 @@ class StepExecutor(ABC):
                 buf.append(output_msg)
                 yield output_msg
 
+            self.progress.increment_cache(
+                self.context.on_progress, hit_delta=0, miss_delta=len(buf)
+            )
             # store the results in the cache of there are no errors or if instructed to do so
             if (
                 all(not msg.is_failed() for msg in buf)
qtype/interpreter/base/progress_tracker.py
CHANGED
@@ -20,6 +20,8 @@ class ProgressTracker:
         self.items_processed = 0
         self.items_in_error = 0
         self.total_items = total_items
+        self.cache_hits = None
+        self.cache_misses = None
 
     @property
     def items_succeeded(self) -> int:
@@ -36,6 +38,8 @@ class ProgressTracker:
         on_progress: ProgressCallback | None,
         processed_delta: int,
         error_delta: int,
+        hit_delta: int | None = None,
+        miss_delta: int | None = None,
     ) -> None:
         """
         Update progress counters and invoke the progress callback.
@@ -51,6 +55,19 @@ class ProgressTracker:
         self.items_processed += processed_delta
         self.items_in_error += error_delta
 
+        if hit_delta is not None:
+            self.cache_hits = (
+                self.cache_hits + hit_delta
+                if self.cache_hits is not None
+                else hit_delta
+            )
+        if miss_delta is not None:
+            self.cache_misses = (
+                self.cache_misses + miss_delta
+                if self.cache_misses is not None
+                else miss_delta
+            )
+
         if on_progress:
             on_progress(
                 self.step_id,
@@ -58,6 +75,8 @@ class ProgressTracker:
                 self.items_in_error,
                 self.items_succeeded,
                 self.total_items,
+                self.cache_hits,
+                self.cache_misses,
             )
 
     def update_for_message(
@@ -73,3 +92,19 @@ class ProgressTracker:
             on_progress: Optional callback to notify of progress updates
         """
         self.update(on_progress, 1, 1 if message.is_failed() else 0)
+
+    def increment_cache(
+        self,
+        on_progress: ProgressCallback | None,
+        hit_delta: int = 0,
+        miss_delta: int = 0,
+    ) -> None:
+        """
+        Increment cache hit/miss counters.
+
+        Args:
+            on_progress: Optional callback to notify of progress updates
+            hit_delta: Number of cache hits to add
+            miss_delta: Number of cache misses to add
+        """
+        self.update(on_progress, 0, 0, hit_delta, miss_delta)
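The cache counters start as None and only become integers once a step actually reports a hit or miss, so steps that never touch a cache show no cache figures at all. A minimal stand-alone sketch of that lazy accumulation (not the package class):

    # None-until-first-delta accumulation, as used for cache_hits/cache_misses.
    def add_delta(current: int | None, delta: int | None) -> int | None:
        if delta is None:
            return current          # nothing reported, leave the counter alone
        return delta if current is None else current + delta

    hits = misses = None
    hits = add_delta(hits, 3)       # first update: None -> 3
    misses = add_delta(misses, None)  # still no miss information
    print(hits, misses)             # 3 None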
qtype/interpreter/base/step_cache.py
CHANGED
@@ -4,7 +4,8 @@ import pathlib
 from typing import Any
 
 import diskcache as dc
-from
+from pydantic import BaseModel
+from pydantic.json import pydantic_encoder
 
 from qtype.base.types import CacheConfig
 from qtype.interpreter.types import FlowMessage
@@ -41,7 +42,7 @@ def cache_key(message: FlowMessage, step: Step) -> str:
             raise ValueError(
                 f"Input variable '{var.id}' not found in message -- caching can not be performed."
            )
-    input_str = json.dumps(inputs, sort_keys=True)
+    input_str = json.dumps(inputs, sort_keys=True, default=pydantic_encoder)
    return hashlib.sha256(input_str.encode("utf-8")).hexdigest()
 
 
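The cache key is the SHA-256 of the JSON-serialized step inputs; passing a pydantic-aware default lets inputs that contain model instances serialize instead of raising TypeError. A sketch of the same idea, assuming pydantic v2; the package passes pydantic's pydantic_encoder, while this sketch spells out an explicit hook:

    import hashlib
    import json

    from pydantic import BaseModel

    class Chunk(BaseModel):  # hypothetical input value for illustration
        doc_id: str
        text: str

    def _default(obj):
        # Fallback for values json.dumps cannot handle natively.
        if isinstance(obj, BaseModel):
            return obj.model_dump()
        raise TypeError(f"not JSON serializable: {type(obj)!r}")

    inputs = {"chunk": Chunk(doc_id="d1", text="hello"), "k": 5}
    key = hashlib.sha256(
        json.dumps(inputs, sort_keys=True, default=_default).encode("utf-8")
    ).hexdigest()
    print(key)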
qtype/interpreter/conversions.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import importlib
+import uuid
 from typing import Any
 
 from llama_index.core.base.embeddings.base import BaseEmbedding
@@ -305,7 +306,8 @@ def to_embedding_model(model: Model) -> BaseEmbedding:
         )
 
         bedrock_embedding: BaseEmbedding = BedrockEmbedding(
-            model_name=model.model_id if model.model_id else model.id
+            model_name=model.model_id if model.model_id else model.id,
+            max_retries=100,
         )
         return bedrock_embedding
     elif model.provider == "openai":
@@ -506,26 +508,30 @@ def to_text_splitter(splitter: DocumentSplitter) -> Any:
     Raises:
         InterpreterError: If the splitter class cannot be found or instantiated.
     """
-    from llama_index.core.node_parser import SentenceSplitter
 
-
-
-
-
+    module_path = "llama_index.core.node_parser"
+    class_name = splitter.splitter_name
+    try:
+        reader_module = importlib.import_module(module_path)
+        splitter_class = getattr(reader_module, class_name)
+    except (ImportError, AttributeError) as e:
+        raise ImportError(
+            f"Failed to import reader class '{class_name}' from '{module_path}': {e}"
+        ) from e
+    from llama_index.core.schema import BaseNode
 
-    #
-
+    # TODO: let the user specify a custom ID namespace
+    namespace = uuid.UUID("12345678-1234-5678-1234-567812345678")
 
-
-
-
-            f"Supported splitters: {', '.join(splitter_classes.keys())}"
-        )
+    def id_func(i: int, doc: BaseNode) -> str:
+        u = uuid.uuid5(namespace, f"{doc.node_id}_{i}")
+        return str(u)
 
     # Prepare arguments for the splitter
     splitter_args = {
         "chunk_size": splitter.chunk_size,
         "chunk_overlap": splitter.chunk_overlap,
+        "id_func": id_func,
         **splitter.args,
     }
 
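The splitter now assigns deterministic node IDs: the parent node ID and chunk index are hashed into a UUIDv5 under a fixed namespace, so re-splitting the same document reproduces the same IDs instead of minting random ones. A standard-library sketch of that scheme:

    import uuid

    # Fixed namespace (the package hard-codes one; any stable UUID works).
    namespace = uuid.UUID("12345678-1234-5678-1234-567812345678")

    def chunk_id(parent_node_id: str, i: int) -> str:
        # Same (parent id, chunk index) always maps to the same UUID.
        return str(uuid.uuid5(namespace, f"{parent_node_id}_{i}"))

    print(chunk_id("doc-42", 0))
    print(chunk_id("doc-42", 0))  # identical to the line above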
qtype/interpreter/converters.py
CHANGED
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import pandas as pd
+from pydantic import BaseModel
 
 from qtype.interpreter.types import FlowMessage, Session
 from qtype.semantic.model import Flow
@@ -54,7 +55,10 @@ def flow_messages_to_dataframe(
         # Extract output variables
         for var in flow.outputs:
             if var.id in message.variables:
-
+                value = message.variables[var.id]
+                if isinstance(value, BaseModel):
+                    value = value.model_dump()
+                row_data[var.id] = value
             else:
                 row_data[var.id] = None
 
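Output variables that are pydantic models are now dumped to plain dicts before being placed into the result DataFrame, so columns hold JSON-friendly values rather than model objects. A small sketch of the pattern, assuming pydantic v2 and pandas; the Answer model is illustrative only:

    import pandas as pd
    from pydantic import BaseModel

    class Answer(BaseModel):  # hypothetical output value
        text: str
        score: float

    value = Answer(text="42", score=0.9)
    if isinstance(value, BaseModel):
        value = value.model_dump()  # dict instead of a model instance

    df = pd.DataFrame([{"answer": value}])
    print(df.iloc[0]["answer"])  # {'text': '42', 'score': 0.9}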
qtype/interpreter/executors/document_embedder_executor.py
CHANGED
@@ -1,5 +1,14 @@
 from typing import AsyncIterator
 
+from botocore.exceptions import ClientError
+from llama_index.core.base.embeddings.base import BaseEmbedding
+from tenacity import (
+    retry,
+    retry_if_exception,
+    stop_after_attempt,
+    wait_exponential,
+)
+
 from qtype.dsl.domain_types import RAGChunk
 from qtype.interpreter.base.base_step_executor import StepExecutor
 from qtype.interpreter.base.executor_context import ExecutorContext
@@ -8,6 +17,13 @@ from qtype.interpreter.types import FlowMessage
 from qtype.semantic.model import DocumentEmbedder
 
 
+def is_throttling_error(e):
+    return (
+        isinstance(e, ClientError)
+        and e.response["Error"]["Code"] == "ThrottlingException"
+    )
+
+
 class DocumentEmbedderExecutor(StepExecutor):
     """Executor for DocumentEmbedder steps."""
 
@@ -24,7 +40,25 @@ class DocumentEmbedderExecutor(StepExecutor):
         )
         self.step: DocumentEmbedder = step
         # Initialize the embedding model once for the executor
-        self.embedding_model = to_embedding_model(
+        self.embedding_model: BaseEmbedding = to_embedding_model(
+            self.step.model
+        )
+
+    # TODO: properly abstract this into a mixin
+    @retry(
+        retry=retry_if_exception(is_throttling_error),
+        wait=wait_exponential(multiplier=0.5, min=1, max=30),
+        stop=stop_after_attempt(10),
+    )
+    async def _embed(self, text: str) -> list[float]:
+        """Generate embedding for the given text using the embedding model.
+
+        Args:
+            text: The text to embed.
+        Returns:
+            The embedding vector as a list of floats.
+        """
+        return await self.embedding_model.aget_text_embedding(text=text)
 
     async def process_message(
         self,
@@ -52,9 +86,7 @@ class DocumentEmbedderExecutor(StepExecutor):
             )
 
             # Generate embedding for the chunk content
-            vector = self.
-                text=str(chunk.content)
-            )
+            vector = await self._embed(str(chunk.content))
 
             # Create the output chunk with the vector
             embedded_chunk = RAGChunk(
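Embedding calls are now wrapped in a tenacity retry that only fires on Bedrock throttling: exponential backoff capped at 30 seconds, at most 10 attempts, every other exception propagates immediately. A self-contained sketch of the same retry-on-predicate pattern, with a generic Throttled exception standing in for botocore's ThrottlingException:

    from tenacity import (
        retry,
        retry_if_exception,
        stop_after_attempt,
        wait_exponential,
    )

    class Throttled(Exception):
        pass

    def is_throttling(e: BaseException) -> bool:
        return isinstance(e, Throttled)

    attempts = {"n": 0}

    @retry(
        retry=retry_if_exception(is_throttling),
        wait=wait_exponential(multiplier=0.5, min=1, max=30),
        stop=stop_after_attempt(10),
    )
    def embed(text: str) -> list[float]:
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise Throttled("simulated rate limit")  # retried with backoff
        return [0.0] * 8

    print(len(embed("hello")), "dims after", attempts["n"], "attempts")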
qtype/interpreter/executors/index_upsert_executor.py
CHANGED
@@ -65,7 +65,7 @@ class IndexUpsertExecutor(BatchedStepExecutor):
         Yields:
             FlowMessages: Success messages after upserting to the index
         """
-        logger.
+        logger.debug(
             f"Executing IndexUpsert step: {self.step.id} with batch size: {len(batch)}"
         )
 
@@ -102,7 +102,7 @@ class IndexUpsertExecutor(BatchedStepExecutor):
         else: # document index
             await self._upsert_to_document_index(items_to_upsert)
 
-        logger.
+        logger.debug(
             f"Successfully upserted {len(items_to_upsert)} items "
             f"to {self.index_type} index in batch"
         )
qtype/interpreter/executors/invoke_embedding_executor.py
CHANGED
@@ -58,13 +58,13 @@ class InvokeEmbeddingExecutor(StepExecutor):
         if input_type == PrimitiveTypeEnum.text:
             if not isinstance(input_value, str):
                 input_value = str(input_value)
-            vector = self.embedding_model.
+            vector = await self.embedding_model.aget_text_embedding(
                 text=input_value
             )
             content = input_value
         elif input_type == PrimitiveTypeEnum.image:
             # For image embeddings
-            vector = self.embedding_model.
+            vector = await self.embedding_model.aget_image_embedding(
                 image_path=input_value
             )
             content = input_value
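The executor switches from the synchronous embedding API to the awaitable variants (aget_text_embedding / aget_image_embedding), which lets independent lookups overlap instead of blocking the event loop. A sketch of why that matters, with a stand-in embedder rather than the llama-index model the executor actually uses:

    import asyncio

    async def aget_text_embedding(text: str) -> list[float]:
        await asyncio.sleep(0.1)  # pretend network latency
        return [float(len(text))]

    async def main() -> None:
        # Three calls complete in roughly one latency period, not three.
        vectors = await asyncio.gather(
            *(aget_text_embedding(t) for t in ["a", "bb", "ccc"])
        )
        print(vectors)  # [[1.0], [2.0], [3.0]]

    asyncio.run(main())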
qtype/interpreter/executors/invoke_tool_executor.py
CHANGED
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
+import asyncio
 import importlib
+import inspect
 import logging
 import time
 import uuid
@@ -86,7 +88,10 @@ class ToolExecutionMixin:
                 )
             )
 
-
+            if inspect.iscoroutinefunction(function):
+                result = await function(**inputs)
+            else:
+                result = await asyncio.to_thread(function, **inputs)
             await tool_ctx.complete(result)
             return result
 
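Tool functions may now be either coroutines or plain callables: coroutine functions are awaited directly, while synchronous ones are pushed onto a worker thread so they do not block the event loop. A standard-library sketch of that dispatch:

    import asyncio
    import inspect
    import time

    def slow_sync_tool(x: int) -> int:
        time.sleep(0.1)  # would block the loop if called inline
        return x * 2

    async def async_tool(x: int) -> int:
        await asyncio.sleep(0.1)
        return x + 1

    async def invoke(function, **inputs):
        # Await coroutines directly; run plain callables in a thread.
        if inspect.iscoroutinefunction(function):
            return await function(**inputs)
        return await asyncio.to_thread(function, **inputs)

    async def main() -> None:
        print(await invoke(slow_sync_tool, x=21))  # 42
        print(await invoke(async_tool, x=41))      # 42

    asyncio.run(main())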
qtype/interpreter/flow.py
CHANGED
@@ -12,6 +12,7 @@ from opentelemetry.trace import Status, StatusCode
 
 from qtype.interpreter.base import factory
 from qtype.interpreter.base.executor_context import ExecutorContext
+from qtype.interpreter.rich_progress import RichProgressCallback
 from qtype.interpreter.types import FlowMessage
 from qtype.semantic.model import Flow
 
@@ -19,7 +20,10 @@ logger = logging.getLogger(__name__)
 
 
 async def run_flow(
-    flow: Flow,
+    flow: Flow,
+    initial: list[FlowMessage] | FlowMessage,
+    show_progress: bool = False,
+    **kwargs,
 ) -> list[FlowMessage]:
     """
     Main entrypoint for executing a flow.
@@ -38,11 +42,16 @@ async def run_flow(
 
     # Extract or create ExecutorContext
     exec_context = kwargs.pop("context", None)
+    progress_callback = RichProgressCallback() if show_progress else None
     if exec_context is None:
         exec_context = ExecutorContext(
             secret_manager=NoOpSecretManager(),
             tracer=trace.get_tracer(__name__),
+            on_progress=progress_callback,
         )
+    else:
+        if exec_context.on_progress is None and show_progress:
+            exec_context.on_progress = progress_callback
 
     # Use tracer from context
     tracer = exec_context.tracer or trace.get_tracer(__name__)
@@ -110,6 +119,9 @@ async def run_flow(
         # 4. Collect the final results from the last stream
         final_results = [state async for state in current_stream]
 
+        # Close the progress bars if any
+        if progress_callback is not None:
+            progress_callback.close()
         # Record flow completion metrics
         span.set_attribute("flow.output_count", len(final_results))
         error_count = sum(1 for msg in final_results if msg.is_failed())
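With the new signature, a caller opts into progress bars per invocation; when no callback is already on the executor context, run_flow installs a RichProgressCallback and closes it after the final stream is drained. A hedged usage sketch; how the Flow and the initial FlowMessage are constructed is package-specific and elided here:

    import asyncio

    from qtype.interpreter.flow import run_flow

    async def main(flow, first_message) -> None:
        results = await run_flow(flow, first_message, show_progress=True)
        failed = sum(1 for m in results if m.is_failed())
        print(f"{len(results)} messages, {failed} failed")

    # asyncio.run(main(flow, first_message))  # once a Flow and message exist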
qtype/interpreter/rich_progress.py
ADDED
@@ -0,0 +1,225 @@
+from __future__ import annotations
+
+import logging
+import threading
+from collections import deque
+from typing import Deque, Dict
+
+from rich.console import Console
+from rich.live import Live
+from rich.panel import Panel
+from rich.progress import (
+    Progress,
+    ProgressColumn,
+    TaskProgressColumn,
+    TextColumn,
+    TimeElapsedColumn,
+    TimeRemainingColumn,
+)
+from rich.text import Text
+
+from qtype.interpreter.types import ProgressCallback
+
+logger = logging.getLogger(__name__)
+
+
+class RateColumn(ProgressColumn):
+    """Show processing speed as '123 msg/s' based on task.speed."""
+
+    def __init__(self, unit: str = "msg") -> None:
+        super().__init__()
+        self.unit = unit
+
+    def render(self, task) -> Text:  # type: ignore[override]
+        speed = task.speed or 0.0
+
+        if speed <= 0:
+            return Text(f"- {self.unit}/s")
+
+        # Simple formatting similar-ish to tqdm
+        if speed < 1:
+            rate_str = f"{speed:.2f}"
+        elif speed < 100:
+            rate_str = f"{speed:4.1f}"
+        else:
+            rate_str = f"{speed:4.0f}"
+
+        return Text(f"{rate_str} {self.unit}/s")
+
+
+class SparklineColumn(ProgressColumn):
+    """Tiny throughput trend graph using block characters."""
+
+    def __init__(self, max_samples: int = 20) -> None:
+        super().__init__()
+        self.max_samples = max_samples
+        # Per-task speed history
+        self._history: Dict[int, Deque[float]] = {}
+
+    def render(self, task) -> Text:  # type: ignore[override]
+        speed = task.speed or 0.0
+
+        history = self._history.get(task.id)
+        if history is None:
+            history = self._history[task.id] = deque(maxlen=self.max_samples)
+
+        history.append(speed)
+
+        if not history or all(v <= 0 for v in history):
+            return Text("")
+
+        min_s = min(history)
+        max_s = max(history)
+        rng = max(max_s - min_s, 1e-9)
+
+        blocks = "▁▂▃▄▅▆▇█"
+        n_blocks = len(blocks)
+
+        chars = []
+        for v in history:
+            idx = int((v - min_s) / rng * (n_blocks - 1))
+            chars.append(blocks[idx])
+
+        return Text("".join(chars))
+
+
+class RichProgressCallback(ProgressCallback):
+    """Progress callback that uses Rich to display progress bars.
+
+    Displays a progress row for each step, updating in place.
+    Colors the step label based on error rate:
+    - Green: error rate <= 1%
+    - Yellow: 1% < error rate <= 5%
+    - Red: error rate > 5%
+
+    Attributes:
+        order: Optional list defining the order of steps progress rows.
+    """
+
+    def __init__(
+        self,
+        order: list[str] | None = None,
+    ) -> None:
+        super().__init__()
+        self.order = order or []
+        self._lock = threading.Lock()
+        self.console = Console()
+
+        # One shared Progress instance for all steps
+        # Columns: description | bar | % | rate | sparkline | ✔ | ✖ | elapsed | remaining
+        self.progress = Progress(
+            TextColumn("[progress.description]{task.description}"),
+            TaskProgressColumn(),
+            RateColumn(unit="msg"),
+            SparklineColumn(max_samples=20),
+            TextColumn("[green]✔[/green] {task.fields[succeeded]} succeeded"),
+            TextColumn("[red]✖[/red] {task.fields[errors]} errors"),
+            TextColumn("[cyan]⟳[/cyan] {task.fields[cache_hits]} hits"),
+            TextColumn(
+                "[magenta]✗[/magenta] {task.fields[cache_misses]} misses"
+            ),
+            TimeElapsedColumn(),
+            TimeRemainingColumn(),
+            console=self.console,
+            expand=True,
+        )
+
+        # Wrap progress in a panel
+        self.panel = Panel(
+            self.progress,
+            title="[bold cyan]Flow Progress[/bold cyan]",
+            border_style="bright_blue",
+            padding=(1, 2),
+        )
+
+        # Live container for the panel
+        self.live = Live(
+            self.panel,
+            console=self.console,
+            refresh_per_second=10,
+        )
+
+        # Map step_id -> Rich task id
+        self.tasks: Dict[str, int] = {}
+        self._started = False
+
+        # Pre-create tasks in the desired order if provided
+        for step_id in self.order:
+            task_id = self.progress.add_task(
+                f"Step {step_id}",
+                total=None,  # we’ll update this once we know it
+                succeeded=0,
+                errors=0,
+            )
+            self.tasks[step_id] = task_id
+
+    def _ensure_started(self) -> None:
+        if not self._started:
+            self.live.start()
+            self._started = True
+
+    def __call__(
+        self,
+        step_id: str,
+        items_processed: int,
+        items_in_error: int,
+        items_succeeded: int,
+        total_items: int | None,
+        cache_hits: int | None = None,
+        cache_misses: int | None = None,
+    ) -> None:
+        with self._lock:
+            self._ensure_started()
+
+            # Create a task lazily if we didn't pre-create it
+            if step_id not in self.tasks:
+                task_id = self.progress.add_task(
+                    f"Step {step_id}",
+                    total=total_items,
+                    succeeded=items_succeeded,
+                    errors=items_in_error,
+                    cache_hits=cache_hits,
+                    cache_misses=cache_misses,
+                )
+                self.tasks[step_id] = task_id
+
+            task_id = self.tasks[step_id]
+            color = self.compute_color(items_processed, items_in_error)
+
+            update_kwargs = {
+                "completed": items_processed,
+                "succeeded": items_succeeded,
+                "errors": items_in_error,
+                "description": f"[{color}]Step {step_id}[/{color}]",
+            }
+
+            update_kwargs["cache_hits"] = (
+                cache_hits if cache_hits is not None else "-"
+            )
+            update_kwargs["cache_misses"] = (
+                cache_misses if cache_misses is not None else "-"
+            )
+            if total_items is not None:
+                update_kwargs["total"] = total_items
+
+            self.progress.update(task_id, **update_kwargs)
+
+    def compute_color(self, items_processed: int, items_in_error: int) -> str:
+        # Avoid divide-by-zero
+        if items_processed == 0:
+            return "green"
+
+        error_rate = items_in_error / items_processed
+
+        if error_rate > 0.05:
+            return "red"
+        elif error_rate > 0.01:
+            return "yellow"
+        else:
+            return "green"
+
+    def close(self) -> None:
+        with self._lock:
+            if self._started:
+                self.live.stop()
+                self._started = False
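The callback's call signature above is what ProgressTracker.update feeds it: per-step processed, error, succeeded, and total counts plus the optional cache figures. An illustrative driver, assuming qtype and rich are installed; the step names and counts are made up, and the interpreter normally makes these calls itself:

    import time

    from qtype.interpreter.rich_progress import RichProgressCallback

    cb = RichProgressCallback()
    for i in range(1, 11):
        # (step_id, processed, errors, succeeded, total, cache hits/misses)
        cb("split", i, 0, i, 10, cache_hits=i // 2, cache_misses=i - i // 2)
        cb("embed", max(i - 1, 0), 0, max(i - 1, 0), 10)
        time.sleep(0.1)
    cb.close()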
qtype/interpreter/types.py
CHANGED
qtype/semantic/resolver.py
CHANGED
@@ -75,8 +75,10 @@ def to_semantic_ir(
         # If the object is a list, we will resolve each item in the list.
         return [to_semantic_ir(item, symbol_table) for item in dslobj]  # type: ignore
 
-
-
+    # Return these types as-is as they are not changed
+    if isinstance(dslobj, dsl.Enum) or isinstance(
+        dslobj, base_types.CacheConfig
+    ):
         return dslobj
 
     if _is_dsl_type(_resolve_forward_ref(type(dslobj))):
{qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
 qtype/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-qtype/cli.py,sha256=
+qtype/cli.py,sha256=JUieGAeKQ_8Q2LXZKScYIL__IXtJFu1D-huEyJk9_g0,4783
 qtype/application/__init__.py,sha256=WS3x0b0NRt-nRmj1trsytlvMpQS5KN7Hi6THGfY8bKE,230
 qtype/application/documentation.py,sha256=ifmdt0jBW410baQuCUxovYDQQj-kxPZ4fmf6rWps9JY,4988
-qtype/application/facade.py,sha256=
+qtype/application/facade.py,sha256=Qw378AV5hR93LWfO-etvXo8qyZwfQo9x0geUaBmHg_A,5795
 qtype/application/commons/__init__.py,sha256=QyWAB2cvimM4DxNo2oBFCGkfBikH-ZeMBMGWmJcq4Uc,135
 qtype/application/commons/tools.py,sha256=U_jJdVN2NO5v9b3qb6dPIiVykfal6tp6NvcLGWR6HC8,5035
 qtype/application/converters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -16,7 +16,7 @@ qtype/base/types.py,sha256=zkweSHfpP6MrDEHzEpEWTZ-xVxQLZJj1QhKv-Mj4s0A,6834
 qtype/commands/__init__.py,sha256=Qo4M07zm5I63r8STxDjvt5fhP1jygdXTsExNGELkefc,257
 qtype/commands/convert.py,sha256=wh2-MSBlnMU5peAzVeQcGqqFzQbeCuL5WC5-EDZ-TFM,4636
 qtype/commands/generate.py,sha256=v_k_CN0ub-_18rquvX_B366K4bec1aFLnLO9Bp-NfRc,7287
-qtype/commands/run.py,sha256=
+qtype/commands/run.py,sha256=uSh6_BxBY0mYpE7u26hpm_QTMAkauxAs1P2y5moMXUw,6303
 qtype/commands/serve.py,sha256=lb5akSZ8fYLlCl8u8HDFFk6kyCHHwSRnP5wNzZry8sg,3216
 qtype/commands/validate.py,sha256=f0aOk6A08910bFfamfg8xafByADMx4EktXAGMrEeUXU,3067
 qtype/commands/visualize.py,sha256=J1eqwaVWTteGzib84g7Qtp0JlX9wIMmRQWSv1RhAns8,3763
@@ -30,44 +30,45 @@ qtype/dsl/parser.py,sha256=jpz32zyvOIo-R6Xr1lshzQiGfeo-2-fZczkdfURBufo,5487
 qtype/dsl/types.py,sha256=k6cgThA287bZ_pvTKQvxWhatcYCPNne8zpqOYOvLvOg,1687
 qtype/interpreter/__init__.py,sha256=IaRF90JLFbsTLKz9LTOMI_Pz4xwVaEyXPNaXV7sLou8,43
 qtype/interpreter/api.py,sha256=V7hjsmDhe1IwbcwdM5bnPGBiwH3TtlMLjUJdGJumCdA,4193
-qtype/interpreter/conversions.py,sha256=
-qtype/interpreter/converters.py,sha256=
+qtype/interpreter/conversions.py,sha256=m5MKMystF5mK6SCqkX2W636Y71JN74mDYX2a7Z1miSs,20862
+qtype/interpreter/converters.py,sha256=90TkXZrZyq24fJUimacK3avg2TywspoMk7oPj-1Wtc8,2170
 qtype/interpreter/endpoints.py,sha256=un4iCYCk86lYKpTDFdzlByvebdctNwRF3n4oD4ZwpTw,11946
-qtype/interpreter/flow.py,sha256=
+qtype/interpreter/flow.py,sha256=7UaZtB_K26-CTZTcqzK5YGtUDANOZKfVFWfHpuTzyQQ,5728
 qtype/interpreter/metadata_api.py,sha256=LfJjt9atsgiAra6aVBXLoJrPa06_CBUagYysT556nt8,3267
 qtype/interpreter/resource_cache.py,sha256=K0kzpm223COWk7FN9qyOvNOEoOcABR4yLeADL9ekE_o,1188
+qtype/interpreter/rich_progress.py,sha256=J7TokOIqIUVWJZCfGEexQCwvvj6b1SjRtKThk2DU0CA,6761
 qtype/interpreter/telemetry.py,sha256=Hcwd9sMW55LejgOIpPwjkWsmTvB2vnpSr4TshTAKljk,4901
-qtype/interpreter/types.py,sha256=
+qtype/interpreter/types.py,sha256=YBwav_frbFptGrIw60ebI-jfAu28GQUZEQ9mwfCjLFc,10045
 qtype/interpreter/typing.py,sha256=Ka5wkkpQFZQKgKMCR3p7bD7W4uHpOryevEi-isc2RCw,3888
 qtype/interpreter/auth/__init__.py,sha256=L98AxaSizb6LMdXEr8FGe9MBtPBnfCeWxjI0oi7sg_o,62
 qtype/interpreter/auth/aws.py,sha256=eMXyEBqzv7I243fS-A1zHPQkN--yPFEh1Hxp4rxmyEs,8154
 qtype/interpreter/auth/cache.py,sha256=uVyJ_jkbprRdlvbnm1DVIaYyTDLEsPXvi3xjuRneH2k,1825
 qtype/interpreter/auth/generic.py,sha256=WHXu3SxWzxJn_bv6R20Aod84Vwe73xTYHx754dY1MSg,6178
-qtype/interpreter/base/base_step_executor.py,sha256=
+qtype/interpreter/base/base_step_executor.py,sha256=DFQOwRVXStxPLov-unG4IXq-iBQ66JVPeetNnUcdlZ8,16497
 qtype/interpreter/base/batch_step_executor.py,sha256=g5_yPd5VTy_slW5ZXyamgFyTRd0CoaeVfDHj8x4PvUk,5906
 qtype/interpreter/base/exceptions.py,sha256=7CIexzDfIjvAA0c6qwg4jsDcTQM1pKQLj6szxcqil_c,1586
 qtype/interpreter/base/executor_context.py,sha256=s_EiNURd7uDkUdOvuVC0u7zuWDGV89r4ppMFOC0C1m0,2839
 qtype/interpreter/base/factory.py,sha256=vpMvqVmBCrdJKbizEkjqNNDbq8d-CKsAEXiNX7PrJRM,3823
-qtype/interpreter/base/progress_tracker.py,sha256=
+qtype/interpreter/base/progress_tracker.py,sha256=zHtTp0JGrn1M3wNEEVfkwQmuHD-WKXV7tv4fDdHv1xs,3488
 qtype/interpreter/base/secrets.py,sha256=74NoU0Fx96vva6LGWXk7EkvFWD4uZEk12NjWrGHWZTc,11241
-qtype/interpreter/base/step_cache.py,sha256
+qtype/interpreter/base/step_cache.py,sha256=iNtEFN-bvfG5S5iPhXR_U7iVtK-RWh9_nhCRetckukU,2432
 qtype/interpreter/base/stream_emitter.py,sha256=8l5bCFTjMA3Takjh51QdWw8ERb7_GamHVoU-x6xkG5I,13828
 qtype/interpreter/executors/agent_executor.py,sha256=pll5tdUD977fmMMfoXVhY-dLQttv-aqT04gyjrF6seo,8378
 qtype/interpreter/executors/aggregate_executor.py,sha256=Z3NJekpeo7aqqvOcXQqb6d6t9g4UB1r3N1lSV9EwZq4,3495
 qtype/interpreter/executors/decoder_executor.py,sha256=KqLhnhiclMIcUNf3bu7H4vDAOXCQeVO0rc2hIXm1qZ4,5610
 qtype/interpreter/executors/doc_to_text_executor.py,sha256=ZkTtKUL0BfNIiuj-OcYybn1f0By6ujRmd1U4VEAtJt4,3804
-qtype/interpreter/executors/document_embedder_executor.py,sha256=
+qtype/interpreter/executors/document_embedder_executor.py,sha256=wvARlFPb2dmMdxjW8L1422a-XmcUVxEJXWf24bDR9BE,3529
 qtype/interpreter/executors/document_search_executor.py,sha256=q_z8Lhl6jlIRt2I8NaINUiygr9T7nuz_tpRS6OowrvQ,4507
 qtype/interpreter/executors/document_source_executor.py,sha256=ZpBrBaE16YeRk750TxvE08NnCIUzArjESZImESomaIo,4247
-qtype/interpreter/executors/document_splitter_executor.py,sha256=
+qtype/interpreter/executors/document_splitter_executor.py,sha256=2mzrkkNqsPb5erDUd_VapnrykgywMXGXZnkWT1YJe_w,3815
 qtype/interpreter/executors/echo_executor.py,sha256=oQUgzQTHruT4on7wgEBOcikwOy6KP82d5zrul5QLoRU,2194
 qtype/interpreter/executors/field_extractor_executor.py,sha256=M7sPFR89PiAF0vU5veaAE8N130SeC0WGPNO1t3yV5dM,5551
 qtype/interpreter/executors/file_source_executor.py,sha256=OUT_zJrYN3iFMUgLECde93C4rv8PthcQsuJ--CJvEsI,3605
 qtype/interpreter/executors/file_writer_executor.py,sha256=x4BpgdXM7Xhz1tJJ5MmBIjFO4y80VC1V1ow3tox_Xrw,4099
-qtype/interpreter/executors/index_upsert_executor.py,sha256=
-qtype/interpreter/executors/invoke_embedding_executor.py,sha256=
+qtype/interpreter/executors/index_upsert_executor.py,sha256=YsEYqDg8GeD7gKcBtBTqykRYt0eE5ZYYM66o0A1EjVI,8170
+qtype/interpreter/executors/invoke_embedding_executor.py,sha256=OPvd--x8iimjODLJkRpRfQDahL8LnYaPy3A8WVB5h00,3311
 qtype/interpreter/executors/invoke_flow_executor.py,sha256=U30cYM3F_zy1_2CD1Dde59xyZD0rDa5W46lST1hxF6s,1682
-qtype/interpreter/executors/invoke_tool_executor.py,sha256=
+qtype/interpreter/executors/invoke_tool_executor.py,sha256=hhbE8YTr0x5-kz_xsvdWGGzkLkVdvDoAVAF-3ZUK5as,12786
 qtype/interpreter/executors/llm_inference_executor.py,sha256=A6b_Ns_734TCn_DMhdNSqWc5qX970FryhpsX_jtEu_4,9593
 qtype/interpreter/executors/prompt_template_executor.py,sha256=AKZWQvsoY2fQoSg5N8_R7LIlC8AKTwjNKOz_MuvASc0,2700
 qtype/interpreter/executors/sql_source_executor.py,sha256=-fUWQK2S7NBe3oJG1lmkNRujASHFyXu3H8h-WrobbQw,3932
@@ -124,11 +125,11 @@ qtype/semantic/checker.py,sha256=B23bFmSISFgmL9ji_0QOFuTaSF55xLZNPDwyCXm1Z8U,199
 qtype/semantic/generate.py,sha256=s56N0ollRJVVxy6RUKZWFFReKYcSSVw33ixvT2MQRuA,21116
 qtype/semantic/loader.py,sha256=QRhTc_AJfsWSMn8ThaW60GmIGjFMN-3bBUy4pktFjz4,3041
 qtype/semantic/model.py,sha256=OMScGFp1FMe8k_yWFhFs5yJW67fKL75BrZDUBmPT9aI,26981
-qtype/semantic/resolver.py,sha256=
+qtype/semantic/resolver.py,sha256=bWPCSB8KJpVqN_n41U_r-qzUiT8vAMBOD3pOGmxL6TY,4618
 qtype/semantic/visualize.py,sha256=thjrZcfQuZJWrZ9EMAPhAa2kNikR5rLIJrfcD3hJ8XY,17426
-qtype-0.1.
-qtype-0.1.
-qtype-0.1.
-qtype-0.1.
-qtype-0.1.
-qtype-0.1.
+qtype-0.1.1.dist-info/licenses/LICENSE,sha256=1KA5EgYBSR0O6nCH2HEvk6Di53YKJ9r_VCR7G8G8qAY,11341
+qtype-0.1.1.dist-info/METADATA,sha256=PHlAs091t8uOXO0YXWlcjLzkbp64bj2QzKLssVz9SFw,5583
+qtype-0.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+qtype-0.1.1.dist-info/entry_points.txt,sha256=5y4vj8RLvgl2tXSj-Hm7v5-Tn3kP4-UonjNoN-mfaQE,41
+qtype-0.1.1.dist-info/top_level.txt,sha256=ONroH5B0mZ51jr7NSWCK0weFwwCO7wBLmyVS1YqNU14,6
+qtype-0.1.1.dist-info/RECORD,,
{qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/WHEEL
File without changes
{qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/entry_points.txt
File without changes
{qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/licenses/LICENSE
File without changes
{qtype-0.1.0.dist-info → qtype-0.1.1.dist-info}/top_level.txt
File without changes