qtype 0.0.1__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- qtype/__init__.py +0 -0
- qtype/cli.py +73 -0
- qtype/commands/__init__.py +5 -0
- qtype/commands/convert.py +76 -0
- qtype/commands/generate.py +107 -0
- qtype/commands/run.py +200 -0
- qtype/commands/validate.py +83 -0
- qtype/commons/__init__.py +0 -0
- qtype/commons/generate.py +88 -0
- qtype/commons/tools.py +192 -0
- qtype/converters/__init__.py +0 -0
- qtype/converters/tools_from_api.py +24 -0
- qtype/converters/tools_from_module.py +326 -0
- qtype/converters/types.py +20 -0
- qtype/dsl/__init__.py +1 -0
- qtype/dsl/base_types.py +31 -0
- qtype/dsl/document.py +108 -0
- qtype/dsl/domain_types.py +56 -0
- qtype/dsl/model.py +685 -0
- qtype/dsl/validator.py +439 -0
- qtype/interpreter/__init__.py +1 -0
- qtype/interpreter/api.py +104 -0
- qtype/interpreter/conversions.py +148 -0
- qtype/interpreter/exceptions.py +10 -0
- qtype/interpreter/flow.py +37 -0
- qtype/interpreter/resource_cache.py +37 -0
- qtype/interpreter/step.py +67 -0
- qtype/interpreter/steps/__init__.py +0 -0
- qtype/interpreter/steps/agent.py +114 -0
- qtype/interpreter/steps/condition.py +36 -0
- qtype/interpreter/steps/decoder.py +84 -0
- qtype/interpreter/steps/llm_inference.py +127 -0
- qtype/interpreter/steps/prompt_template.py +54 -0
- qtype/interpreter/steps/search.py +24 -0
- qtype/interpreter/steps/tool.py +53 -0
- qtype/interpreter/telemetry.py +16 -0
- qtype/interpreter/typing.py +78 -0
- qtype/loader.py +341 -0
- qtype/semantic/__init__.py +0 -0
- qtype/semantic/errors.py +4 -0
- qtype/semantic/generate.py +383 -0
- qtype/semantic/model.py +354 -0
- qtype/semantic/resolver.py +97 -0
- qtype-0.0.1.dist-info/METADATA +120 -0
- qtype-0.0.1.dist-info/RECORD +49 -0
- qtype-0.0.1.dist-info/WHEEL +5 -0
- qtype-0.0.1.dist-info/entry_points.txt +2 -0
- qtype-0.0.1.dist-info/licenses/LICENSE +202 -0
- qtype-0.0.1.dist-info/top_level.txt +1 -0
qtype/__init__.py
ADDED
File without changes
qtype/cli.py
ADDED
@@ -0,0 +1,73 @@
+"""
+QType CLI entry point for generating schemas and validating QType specs.
+"""
+
+import argparse
+import importlib
+import logging
+from pathlib import Path
+
+
+def _discover_commands(subparsers: argparse._SubParsersAction) -> None:
+    """Automatically discover and register command modules.
+
+    Args:
+        subparsers: The subparsers object to add commands to.
+    """
+    commands_dir = Path(__file__).parent / "commands"
+
+    for py_file in commands_dir.glob("*.py"):
+        # Skip __init__.py and other private files
+        if py_file.name.startswith("_"):
+            continue
+
+        module_name = f"qtype.commands.{py_file.stem}"
+        try:
+            module = importlib.import_module(module_name)
+            # Call the parser function to set up the subparser
+            if hasattr(module, "parser"):
+                module.parser(subparsers)
+            else:
+                logging.warning(
+                    f"Command module {module_name} does not have a 'parser' function"
+                )
+        except Exception as e:
+            logging.error(
+                f"Failed to load command module {module_name}: {e}",
+                exc_info=True,
+            )
+
+
+def main() -> None:
+    """
+    Main entry point for the QType CLI.
+    Sets up argument parsing and dispatches to the appropriate subcommand.
+    """
+    parser = argparse.ArgumentParser(
+        description="QType CLI: Generate schema, validate, or run QType specs."
+    )
+    parser.add_argument(
+        "--log-level",
+        default="INFO",
+        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
+        help="Set the logging level (default: INFO)",
+    )
+    subparsers = parser.add_subparsers(dest="command", required=True)
+
+    # Auto-discover and register commands
+    _discover_commands(subparsers)
+
+    args = parser.parse_args()
+
+    # Set logging level based on user input
+    logging.basicConfig(
+        level=getattr(logging, args.log_level),
+        format="%(levelname)s: %(message)s",
+    )
+
+    # Dispatch to the selected subcommand
+    args.func(args)
+
+
+if __name__ == "__main__":
+    main()
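The CLI registers subcommands through a plugin convention: _discover_commands imports every public module under qtype/commands/ and calls its parser(subparsers) function, so a new subcommand plugs in without editing cli.py. A minimal sketch of that contract, using a hypothetical qtype/commands/hello.py that is not part of this package:

import argparse


def hello(args: argparse.Namespace) -> None:
    # Handler that cli.main() dispatches to via args.func(args).
    print(f"Hello, {args.name}!")


def parser(subparsers: argparse._SubParsersAction) -> None:
    # Called by _discover_commands() during CLI startup.
    cmd_parser = subparsers.add_parser("hello", help="Example greeting command.")
    cmd_parser.add_argument("name", type=str, help="Name to greet.")
    cmd_parser.set_defaults(func=hello)

The same shape appears in each of the command modules below: a module-level parser() that registers arguments and binds a handler with set_defaults(func=...).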
qtype/commands/convert.py
ADDED
@@ -0,0 +1,76 @@
+import argparse
+import logging
+
+from pydantic_yaml import to_yaml_str
+
+from qtype.commons.generate import _write_yaml_file
+from qtype.converters.tools_from_module import tools_from_module
+from qtype.dsl.model import ToolList
+
+logger = logging.getLogger(__name__)
+
+
+def convert_api(args: argparse.Namespace) -> None:
+    raise NotImplementedError("API conversion is not implemented yet.")
+
+
+def convert_module(args: argparse.Namespace) -> None:
+    """Convert Python module tools to qtype format."""
+    tools = ToolList(tools_from_module(args.module_path))  # type: ignore
+    if not tools:
+        raise ValueError(f"No tools found in the module: {args.module_path}")
+
+    if args.output:
+        _write_yaml_file(tools, args.output)
+        logger.info("Resulting yaml written to %s", args.output)
+    else:
+        logger.info(
+            "Resulting yaml:\n%s",
+            to_yaml_str(tools, exclude_unset=True, exclude_none=True),
+        )
+
+
+def parser(subparsers: argparse._SubParsersAction) -> None:
+    """Set up the converter subcommand parser."""
+    cmd_parser = subparsers.add_parser(
+        "convert", help="Creates qtype files from different sources."
+    )
+
+    # Create a new subparser for "convert api", "convert module", etc.
+    convert_subparsers = cmd_parser.add_subparsers(
+        dest="convert_command", required=True
+    )
+
+    convert_module_parser = convert_subparsers.add_parser(
+        "module", help="Converts module specifications to qtype format."
+    )
+    convert_module_parser.add_argument(
+        "module_path",
+        type=str,
+        help="Path to the Python module to convert.",
+    )
+    convert_module_parser.add_argument(
+        "-o",
+        "--output",
+        type=str,
+        default=None,
+        help="Where to save the converted YAML file. If not specified, it is just printed to stdout.",
+    )
+    convert_module_parser.set_defaults(func=convert_module)
+
+    convert_api_parser = convert_subparsers.add_parser(
+        "api", help="Converts API specifications to qtype format."
+    )
+    convert_api_parser.add_argument(
+        "openapi_spec",
+        type=str,
+        help="URL of the OpenAPI specification.",
+    )
+    convert_api_parser.add_argument(
+        "-o",
+        "--output",
+        type=str,
+        default=None,
+        help="Where to save the converted YAML file. If not specified, it is just printed to stdout.",
+    )
+    convert_api_parser.set_defaults(func=convert_api)
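convert_module is a thin wrapper around tools_from_module plus YAML serialization (convert_api is registered but still raises NotImplementedError). A rough sketch of the same conversion done directly in Python, assuming a local module path such as my_tools.py (hypothetical):

from pydantic_yaml import to_yaml_str

from qtype.converters.tools_from_module import tools_from_module
from qtype.dsl.model import ToolList

# Mirrors convert_module(): collect tools, wrap them in a ToolList, emit YAML.
tools = ToolList(tools_from_module("my_tools.py"))  # type: ignore
print(to_yaml_str(tools, exclude_unset=True, exclude_none=True))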
qtype/commands/generate.py
ADDED
@@ -0,0 +1,107 @@
+import argparse
+import json
+import logging
+from pathlib import Path
+from typing import Optional
+
+from qtype.commons.generate import dump_commons_library
+from qtype.dsl.document import generate_documentation
+from qtype.dsl.model import Document
+
+logger = logging.getLogger(__name__)
+
+
+def generate_schema(args: argparse.Namespace) -> None:
+    """Generate and output the JSON schema for Document.
+
+    Args:
+        args (argparse.Namespace): Command-line arguments with an optional
+            'output' attribute specifying the output file path.
+    """
+    schema = Document.model_json_schema()
+    # Add the $schema property to indicate JSON Schema version
+    schema["$schema"] = "http://json-schema.org/draft-07/schema#"
+    output = json.dumps(schema, indent=2)
+    output_path: Optional[str] = getattr(args, "output", None)
+    if output_path:
+        with open(output_path, "w", encoding="utf-8") as f:
+            f.write(output)
+        logger.info(f"Schema written to {output_path}")
+    else:
+        logger.info("Schema is:\n%s", output)
+
+
+def parser(subparsers: argparse._SubParsersAction) -> None:
+    """Set up the generate subcommand parser."""
+    cmd_parser = subparsers.add_parser(
+        "generate", help="Generates qtype files from different sources."
+    )
+    generate_subparsers = cmd_parser.add_subparsers(
+        dest="generate_target", required=True
+    )
+
+    # Parse for generating commons library tools
+    commons_parser = generate_subparsers.add_parser(
+        "commons", help="Generates the commons library tools."
+    )
+    commons_parser.add_argument(
+        "-p",
+        "--prefix",
+        type=str,
+        default="./common/",
+        help="Output prefix for the YAML file (default: ./common/)",
+    )
+    commons_parser.set_defaults(func=dump_commons_library)
+
+    # Parser for generating the json schema
+    schema_parser = generate_subparsers.add_parser(
+        "schema", help="Generates the schema for the QType DSL."
+    )
+    schema_parser.add_argument(
+        "-o",
+        "--output",
+        type=str,
+        help="Output file for the schema (default: stdout)",
+    )
+    schema_parser.set_defaults(func=generate_schema)
+
+    # Parser for generating the DSL documentation
+    dsl_parser = generate_subparsers.add_parser(
+        "dsl-docs",
+        help="Generates markdown documentation for the QType DSL classes.",
+    )
+    dsl_parser.add_argument(
+        "-o",
+        "--output",
+        type=str,
+        default="docs/DSL-Reference/",
+        help="Output directory for the DSL documentation (default: docs/DSL-Reference/)",
+    )
+    dsl_parser.set_defaults(
+        func=lambda args: generate_documentation(Path(args.output))
+    )
+
+    # Parser for generating the semantic model
+    # only add this if networkx and ruff are installed
+    try:
+        import networkx  # noqa: F401
+        import ruff  # noqa: F401
+
+        from qtype.semantic.generate import generate_semantic_model
+
+        semantic_parser = generate_subparsers.add_parser(
+            "semantic-model",
+            help="Generates the semantic model (i.e., qtype/semantic/model.py) from QType DSL.",
+        )
+        semantic_parser.add_argument(
+            "-o",
+            "--output",
+            type=str,
+            default="qtype/semantic/model.py",
+            help="Output file for the semantic model (default: stdout)",
+        )
+        semantic_parser.set_defaults(func=generate_semantic_model)
+    except ImportError:
+        logger.debug(
+            "NetworkX or Ruff is not installed. Skipping semantic model generation."
+        )
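generate_schema is the part most likely to be reused outside the CLI: it is simply Document.model_json_schema() plus a $schema marker. A minimal sketch that writes the schema to a hypothetical qtype.schema.json:

import json

from qtype.dsl.model import Document

# Same steps as generate_schema(): build the schema, tag the draft, serialize.
schema = Document.model_json_schema()
schema["$schema"] = "http://json-schema.org/draft-07/schema#"

with open("qtype.schema.json", "w", encoding="utf-8") as f:
    f.write(json.dumps(schema, indent=2))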
qtype/commands/run.py
ADDED
@@ -0,0 +1,200 @@
+"""
+Command-line interface for running QType YAML spec files.
+"""
+
+from __future__ import annotations
+
+import argparse
+import logging
+from typing import Any
+
+from qtype.dsl.domain_types import ChatMessage
+from qtype.interpreter.flow import execute_flow
+from qtype.interpreter.typing import create_input_type_model
+from qtype.loader import load
+from qtype.semantic.model import Application, Flow, Step
+
+logger = logging.getLogger(__name__)
+
+
+def _get_flow(app: Application, flow_id: str | None) -> Flow:
+    if len(app.flows) == 0:
+        raise ValueError(
+            "No flows found in the application."
+            " Please ensure the spec contains at least one flow."
+        )
+
+    if flow_id is not None:
+        # find the first flow in the list with the given flow_id
+        flow = next((f for f in app.flows if f.id == flow_id), None)
+        if flow is None:
+            raise ValueError(f"Flow not found: {flow_id}")
+
+    else:
+        flow = app.flows[0]
+
+    return flow
+
+
+def _telemetry(spec: Application) -> None:
+    if spec.telemetry:
+        logger.info(
+            f"Telemetry enabled with endpoint: {spec.telemetry.endpoint}"
+        )
+        # Register telemetry if needed
+        from qtype.interpreter.telemetry import register
+
+        register(spec.telemetry, spec.id)
+
+
+def run_api(args: Any) -> None:
+    """Run a QType YAML spec file as an API.
+
+    Args:
+        args: Arguments passed from the command line or calling context.
+    """
+    spec = load(args.spec)
+    logger.info(f"Running API for spec: {args.spec}")
+    from qtype.interpreter.api import APIExecutor
+
+    # Get the name from the spec filename.
+    # so if filename is tests/specs/full_application_test.qtype.yaml, name should be "Full Application Test"
+    name = (
+        args.spec.split("/")[-1]
+        .replace(".qtype.yaml", "")
+        .replace("_", " ")
+        .title()
+    )
+
+    _telemetry(spec)
+    api_executor = APIExecutor(spec)
+    fastapi_app = api_executor.create_app(name=name)
+
+    import uvicorn
+
+    uvicorn.run(
+        fastapi_app,
+        host=args.host,
+        port=args.port,
+        log_level="info",
+    )
+
+
+def run_flow(args: Any) -> None:
+    """Run a QType YAML spec file by executing its flows.
+
+    Args:
+        args: Arguments passed from the command line or calling context.
+    """
+    spec = load(args.spec)
+
+    flow = _get_flow(spec, args.flow)
+    logger.info(f"Executing flow: {flow.id}")
+    input_type = create_input_type_model(flow)
+    inputs = input_type.model_validate_json(args.input)
+    for var in flow.inputs:
+        # Get the value from the request using the variable ID
+        inputs_dict = inputs.model_dump()  # type: ignore
+        if var.id in inputs_dict:
+            var.value = getattr(inputs, var.id)
+    _telemetry(spec)
+
+    was_streamed = False
+    previous: str = ""
+
+    def stream_fn(step: Step, msg: ChatMessage | str) -> None:
+        """Stream function to handle step outputs."""
+        nonlocal was_streamed, previous
+        if step == flow.steps[-1]:
+            was_streamed = True
+            if isinstance(msg, ChatMessage):
+                content = " ".join(
+                    [m.content for m in msg.blocks if m.content]
+                )
+                # Note: streaming chat messages accumulate the content...
+                content = content.removeprefix(previous)
+                print(content, end="", flush=True)
+                previous += content
+            else:
+                print(msg, end="", flush=True)
+
+    result = execute_flow(flow, stream_fn=stream_fn)  # type: ignore
+    if not was_streamed:
+        logger.info(
+            f"Flow execution result: {', '.join([f'{var.id}: {var.value}' for var in result])}"
+        )
+    else:
+        print("\n")
+
+
+def run_ui(args: Any) -> None:
+    """Run a QType YAML spec file by executing its flows in a UI.
+
+    Args:
+        args: Arguments passed from the command line or calling context.
+    """
+    # Placeholder for actual implementation
+    logger.info(f"Running UI for spec: {args.spec}")
+    # Here you would implement the logic to run the flow in a UI context
+
+
+def parser(subparsers: argparse._SubParsersAction) -> None:
+    """Set up the run subcommand parser.
+
+    Args:
+        subparsers: The subparsers object to add the command to.
+    """
+    cmd_parser = subparsers.add_parser(
+        "run", help="Run a QType YAML spec by executing its flows."
+    )
+
+    run_subparsers = cmd_parser.add_subparsers(
+        dest="run_method", required=True
+    )
+
+    # Parse for generating API runner
+    api_runner_parser = run_subparsers.add_parser(
+        "api", help="Serves the qtype file as an API."
+    )
+    api_runner_parser.add_argument(
+        "-H", "--host", type=str, default="localhost"
+    )
+    api_runner_parser.add_argument("-p", "--port", type=int, default=8000)
+    api_runner_parser.set_defaults(func=run_api)
+
+    # Parse for running a flow
+    flow_parser = run_subparsers.add_parser(
+        "flow", help="Runs a QType YAML spec file by executing its flows."
+    )
+    flow_parser.add_argument(
+        "-f",
+        "--flow",
+        type=str,
+        default=None,
+        help="The name of the flow to run. If not specified, runs the first flow found.",
+    )
+    flow_parser.add_argument(
+        "input",
+        type=str,
+        help="JSON blob of input values for the flow.",
+    )
+
+    flow_parser.set_defaults(func=run_flow)
+
+    # Run a user interface for the spec
+    ui_parser = run_subparsers.add_parser(
+        "ui",
+        help="Runs a QType YAML spec file by executing its flows in a UI.",
+    )
+    ui_parser.add_argument(
+        "-f",
+        "--flow",
+        type=str,
+        default=None,
+        help="The name of the flow to run in the UI. If not specified, runs the first flow found.",
+    )
+    ui_parser.set_defaults(func=run_ui)
+
+    cmd_parser.add_argument(
+        "spec", type=str, help="Path to the QType YAML spec file."
+    )
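run_flow lays out the minimal interpreter loop: load the spec, pick a flow, bind input variables by id, and call execute_flow. A rough non-streaming sketch of the same steps, assuming a hypothetical app.qtype.yaml whose first flow has one input variable named question; the no-op stream callback only mirrors the call signature used above:

from qtype.interpreter.flow import execute_flow
from qtype.loader import load

spec = load("app.qtype.yaml")  # hypothetical spec path
flow = spec.flows[0]           # same default as _get_flow() with no --flow

# Bind input values by variable id, as run_flow() does from its JSON blob.
values = {"question": "What does this application do?"}  # hypothetical variable id
for var in flow.inputs:
    if var.id in values:
        var.value = values[var.id]

result = execute_flow(flow, stream_fn=lambda step, msg: None)
for var in result:
    print(f"{var.id}: {var.value}")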
qtype/commands/validate.py
ADDED
@@ -0,0 +1,83 @@
+"""
+Command-line interface for validating QType YAML spec files.
+"""
+
+import argparse
+import logging
+import sys
+from typing import Any
+
+from pydantic import ValidationError
+
+from qtype import dsl
+from qtype.dsl.validator import QTypeValidationError, validate
+from qtype.loader import _resolve_root, load_yaml
+from qtype.semantic.errors import SemanticResolutionError
+from qtype.semantic.resolver import resolve
+
+logger = logging.getLogger(__name__)
+
+
+def main(args: Any) -> None:
+    """
+    Validate a QType YAML spec file against the QTypeSpec schema and semantics.
+
+    Args:
+        args: Arguments passed from the command line or calling context.
+
+    Exits:
+        Exits with code 1 if validation fails.
+    """
+    try:
+        yaml_data = load_yaml(args.spec)
+        logging.info("✅ Schema validation successful.")
+        document = dsl.Document.model_validate(yaml_data)
+        logging.info("✅ Model validation successful.")
+        document = _resolve_root(document)
+        if not isinstance(document, dsl.Application):
+            logging.warning(
+                "🟨 Spec is not an Application, skipping semantic resolution."
+            )
+        else:
+            document = validate(document)
+            logger.info("✅ Language validation successful")
+            document = resolve(document)
+            logger.info("✅ Semantic validation successful")
+        if args.print:
+            logger.info(
+                document.model_dump_json(  # type: ignore
+                    indent=2,
+                    exclude_none=True,
+                )
+            )
+
+    except ValidationError as exc:
+        logger.error("❌ Schema validation failed:\n%s", exc)
+        sys.exit(1)
+    except QTypeValidationError as exc:
+        logger.error("❌ DSL validation failed:\n%s", exc)
+        sys.exit(1)
+    except SemanticResolutionError as exc:
+        logger.error("❌ Semantic resolution failed:\n%s", exc)
+        sys.exit(1)
+
+
+def parser(subparsers: argparse._SubParsersAction) -> None:
+    """Set up the validate subcommand parser.
+
+    Args:
+        subparsers: The subparsers object to add the command to.
+    """
+    cmd_parser = subparsers.add_parser(
+        "validate", help="Validate a QType YAML spec against the schema."
+    )
+    cmd_parser.add_argument(
+        "spec", type=str, help="Path to the QType YAML spec file."
+    )
+    cmd_parser.add_argument(
+        "-p",
+        "--print",
+        action="store_true",
+        help="Print the spec after validation (default: False)",
+    )
+    cmd_parser.set_defaults(func=main)
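The validate command chains four stages, each with its own failure mode: YAML load, Pydantic model validation, DSL language validation, and semantic resolution. A condensed sketch of the same pipeline for programmatic use, assuming a hypothetical spec.qtype.yaml:

from qtype import dsl
from qtype.dsl.validator import validate
from qtype.loader import _resolve_root, load_yaml
from qtype.semantic.resolver import resolve

# Same stages as main(): parse YAML, validate the DSL model, resolve semantics.
yaml_data = load_yaml("spec.qtype.yaml")
document = dsl.Document.model_validate(yaml_data)
document = _resolve_root(document)
if isinstance(document, dsl.Application):
    document = resolve(validate(document))
print(type(document).__name__)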
qtype/commons/__init__.py
ADDED
File without changes
qtype/commons/generate.py
ADDED
@@ -0,0 +1,88 @@
+import argparse
+import logging
+
+from pydantic import BaseModel
+from pydantic_yaml import to_yaml_str
+
+from qtype.converters.tools_from_module import tools_from_module
+from qtype.dsl.model import Model, ModelList, ToolList
+
+logger = logging.getLogger(__name__)
+
+
+def _write_yaml_file(data: BaseModel, output_path: str) -> None:
+    """
+    Write a Pydantic model to a YAML file.
+
+    Args:
+        data: The Pydantic model instance to write.
+        output_path: The path where the YAML file will be saved.
+    """
+    result = to_yaml_str(data, exclude_unset=True, exclude_none=True)
+    with open(output_path, "w") as f:
+        f.write(result)
+    logger.info(f"Data exported to {output_path}")
+
+
+def dump_built_in_tools(args: argparse.Namespace) -> None:
+    tools = tools_from_module("qtype.commons.tools")
+    if not tools:
+        logger.error("No tools found in the commons library.")
+        return
+
+    tool_list = ToolList(root=tools)  # type: ignore
+    output_path = f"{args.prefix}/tools.qtype.yaml"
+    _write_yaml_file(tool_list, output_path)
+    logging.info(f"Built-in tools exported to {output_path}")
+
+
+def dump_aws_bedrock_models(args: argparse.Namespace) -> None:
+    """
+    Export AWS Bedrock models to a YAML file.
+
+    Args:
+        args: Command line arguments containing the output prefix.
+    """
+    try:
+        import boto3
+
+        client = boto3.client("bedrock")
+        models = client.list_foundation_models()
+
+        # generate a model list from the AWS Bedrock models
+        # the return type of list_foundation_models is
+
+        model_list = ModelList(
+            [
+                Model(
+                    id=model_summary["modelId"],
+                    provider="aws-bedrock",
+                )
+                for model_summary in models.get("modelSummaries", [])
+            ]
+        )
+        output_path = f"{args.prefix}/aws.bedrock.models.qtype.yaml"
+        _write_yaml_file(model_list, output_path)
+        logging.info(f"AWS Bedrock Models exported to {output_path}")
+
+        logger.info("Exporting AWS Bedrock models...")
+        # Placeholder for actual implementation
+        # This function should gather AWS Bedrock models and export them similarly to dump_built_in_tools
+        logger.info("AWS Bedrock models exported successfully.")
+    except ImportError:
+        logger.error(
+            "boto3 is not installed. Please install it to use AWS Bedrock model export."
+        )
+
+
+def dump_commons_library(args: argparse.Namespace) -> None:
+    """
+    Export the commons library tools to a YAML file.
+
+    Args:
+        args: Command line arguments containing the output prefix.
+    """
+    logger.info("Exporting commons library tools...")
+    dump_built_in_tools(args)
+    dump_aws_bedrock_models(args)
+    logger.info("Commons library tools export complete.")