qtype 0.0.1__tar.gz → 0.0.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. {qtype-0.0.1/qtype.egg-info → qtype-0.0.3}/PKG-INFO +13 -1
  2. {qtype-0.0.1 → qtype-0.0.3}/pyproject.toml +17 -14
  3. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/conversions.py +7 -2
  4. qtype-0.0.3/qtype/semantic/base_types.py +47 -0
  5. {qtype-0.0.1 → qtype-0.0.3}/qtype/semantic/generate.py +2 -8
  6. {qtype-0.0.1 → qtype-0.0.3}/qtype/semantic/model.py +2 -12
  7. {qtype-0.0.1 → qtype-0.0.3/qtype.egg-info}/PKG-INFO +13 -1
  8. {qtype-0.0.1 → qtype-0.0.3}/qtype.egg-info/SOURCES.txt +1 -0
  9. qtype-0.0.3/qtype.egg-info/requires.txt +20 -0
  10. qtype-0.0.1/qtype.egg-info/requires.txt +0 -7
  11. {qtype-0.0.1 → qtype-0.0.3}/LICENSE +0 -0
  12. {qtype-0.0.1 → qtype-0.0.3}/README.md +0 -0
  13. {qtype-0.0.1 → qtype-0.0.3}/qtype/__init__.py +0 -0
  14. {qtype-0.0.1 → qtype-0.0.3}/qtype/cli.py +0 -0
  15. {qtype-0.0.1 → qtype-0.0.3}/qtype/commands/__init__.py +0 -0
  16. {qtype-0.0.1 → qtype-0.0.3}/qtype/commands/convert.py +0 -0
  17. {qtype-0.0.1 → qtype-0.0.3}/qtype/commands/generate.py +0 -0
  18. {qtype-0.0.1 → qtype-0.0.3}/qtype/commands/run.py +0 -0
  19. {qtype-0.0.1 → qtype-0.0.3}/qtype/commands/validate.py +0 -0
  20. {qtype-0.0.1 → qtype-0.0.3}/qtype/commons/__init__.py +0 -0
  21. {qtype-0.0.1 → qtype-0.0.3}/qtype/commons/generate.py +0 -0
  22. {qtype-0.0.1 → qtype-0.0.3}/qtype/commons/tools.py +0 -0
  23. {qtype-0.0.1 → qtype-0.0.3}/qtype/converters/__init__.py +0 -0
  24. {qtype-0.0.1 → qtype-0.0.3}/qtype/converters/tools_from_api.py +0 -0
  25. {qtype-0.0.1 → qtype-0.0.3}/qtype/converters/tools_from_module.py +0 -0
  26. {qtype-0.0.1 → qtype-0.0.3}/qtype/converters/types.py +0 -0
  27. {qtype-0.0.1 → qtype-0.0.3}/qtype/dsl/__init__.py +0 -0
  28. {qtype-0.0.1 → qtype-0.0.3}/qtype/dsl/base_types.py +0 -0
  29. {qtype-0.0.1 → qtype-0.0.3}/qtype/dsl/document.py +0 -0
  30. {qtype-0.0.1 → qtype-0.0.3}/qtype/dsl/domain_types.py +0 -0
  31. {qtype-0.0.1 → qtype-0.0.3}/qtype/dsl/model.py +0 -0
  32. {qtype-0.0.1 → qtype-0.0.3}/qtype/dsl/validator.py +0 -0
  33. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/__init__.py +0 -0
  34. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/api.py +0 -0
  35. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/exceptions.py +0 -0
  36. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/flow.py +0 -0
  37. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/resource_cache.py +0 -0
  38. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/step.py +0 -0
  39. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/steps/__init__.py +0 -0
  40. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/steps/agent.py +0 -0
  41. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/steps/condition.py +0 -0
  42. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/steps/decoder.py +0 -0
  43. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/steps/llm_inference.py +0 -0
  44. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/steps/prompt_template.py +0 -0
  45. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/steps/search.py +0 -0
  46. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/steps/tool.py +0 -0
  47. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/telemetry.py +0 -0
  48. {qtype-0.0.1 → qtype-0.0.3}/qtype/interpreter/typing.py +0 -0
  49. {qtype-0.0.1 → qtype-0.0.3}/qtype/loader.py +0 -0
  50. {qtype-0.0.1 → qtype-0.0.3}/qtype/semantic/__init__.py +0 -0
  51. {qtype-0.0.1 → qtype-0.0.3}/qtype/semantic/errors.py +0 -0
  52. {qtype-0.0.1 → qtype-0.0.3}/qtype/semantic/resolver.py +0 -0
  53. {qtype-0.0.1 → qtype-0.0.3}/qtype.egg-info/dependency_links.txt +0 -0
  54. {qtype-0.0.1 → qtype-0.0.3}/qtype.egg-info/entry_points.txt +0 -0
  55. {qtype-0.0.1 → qtype-0.0.3}/qtype.egg-info/top_level.txt +0 -0
  56. {qtype-0.0.1 → qtype-0.0.3}/setup.cfg +0 -0
  57. {qtype-0.0.1 → qtype-0.0.3}/tests/test_dsl_loader.py +0 -0
  58. {qtype-0.0.1 → qtype-0.0.3}/tests/test_dsl_validation.py +0 -0
  59. {qtype-0.0.1 → qtype-0.0.3}/tests/test_semantic_resolver.py +0 -0
  60. {qtype-0.0.1 → qtype-0.0.3}/tests/test_tool_provider_python_module.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: qtype
3
- Version: 0.0.1
3
+ Version: 0.0.3
4
4
  Summary: DSL for Generative AI Prototyping
5
5
  Author-email: Lou Kratz <lou.kratz+qtype@bazaarvoice.com>
6
6
  License-Expression: Apache-2.0
@@ -15,6 +15,18 @@ Requires-Dist: python-dotenv>=1.0.0
15
15
  Requires-Dist: openai>=1.93.0
16
16
  Requires-Dist: fsspec>=2025.5.1
17
17
  Requires-Dist: pydantic-yaml>=1.5.1
18
+ Provides-Extra: interpreter
19
+ Requires-Dist: arize-phoenix-otel>=0.12.1; extra == "interpreter"
20
+ Requires-Dist: boto3>=1.34.0; extra == "interpreter"
21
+ Requires-Dist: fastapi>=0.116.1; extra == "interpreter"
22
+ Requires-Dist: llama-index-embeddings-bedrock>=0.5.2; extra == "interpreter"
23
+ Requires-Dist: llama-index-embeddings-openai>=0.3.1; extra == "interpreter"
24
+ Requires-Dist: llama-index-llms-bedrock-converse>=0.7.4; extra == "interpreter"
25
+ Requires-Dist: llama-index-llms-bedrock>=0.3.8; extra == "interpreter"
26
+ Requires-Dist: llama-index>=0.12.45; extra == "interpreter"
27
+ Requires-Dist: openinference-instrumentation-llama-index>=4.3.1; extra == "interpreter"
28
+ Requires-Dist: psycopg2-binary>=2.9.10; extra == "interpreter"
29
+ Requires-Dist: uvicorn[standard]>=0.35.0; extra == "interpreter"
18
30
  Dynamic: license-file
19
31
 
20
32
  # QType
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "qtype"
3
- version = "0.0.1"
3
+ version = "0.0.3"
4
4
  description = "DSL for Generative AI Prototyping"
5
5
  authors = [{ name="Lou Kratz", email="lou.kratz+qtype@bazaarvoice.com" }]
6
6
  readme = "README.md"
@@ -20,6 +20,21 @@ license-files = ["LICEN[CS]E*"]
20
20
  [project.urls]
21
21
  Homepage = "https://github.com/bazaarvoice/qtype"
22
22
 
23
+ [project.optional-dependencies]
24
+ interpreter = [
25
+ "arize-phoenix-otel>=0.12.1",
26
+ "boto3>=1.34.0",
27
+ "fastapi>=0.116.1",
28
+ "llama-index-embeddings-bedrock>=0.5.2",
29
+ "llama-index-embeddings-openai>=0.3.1",
30
+ "llama-index-llms-bedrock-converse>=0.7.4",
31
+ "llama-index-llms-bedrock>=0.3.8",
32
+ "llama-index>=0.12.45",
33
+ "openinference-instrumentation-llama-index>=4.3.1",
34
+ "psycopg2-binary>=2.9.10",
35
+ "uvicorn[standard]>=0.35.0",
36
+ ]
37
+
23
38
  [dependency-groups]
24
39
  dev = [
25
40
  "arize-phoenix>=11.2.2",
@@ -34,6 +49,7 @@ dev = [
34
49
  "mkdocstrings>=0.30.0",
35
50
  "mypy>=1.8.0",
36
51
  "networkx>=3.4.2",
52
+ "pkginfo>=1.12.1.2",
37
53
  "pre-commit>=3.6.0",
38
54
  "pymdown-extensions>=10.16",
39
55
  "pytest-cov>=6.0.0",
@@ -45,19 +61,6 @@ docs = [
45
61
  "mkdocs>=1.5.0",
46
62
  "mkdocs-material>=9.0.0",
47
63
  ]
48
- interpreter = [
49
- "arize-phoenix-otel>=0.12.1",
50
- "boto3>=1.34.0",
51
- "fastapi>=0.116.1",
52
- "llama-index-embeddings-bedrock>=0.5.2",
53
- "llama-index-embeddings-openai>=0.3.1",
54
- "llama-index-llms-bedrock-converse>=0.7.4",
55
- "llama-index-llms-bedrock>=0.3.8",
56
- "llama-index>=0.12.45",
57
- "openinference-instrumentation-llama-index>=4.3.1",
58
- "psycopg2-binary>=2.9.10",
59
- "uvicorn[standard]>=0.35.0",
60
- ]
61
64
 
62
65
  [tool.uv]
63
66
  # Install dev dependencies by default when running uv sync
@@ -39,6 +39,7 @@ def to_llm(model: Model, system_prompt: str | None) -> BaseLLM:
39
39
  # BedrockConverse requires a model_id and system_prompt
40
40
  # Inference params can be passed as additional kwargs
41
41
  from llama_index.llms.bedrock_converse import BedrockConverse
42
+
42
43
  brv: BaseLLM = BedrockConverse(
43
44
  model=model.model_id if model.model_id else model.id,
44
45
  system_prompt=system_prompt,
@@ -47,14 +48,18 @@ def to_llm(model: Model, system_prompt: str | None) -> BaseLLM:
47
48
  return brv
48
49
  elif model.provider == "openai":
49
50
  from llama_index.llms.openai import OpenAI
51
+
50
52
  return OpenAI(
51
53
  model=model.model_id if model.model_id else model.id,
52
54
  system_prompt=system_prompt,
53
55
  **(model.inference_params if model.inference_params else {}),
54
- api_key=model.auth.api_key if model.auth and model.auth.api_key else None,
56
+ api_key=model.auth.api_key
57
+ if model.auth and model.auth.api_key
58
+ else None,
55
59
  )
56
60
  elif model.provider == "anthropic":
57
61
  from llama_index.llms.anthropic import Anthropic
62
+
58
63
  arv: BaseLLM = Anthropic(
59
64
  model=model.model_id if model.model_id else model.id,
60
65
  system_prompt=system_prompt,
@@ -71,7 +76,7 @@ def to_llm(model: Model, system_prompt: str | None) -> BaseLLM:
71
76
  def to_embedding_model(model: Model) -> BaseEmbedding:
72
77
  """Convert a qtype Model to a LlamaIndex embedding model."""
73
78
 
74
- if model.provider in {"bedrock","aws", "aws-bedrock"}:
79
+ if model.provider in {"bedrock", "aws", "aws-bedrock"}:
75
80
  from llama_index.embeddings.bedrock import BedrockEmbedding
76
81
 
77
82
  embedding: BaseEmbedding = BedrockEmbedding(
@@ -0,0 +1,47 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+ from pydantic import BaseModel, ConfigDict, Field
6
+
7
+
8
+ def _make_hashable(value: Any) -> Any:
9
+ """Convert a value to a hashable equivalent."""
10
+ if isinstance(value, BaseModel):
11
+ # Handle Pydantic models by iterating over their fields
12
+ hashable_values = []
13
+ for field_name, field_value in value.model_dump().items():
14
+ hashable_values.append((field_name, _make_hashable(field_value)))
15
+ return tuple(sorted(hashable_values))
16
+ elif isinstance(value, dict):
17
+ return frozenset(
18
+ (k, _make_hashable(v)) for k, v in sorted(value.items())
19
+ )
20
+ elif isinstance(value, list):
21
+ return tuple(_make_hashable(item) for item in value)
22
+ elif isinstance(value, set):
23
+ return frozenset(_make_hashable(item) for item in value)
24
+ elif hasattr(value, "__dict__"):
25
+ # Handle nested objects
26
+ return tuple(
27
+ sorted(
28
+ (k, _make_hashable(v))
29
+ for k, v in value.__dict__.items()
30
+ if not k.startswith("_")
31
+ )
32
+ )
33
+ else:
34
+ # Value is already hashable (int, str, tuple, etc.)
35
+ return value
36
+
37
+
38
+ class ImmutableModel(BaseModel):
39
+ """Base model that can't be mutated but can be cached."""
40
+
41
+ id: str = Field(..., description="Unique ID of this model.")
42
+
43
+ model_config = ConfigDict(frozen=True)
44
+
45
+ def __hash__(self) -> int:
46
+ """Hash based on all model fields."""
47
+ return hash(_make_hashable(self))
@@ -108,7 +108,7 @@ def generate_semantic_model(args: argparse.Namespace) -> None:
108
108
  # Write imports
109
109
  f.write("from __future__ import annotations\n\n")
110
110
  f.write("from typing import Any, Type\n\n")
111
- f.write("from pydantic import BaseModel, ConfigDict, Field\n\n")
111
+ f.write("from pydantic import BaseModel, Field\n\n")
112
112
  f.write("# Import enums and type aliases from DSL\n")
113
113
  f.write("from qtype.dsl.model import VariableType # noqa: F401\n")
114
114
  f.write(
@@ -117,6 +117,7 @@ def generate_semantic_model(args: argparse.Namespace) -> None:
117
117
  f.write(
118
118
  "from qtype.dsl.model import Variable as DSLVariable # noqa: F401\n"
119
119
  )
120
+ f.write("from qtype.semantic.base_types import ImmutableModel\n")
120
121
 
121
122
  # Write the new variable class
122
123
  f.write("class Variable(DSLVariable, BaseModel):\n")
@@ -129,13 +130,6 @@ def generate_semantic_model(args: argparse.Namespace) -> None:
129
130
  f.write(" def is_set(self) -> bool:\n")
130
131
  f.write(" return self.value is not None\n")
131
132
 
132
- # Write the new ImmutableModel class
133
- f.write("\n\nclass ImmutableModel(BaseModel):\n")
134
- f.write(
135
- ' """Base model that can\'t be mutated but can be cached."""\n'
136
- )
137
- f.write(" model_config = ConfigDict(frozen=True)\n\n")
138
-
139
133
  # Write classes
140
134
  f.write("\n\n".join(generated))
141
135
  f.write("\n\n")
@@ -13,7 +13,7 @@ from __future__ import annotations
13
13
 
14
14
  from typing import Any
15
15
 
16
- from pydantic import BaseModel, ConfigDict, Field
16
+ from pydantic import BaseModel, Field
17
17
 
18
18
  # Import enums and type aliases from DSL
19
19
  from qtype.dsl.model import VariableType # noqa: F401
@@ -24,6 +24,7 @@ from qtype.dsl.model import (
24
24
  StructuralTypeEnum,
25
25
  )
26
26
  from qtype.dsl.model import Variable as DSLVariable # noqa: F401
27
+ from qtype.semantic.base_types import ImmutableModel
27
28
 
28
29
 
29
30
  class Variable(DSLVariable, BaseModel):
@@ -35,17 +36,6 @@ class Variable(DSLVariable, BaseModel):
35
36
  return self.value is not None
36
37
 
37
38
 
38
- class ImmutableModel(BaseModel):
39
- """Base model that can't be mutated but can be cached."""
40
-
41
- id: str = Field(..., description="Unique ID of this model.")
42
-
43
- model_config = ConfigDict(frozen=True)
44
-
45
- def __hash__(self) -> int:
46
- return hash(self.id) # Hash based on a hashable field
47
-
48
-
49
39
  class Application(BaseModel):
50
40
  """Defines a QType application that can include models, variables, and other components."""
51
41
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: qtype
3
- Version: 0.0.1
3
+ Version: 0.0.3
4
4
  Summary: DSL for Generative AI Prototyping
5
5
  Author-email: Lou Kratz <lou.kratz+qtype@bazaarvoice.com>
6
6
  License-Expression: Apache-2.0
@@ -15,6 +15,18 @@ Requires-Dist: python-dotenv>=1.0.0
15
15
  Requires-Dist: openai>=1.93.0
16
16
  Requires-Dist: fsspec>=2025.5.1
17
17
  Requires-Dist: pydantic-yaml>=1.5.1
18
+ Provides-Extra: interpreter
19
+ Requires-Dist: arize-phoenix-otel>=0.12.1; extra == "interpreter"
20
+ Requires-Dist: boto3>=1.34.0; extra == "interpreter"
21
+ Requires-Dist: fastapi>=0.116.1; extra == "interpreter"
22
+ Requires-Dist: llama-index-embeddings-bedrock>=0.5.2; extra == "interpreter"
23
+ Requires-Dist: llama-index-embeddings-openai>=0.3.1; extra == "interpreter"
24
+ Requires-Dist: llama-index-llms-bedrock-converse>=0.7.4; extra == "interpreter"
25
+ Requires-Dist: llama-index-llms-bedrock>=0.3.8; extra == "interpreter"
26
+ Requires-Dist: llama-index>=0.12.45; extra == "interpreter"
27
+ Requires-Dist: openinference-instrumentation-llama-index>=4.3.1; extra == "interpreter"
28
+ Requires-Dist: psycopg2-binary>=2.9.10; extra == "interpreter"
29
+ Requires-Dist: uvicorn[standard]>=0.35.0; extra == "interpreter"
18
30
  Dynamic: license-file
19
31
 
20
32
  # QType
@@ -46,6 +46,7 @@ qtype/interpreter/steps/prompt_template.py
46
46
  qtype/interpreter/steps/search.py
47
47
  qtype/interpreter/steps/tool.py
48
48
  qtype/semantic/__init__.py
49
+ qtype/semantic/base_types.py
49
50
  qtype/semantic/errors.py
50
51
  qtype/semantic/generate.py
51
52
  qtype/semantic/model.py
@@ -0,0 +1,20 @@
1
+ jsonschema>=4.24.0
2
+ pydantic>=2.11.5
3
+ pyyaml>=6.0.2
4
+ python-dotenv>=1.0.0
5
+ openai>=1.93.0
6
+ fsspec>=2025.5.1
7
+ pydantic-yaml>=1.5.1
8
+
9
+ [interpreter]
10
+ arize-phoenix-otel>=0.12.1
11
+ boto3>=1.34.0
12
+ fastapi>=0.116.1
13
+ llama-index-embeddings-bedrock>=0.5.2
14
+ llama-index-embeddings-openai>=0.3.1
15
+ llama-index-llms-bedrock-converse>=0.7.4
16
+ llama-index-llms-bedrock>=0.3.8
17
+ llama-index>=0.12.45
18
+ openinference-instrumentation-llama-index>=4.3.1
19
+ psycopg2-binary>=2.9.10
20
+ uvicorn[standard]>=0.35.0
@@ -1,7 +0,0 @@
1
- jsonschema>=4.24.0
2
- pydantic>=2.11.5
3
- pyyaml>=6.0.2
4
- python-dotenv>=1.0.0
5
- openai>=1.93.0
6
- fsspec>=2025.5.1
7
- pydantic-yaml>=1.5.1
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes