py-ai-toolkit 0.2.0 (tar.gz)

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Files changed (29)
  1. py_ai_toolkit-0.2.0/LICENSE.txt +21 -0
  2. py_ai_toolkit-0.2.0/MANIFEST.in +38 -0
  3. py_ai_toolkit-0.2.0/PKG-INFO +165 -0
  4. py_ai_toolkit-0.2.0/README.md +138 -0
  5. py_ai_toolkit-0.2.0/py_ai_toolkit/__init__.py +18 -0
  6. py_ai_toolkit-0.2.0/py_ai_toolkit/adapters/__init__.py +5 -0
  7. py_ai_toolkit-0.2.0/py_ai_toolkit/adapters/instructor_adapter.py +125 -0
  8. py_ai_toolkit-0.2.0/py_ai_toolkit/adapters/jinja2_adapter.py +40 -0
  9. py_ai_toolkit-0.2.0/py_ai_toolkit/adapters/pydantic_adapter.py +71 -0
  10. py_ai_toolkit-0.2.0/py_ai_toolkit/core/base.py +130 -0
  11. py_ai_toolkit-0.2.0/py_ai_toolkit/core/domain/errors.py +30 -0
  12. py_ai_toolkit-0.2.0/py_ai_toolkit/core/domain/interfaces.py +24 -0
  13. py_ai_toolkit-0.2.0/py_ai_toolkit/core/ports/__init__.py +6 -0
  14. py_ai_toolkit-0.2.0/py_ai_toolkit/core/ports/formatter_port.py +18 -0
  15. py_ai_toolkit-0.2.0/py_ai_toolkit/core/ports/llm_port.py +60 -0
  16. py_ai_toolkit-0.2.0/py_ai_toolkit/core/ports/modeller_port.py +32 -0
  17. py_ai_toolkit-0.2.0/py_ai_toolkit/core/ports/workflow_port.py +56 -0
  18. py_ai_toolkit-0.2.0/py_ai_toolkit/core/tools.py +178 -0
  19. py_ai_toolkit-0.2.0/py_ai_toolkit/core/utils.py +4 -0
  20. py_ai_toolkit-0.2.0/py_ai_toolkit/factories.py +55 -0
  21. py_ai_toolkit-0.2.0/py_ai_toolkit.egg-info/PKG-INFO +165 -0
  22. py_ai_toolkit-0.2.0/py_ai_toolkit.egg-info/SOURCES.txt +27 -0
  23. py_ai_toolkit-0.2.0/py_ai_toolkit.egg-info/dependency_links.txt +1 -0
  24. py_ai_toolkit-0.2.0/py_ai_toolkit.egg-info/requires.txt +10 -0
  25. py_ai_toolkit-0.2.0/py_ai_toolkit.egg-info/top_level.txt +1 -0
  26. py_ai_toolkit-0.2.0/pyproject.toml +35 -0
  27. py_ai_toolkit-0.2.0/setup.cfg +4 -0
  28. py_ai_toolkit-0.2.0/tests/test_workflow.py +89 -0
  29. py_ai_toolkit-0.2.0/tests/unit/test_tools.py +119 -0
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Paulo Mattos
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,38 @@
+ # Include the essential files
+ include LICENSE.txt
+ include README.md
+ include requirements.txt
+ include setup.py
+
+ # Include all Python files in the main directories
+ recursive-include ait *.py
+ recursive-include tests *.py
+
+ # Exclude unnecessary directories and files
+ exclude .github/*
+ exclude .gitignore
+ exclude .vscode/*
+ exclude .venv/*
+ exclude .pytest_cache/*
+ recursive-exclude ait/__pycache__ *
+ recursive-exclude tests/__pycache__ *
+ # Include the essential files
+ include LICENSE.txt
+ include readme.md
+ include requirements.txt
+ include setup.py
+
+ # Include all Python files in the main directories
+ recursive-include ait *.py
+ recursive-include tests *.py
+
+ # Exclude unnecessary directories and files
+ exclude .gitignore
+ exclude .vscode/*
+ exclude .venv/*
+ exclude .pytest_cache/*
+ exclude push-all.sh
+ exclude ait.egg-info/*
+ recursive-exclude ait/__pycache__ *
+ recursive-exclude tests/__pycache__ *
+ recursive-exclude ait.egg-info *
@@ -0,0 +1,165 @@
+ Metadata-Version: 2.4
+ Name: py-ai-toolkit
+ Version: 0.2.0
+ Summary: A set of tools for easily interacting with LLMs.
+ Author-email: "@paulomtts" <paulomtts@outlook.com>
+ License: MIT
+ Project-URL: Homepage, https://github.com/paulomtts/Grafo-AI-Tools.git
+ Keywords: ai,agents,llm,workflows
+ Classifier: Intended Audience :: Developers
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.11
+ Description-Content-Type: text/markdown
+ License-File: LICENSE.txt
+ Requires-Dist: build>=1.3.0
+ Requires-Dist: bump2version>=1.0.1
+ Requires-Dist: grafo>=0.2.36
+ Requires-Dist: instructor>=1.13.0
+ Requires-Dist: openai>=1.104.2
+ Requires-Dist: pydantic>=2.12.4
+ Requires-Dist: pytest>=8.4.1
+ Requires-Dist: pytest-asyncio>=1.1.0
+ Requires-Dist: pyyaml>=6.0.2
+ Requires-Dist: toon-python>=0.1.2
+ Dynamic: license-file
+
+ # Install
+ ```
+ uv add py-ai-toolkit
+ ```
+
+ # WHAT
+ A set of tools for easily interacting with LLMs.
+
+ # WHY
+ Building AI-driven software relies on a number of recurring utilities, such as prompt building and calling LLMs over HTTP. Additionally, writing agents and workflows can prove particularly challenging with conventional code structures.
+
+ # HOW
+ This simple library offers a set of predefined functions for:
+ - Easy prompting - you need only provide a path
+ - Calling LLMs - instructor takes care of that for us
+ - Modifying response models - we use Pydantic (duh)
+
+ Additionally, we provide `grafo` out of the box for convenient workflow building.
+
+ ## About Grafo
+ Grafo (see Recommended Docs below) is a library for building executable DAGs where each node wraps a coroutine. Since the DAG abstraction fits AI-driven development particularly well, we provide the `BaseWorkflow` class with the following methods:
+ - `task` for LLM calling
+ - `redirect` to help you manage redirections in your `grafo` workflows
+
+ # Examples
+ ### Simple text:
+ ```python
+ from py_ai_toolkit import AIT
+
+ ait = AIT("gpt-5")
+ path = "./prompt.md"
+ response = ait.chat(path)
+ print(response.completion)
+ print(response.content)
+ ```
+
+ ### Structured response:
+ ```python
+ from py_ai_toolkit import AIT
+ from pydantic import BaseModel
+
+ class Purchase(BaseModel):
+     product: str
+     quantity: int
+
+ ait = AIT("gpt-5")
+ path = "./prompt.md"  # PROMPT: {{ message }}
+ message = "I want to buy 5 apples"
+ response = ait.asend(response_model=Purchase, path=path, message=message)
+ ```
+
+ ### Structured response with model type injection:
+ ```python
+ from typing import Literal
+
+ from py_ai_toolkit import AIT
+ from pydantic import BaseModel
+
+ class Purchase(BaseModel):
+     product: str
+     quantity: int
+
+ ait = AIT("gpt-5")
+ path = "./prompt.md"  # PROMPT: {{ message }}
+ message = "I want to buy 5 apples"
+ available_fruits = ["apple", "banana", "orange"]
+ FruitModel = ait.inject_types(Purchase, [
+     ("product", Literal[tuple(available_fruits)])
+ ])
+ response = ait.asend(response_model=FruitModel, path=path, message=message)
+ ```
+
+ ### Simple workflow:
+ ```python
+ from typing import Literal
+
+ from py_ai_toolkit import AIT, BaseWorkflow, Node, TreeExecutor
+ from pydantic import BaseModel
+
+ class Purchase(BaseModel):
+     product: str
+     quantity: int
+
+ class Eval(BaseModel):
+     is_valid: bool
+     reasoning: str
+     humanized_failure_reason: str | None
+
+ ait = AIT("gpt-5")
+ prompts_path = "./"
+ message = "I want to buy 5 apples"
+ available_fruits = ["apple", "banana", "orange"]
+ FruitModel = ait.inject_types(Purchase, [
+     ("product", Literal[tuple(available_fruits)])
+ ])
+
+ class PurchaseWorkflow(BaseWorkflow):
+     def __init__(...):
+         ...
+
+     async def run(self, message) -> Purchase:
+         purchase_node = Node[FruitModel](
+             uuid="fruit purchase node",
+             coroutine=self.task,
+             kwargs=dict(
+                 path=f"{prompts_path}/purchase.md",
+                 response_model=FruitModel,
+                 message=message,
+             ),
+         )
+         validation_node = Node[Eval](
+             uuid="purchase eval node",
+             coroutine=self.task,
+             kwargs=dict(
+                 path=f"{prompts_path}/eval.md",
+                 response_model=Eval,
+                 message=message,
+                 purchase=lambda: purchase_node.output,
+             ),
+         )
+         validation_node.on_after_run = (
+             self.redirect,
+             dict(
+                 source_node=purchase_node,
+                 validation_node=validation_node,
+             ),
+         )
+         await purchase_node.connect(validation_node)
+         executor = TreeExecutor(uuid="Purchase Workflow", roots=[purchase_node])
+         await executor.run()
+
+         if not purchase_node.output or not validation_node.output.is_valid:
+             raise ValueError("Purchase failed.")
+
+         return purchase_node.output
+ ```
+
+ ## Recommended Docs
+ - `instructor` https://python.useinstructor.com/
+ - `jinja2` https://jinja.palletsprojects.com/en/stable/
+ - `pydantic` https://docs.pydantic.dev/latest/
+ - `grafo` https://github.com/paulomtts/grafo
@@ -0,0 +1,138 @@
+ # Install
+ ```
+ uv add py-ai-toolkit
+ ```
+
+ # WHAT
+ A set of tools for easily interacting with LLMs.
+
+ # WHY
+ Building AI-driven software relies on a number of recurring utilities, such as prompt building and calling LLMs over HTTP. Additionally, writing agents and workflows can prove particularly challenging with conventional code structures.
+
+ # HOW
+ This simple library offers a set of predefined functions for:
+ - Easy prompting - you need only provide a path
+ - Calling LLMs - instructor takes care of that for us
+ - Modifying response models - we use Pydantic (duh)
+
+ Additionally, we provide `grafo` out of the box for convenient workflow building.
+
+ ## About Grafo
+ Grafo (see Recommended Docs below) is a library for building executable DAGs where each node wraps a coroutine. Since the DAG abstraction fits AI-driven development particularly well, we provide the `BaseWorkflow` class with the following methods:
+ - `task` for LLM calling
+ - `redirect` to help you manage redirections in your `grafo` workflows
+
+ # Examples
+ ### Simple text:
+ ```python
+ from py_ai_toolkit import AIT
+
+ ait = AIT("gpt-5")
+ path = "./prompt.md"
+ response = ait.chat(path)
+ print(response.completion)
+ print(response.content)
+ ```
+
+ ### Structured response:
+ ```python
+ from py_ai_toolkit import AIT
+ from pydantic import BaseModel
+
+ class Purchase(BaseModel):
+     product: str
+     quantity: int
+
+ ait = AIT("gpt-5")
+ path = "./prompt.md"  # PROMPT: {{ message }}
+ message = "I want to buy 5 apples"
+ response = ait.asend(response_model=Purchase, path=path, message=message)
+ ```
+
+ ### Structured response with model type injection:
+ ```python
+ from typing import Literal
+
+ from py_ai_toolkit import AIT
+ from pydantic import BaseModel
+
+ class Purchase(BaseModel):
+     product: str
+     quantity: int
+
+ ait = AIT("gpt-5")
+ path = "./prompt.md"  # PROMPT: {{ message }}
+ message = "I want to buy 5 apples"
+ available_fruits = ["apple", "banana", "orange"]
+ FruitModel = ait.inject_types(Purchase, [
+     ("product", Literal[tuple(available_fruits)])
+ ])
+ response = ait.asend(response_model=FruitModel, path=path, message=message)
+ ```
+
+ ### Simple workflow:
+ ```python
+ from typing import Literal
+
+ from py_ai_toolkit import AIT, BaseWorkflow, Node, TreeExecutor
+ from pydantic import BaseModel
+
+ class Purchase(BaseModel):
+     product: str
+     quantity: int
+
+ class Eval(BaseModel):
+     is_valid: bool
+     reasoning: str
+     humanized_failure_reason: str | None
+
+ ait = AIT("gpt-5")
+ prompts_path = "./"
+ message = "I want to buy 5 apples"
+ available_fruits = ["apple", "banana", "orange"]
+ FruitModel = ait.inject_types(Purchase, [
+     ("product", Literal[tuple(available_fruits)])
+ ])
+
+ class PurchaseWorkflow(BaseWorkflow):
+     def __init__(...):
+         ...
+
+     async def run(self, message) -> Purchase:
+         purchase_node = Node[FruitModel](
+             uuid="fruit purchase node",
+             coroutine=self.task,
+             kwargs=dict(
+                 path=f"{prompts_path}/purchase.md",
+                 response_model=FruitModel,
+                 message=message,
+             ),
+         )
+         validation_node = Node[Eval](
+             uuid="purchase eval node",
+             coroutine=self.task,
+             kwargs=dict(
+                 path=f"{prompts_path}/eval.md",
+                 response_model=Eval,
+                 message=message,
+                 purchase=lambda: purchase_node.output,
+             ),
+         )
+         validation_node.on_after_run = (
+             self.redirect,
+             dict(
+                 source_node=purchase_node,
+                 validation_node=validation_node,
+             ),
+         )
+         await purchase_node.connect(validation_node)
+         executor = TreeExecutor(uuid="Purchase Workflow", roots=[purchase_node])
+         await executor.run()
+
+         if not purchase_node.output or not validation_node.output.is_valid:
+             raise ValueError("Purchase failed.")
+
+         return purchase_node.output
+ ```
+
+ ## Recommended Docs
+ - `instructor` https://python.useinstructor.com/
+ - `jinja2` https://jinja.palletsprojects.com/en/stable/
+ - `pydantic` https://docs.pydantic.dev/latest/
+ - `grafo` https://github.com/paulomtts/grafo
@@ -0,0 +1,18 @@
+ __version__ = "0.2.0"
+
+ from grafo import Chunk, Node, TreeExecutor
+
+ from .core.base import BaseWorkflow
+ from .core.domain.errors import BaseError
+ from .core.domain.interfaces import CompletionResponse
+ from .core.tools import PyAIToolkit
+
+ __all__ = [
+     "PyAIToolkit",
+     "CompletionResponse",
+     "Node",
+     "TreeExecutor",
+     "Chunk",
+     "BaseWorkflow",
+     "BaseError",
+ ]
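
For reference, a minimal sketch of the import surface these re-exports give downstream code (names taken verbatim from the `__all__` above; `grafo`'s `Node`, `TreeExecutor`, and `Chunk` ride along at the top level):

```python
# All of these resolve from the top-level package, per __init__.py above.
from py_ai_toolkit import (
    BaseError,
    BaseWorkflow,
    Chunk,
    CompletionResponse,
    Node,
    PyAIToolkit,
    TreeExecutor,
)

print(PyAIToolkit, TreeExecutor)
```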
@@ -0,0 +1,5 @@
+ from .instructor_adapter import InstructorAdapter
+ from .jinja2_adapter import Jinja2Adapter
+ from .pydantic_adapter import PydanticAdapter
+
+ __all__ = ["Jinja2Adapter", "PydanticAdapter", "InstructorAdapter"]
@@ -0,0 +1,125 @@
+ from http import HTTPStatus
+ from typing import AsyncGenerator, Type
+
+ import instructor
+ from openai import AsyncOpenAI
+ from openai.types.chat import ChatCompletion, ChatCompletionChunk
+
+ from py_ai_toolkit.core.domain.errors import LLMAdapterError
+ from py_ai_toolkit.core.domain.interfaces import CompletionResponse, T
+ from py_ai_toolkit.core.ports import LLMPort
+
+
+ class InstructorAdapter(LLMPort):
+     """
+     Instructor implementation of the LLM port.
+     """
+
+     def __init__(
+         self,
+         model: str,
+         embedding_model: str,
+         api_key: str,
+         base_url: str | None = None,
+     ):
+         self._model = model
+         self._embedding_model = embedding_model
+
+         client_kwargs = dict(
+             api_key=api_key,
+         )
+         if not base_url:
+             client_kwargs["base_url"] = "http://localhost:11434/v1"
+         self.openai_client = AsyncOpenAI(**client_kwargs)  # type: ignore
+         self.client = instructor.from_openai(
+             client=self.openai_client,
+             mode=instructor.Mode.JSON,
+         )
+
+     async def chat(self, messages: list[dict[str, str]]) -> CompletionResponse:
+         """
+         Sends a message to the LLM and returns the plain-text response.
+
+         Args:
+             messages (list[dict[str, str]]): The messages to generate a response to
+
+         Returns:
+             CompletionResponse: The response from the LLM
+         """
+         output: ChatCompletion = await self.openai_client.chat.completions.create(
+             model=self._model,
+             messages=messages,
+             stream=False,
+         )
+         response = output.choices[0].message.content
+         if not response:
+             raise LLMAdapterError(
+                 status_code=HTTPStatus.SERVICE_UNAVAILABLE.value,
+                 message="No response from the model",
+             )
+         return CompletionResponse(
+             completion=output,
+             content=response,
+         )
+
+     async def stream(
+         self, messages: list[dict[str, str]]
+     ) -> AsyncGenerator[CompletionResponse, None]:
+         """
+         Streams text outputs from the model.
+
+         Args:
+             messages (list[dict[str, str]]): The messages to generate a response to
+
+         Returns:
+             AsyncGenerator[CompletionResponse, None]: The streamed chunks from the LLM
+         """
+         output: AsyncGenerator[
+             ChatCompletionChunk, None
+         ] = await self.openai_client.chat.completions.create(
+             model=self._model,
+             messages=messages,
+             stream=True,
+         )
+         async for chunk in output:
+             response = chunk.choices[0].delta.content
+             if not response:
+                 continue
+             yield CompletionResponse(
+                 completion=chunk,
+                 content=response,
+             )
+
+     async def asend(
+         self,
+         messages: list[dict[str, str]],
+         response_model: Type[T],
+     ) -> CompletionResponse[T]:
+         """
+         Sends a message to the LLM asynchronously and returns a structured response.
+
+         Args:
+             messages (list[dict[str, str]]): The messages to generate a response to
+             response_model (Type[T]): The model to return the response as
+
+         Returns:
+             CompletionResponse[T]: The response from the LLM
+         """
+
+         (
+             instance,
+             completion,
+         ) = await self.client.chat.completions.create_with_completion(
+             response_model=response_model,
+             model=self._model,
+             messages=messages,
+         )
+         if not instance:
+             raise LLMAdapterError(
+                 status_code=HTTPStatus.SERVICE_UNAVAILABLE.value,
+                 message="No response content from the model",
+             )
+         return CompletionResponse(
+             completion=completion,
+             content=instance,
+         )
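
To make the adapter's surface concrete, here is a minimal usage sketch, not a definitive integration. The model name, API key, and message are placeholders, and it assumes an endpoint reachable with that key; only `chat` and `asend` from the file above are exercised.

```python
import asyncio

from pydantic import BaseModel

from py_ai_toolkit.adapters import InstructorAdapter


class City(BaseModel):
    name: str
    country: str


async def main() -> None:
    # Placeholder credentials and model names. Note: per __init__ above,
    # omitting base_url makes the client fall back to a local endpoint
    # (http://localhost:11434/v1).
    adapter = InstructorAdapter(
        model="gpt-4o-mini",
        embedding_model="text-embedding-3-small",
        api_key="sk-placeholder",
    )
    messages = [{"role": "user", "content": "Name one city in Japan."}]

    # Plain-text completion wrapped in a CompletionResponse
    text = await adapter.chat(messages)
    print(text.content)

    # Structured completion validated into the City model via instructor
    structured = await adapter.asend(messages, response_model=City)
    print(structured.content.name, structured.content.country)


asyncio.run(main())
```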
@@ -0,0 +1,40 @@
+ from typing import Any, Optional
+
+ from jinja2 import Environment
+
+ from py_ai_toolkit.core.domain.errors import FormatterAdapterError
+ from py_ai_toolkit.core.ports import FormatterPort
+
+
+ class Jinja2Adapter(FormatterPort):
+     """
+     Jinja2 implementation of the formatter port.
+     Supports only Markdown files.
+     """
+
+     def __init__(self):
+         self.env = Environment()
+
+     def _load_prompt(self, path: str) -> str:
+         with open(path, "r", encoding="utf-8") as file:
+             return file.read()
+
+     def render(
+         self,
+         path: str | None = None,
+         prompt: str | None = None,
+         input: Optional[dict[str, Any]] = None,
+     ) -> str:
+         """
+         Render a Markdown template with variables, from a file path or an inline prompt.
+
+         Args:
+             path (str | None): The path of the template file to load
+             prompt (str | None): The prompt to render
+             input (Optional[dict[str, Any]]): Variables to pass to the template
+         """
+         if not path and not prompt:
+             raise FormatterAdapterError("Either path or prompt must be provided")
+         base_prompt = prompt or self._load_prompt(path)
+         template = self.env.from_string(base_prompt)
+         return template.render(**(input or {}))
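
A quick sketch of `render` with an inline prompt; the template string and variables are made up for illustration, and `path=` behaves the same once the file has been read by `_load_prompt`:

```python
from py_ai_toolkit.adapters import Jinja2Adapter

formatter = Jinja2Adapter()

# Inline template: per render() above, `prompt` takes precedence over `path`.
text = formatter.render(
    prompt="Hello, {{ name }}! You have {{ count }} new messages.",
    input={"name": "Ada", "count": 3},
)
print(text)  # Hello, Ada! You have 3 new messages.
```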
@@ -0,0 +1,71 @@
+ import re
+ from typing import Any, Type, TypeVar
+
+ from pydantic import BaseModel, Field, create_model
+ from pydantic_core import PydanticUndefined
+
+ from py_ai_toolkit.core.ports import ModellerPort
+
+ T = TypeVar("T", bound=BaseModel)
+
+
+ class PydanticAdapter(ModellerPort):
+     """
+     Service for creating Pydantic models from schemas.
+     """
+
+     def _normalize(self, text: str) -> str:
+         """
+         Normalizes the text to a valid Pydantic model field name.
+         """
+         return re.sub(r"[^a-zA-Z0-9_]", "", text)
+
+     def _pascal_case(self, string: str) -> str:
+         """
+         Converts a string to Pascal case.
+         """
+         normalized = re.sub(r"[^a-zA-Z0-9\s]", " ", string).strip()
+         return "".join(word.capitalize() for word in normalized.split())
+
+     def inject_types(
+         self,
+         model: Type[T],
+         fields: list[tuple[str, Any]],
+     ) -> Type[T]:
+         """
+         Injects field types into a model.
+         """
+         return create_model(
+             model.__name__ + "Model",
+             __base__=(model,),
+             __doc__=model.__doc__,
+             **{
+                 field_name: (
+                     field_type,
+                     Field(
+                         description=model.model_fields[field_name].description,
+                         examples=model.model_fields[field_name].examples,
+                     ),
+                 )
+                 for (field_name, field_type) in fields
+             },  # type: ignore
+         )
+
+     def reduce_model_schema(
+         self, model: Type[T], include_description: bool = True
+     ) -> str:
+         """
+         Reduces the model schema into a version with fewer tokens. Helpful for reducing prompt noise.
+         """
+         reduced_schema = []
+         for field, info in model.model_fields.items():
+             reduced_schema.append(
+                 f"{field}({info.annotation}"
+                 + (
+                     f", default={info.default})"
+                     if info.default is not PydanticUndefined
+                     else ")"
+                 )
+                 + (f": {info.description}" if include_description else "")
+             )
+         return "\n".join(reduced_schema)
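
And a usage sketch tying `inject_types` and `reduce_model_schema` together. The `Purchase` model and fruit values echo the README examples; the output format follows the f-string in `reduce_model_schema` above:

```python
from typing import Literal

from pydantic import BaseModel, Field

from py_ai_toolkit.adapters import PydanticAdapter


class Purchase(BaseModel):
    product: str = Field(description="Item being bought")
    quantity: int = Field(description="How many units")


adapter = PydanticAdapter()

# Narrow `product` to a closed set of values at runtime; per inject_types
# above, the result is a subclass of Purchase named "PurchaseModel".
FruitPurchase = adapter.inject_types(
    Purchase,
    [("product", Literal["apple", "banana", "orange"])],
)

# Compact, prompt-friendly schema string: one "field(type): description"
# line per field, with defaults included when present.
print(adapter.reduce_model_schema(FruitPurchase))
```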