prompt-caller 0.1.3.tar.gz → 0.2.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/PKG-INFO +7 -5
- prompt_caller-0.2.0/prompt_caller/prompt_caller.py +283 -0
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/prompt_caller.egg-info/PKG-INFO +7 -5
- prompt_caller-0.2.0/prompt_caller.egg-info/requires.txt +8 -0
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/setup.py +7 -5
- prompt_caller-0.1.3/prompt_caller/prompt_caller.py +0 -224
- prompt_caller-0.1.3/prompt_caller.egg-info/requires.txt +0 -6
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/LICENSE +0 -0
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/README.md +0 -0
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/prompt_caller/__init__.py +0 -0
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/prompt_caller/__main__.py +0 -0
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/prompt_caller.egg-info/SOURCES.txt +0 -0
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/prompt_caller.egg-info/dependency_links.txt +0 -0
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/prompt_caller.egg-info/top_level.txt +0 -0
- {prompt_caller-0.1.3 → prompt_caller-0.2.0}/setup.cfg +0 -0
{prompt_caller-0.1.3 → prompt_caller-0.2.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prompt_caller
-Version: 0.1.3
+Version: 0.2.0
 Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
 Home-page: https://github.com/ThiNepo/prompt-caller
 Author: Thiago Nepomuceno
@@ -11,11 +11,13 @@ Classifier: Operating System :: OS Independent
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: pyyaml>=6.0.2
-Requires-Dist: python-dotenv>=1.
+Requires-Dist: python-dotenv>=1.2.1
 Requires-Dist: Jinja2>=3.1.4
-Requires-Dist: langchain-
-Requires-Dist: openai>=1.
-Requires-Dist:
+Requires-Dist: langchain-core>=1.2.7
+Requires-Dist: langchain-openai>=1.1.7
+Requires-Dist: langchain-google-genai>=4.2.0
+Requires-Dist: openai>=2.16.0
+Requires-Dist: pillow>=12.1.0
 
 # PromptCaller
 
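For orientation before the code diff below: a `.prompt` file pairs a YAML header (model configuration plus an optional `output` schema) with a Jinja2-templated body of JSX-like role tags, and 0.2.0 adds support for `key="value"` attributes on those tags. A hypothetical `prompts/describe.prompt` in the new format; the model name, output fields, and URL are illustrative, not taken from the package:

```
---
model: gpt-4o-mini
temperature: 0
output:
  summary: one-sentence summary of the picture
  mood: overall mood of the picture
---
<system>You are a helpful assistant.</system>
<user>Describe this picture for {{ audience }}.</user>
<image tag="image 1">https://example.com/picture.png</image>
```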
prompt_caller-0.2.0/prompt_caller/prompt_caller.py
@@ -0,0 +1,283 @@
+import os
+import re
+import ast
+
+import requests
+import yaml
+from dotenv import load_dotenv
+from jinja2 import Template
+from langgraph.types import Command
+from langchain_core.tools import tool
+from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
+from langchain.agents import create_agent
+from langchain.agents.middleware import wrap_tool_call
+from langchain_openai import ChatOpenAI
+from langchain_google_genai import ChatGoogleGenerativeAI
+from PIL import Image
+from pydantic import BaseModel, Field, create_model
+
+from io import BytesIO
+import base64
+
+load_dotenv()
+
+
+class PromptCaller:
+    def __init__(self, promptPath="prompts"):
+        self.promptPath = promptPath
+
+    def _loadPrompt(self, file_path):
+        with open(file_path, "r", encoding="utf-8") as file:
+            content = file.read()
+
+        # Split YAML header and the body
+        header, body = content.split("---", 2)[1:]
+
+        # Parse the YAML header
+        model_config = yaml.safe_load(header.strip())
+
+        # Return the configuration and the raw JSX-like body
+        return model_config, body.strip()
+
+    def _renderTemplate(self, body, context):
+        template = Template(body)
+        return template.render(context)
+
+    def _parseJSXBody(self, body):
+        elements = []
+        # 1. Regex to find tags, attributes string, and content
+        tag_pattern = r"<(system|user|assistant|image)([^>]*)>(.*?)</\1>"
+
+        # 2. Regex to find key="value" pairs within the attributes string
+        attr_pattern = r'(\w+)\s*=\s*"(.*?)"'
+
+        matches = re.findall(tag_pattern, body, re.DOTALL)
+
+        for tag, attrs_string, content in matches:
+            # 3. Parse the attributes string (e.g., ' tag="image 1"') into a dict
+            attributes = {}
+            if attrs_string:
+                attr_matches = re.findall(attr_pattern, attrs_string)
+                for key, value in attr_matches:
+                    attributes[key] = value
+
+            element = {"role": tag, "content": content.strip()}
+
+            # 4. Add the attributes to our element dict if they exist
+            if attributes:
+                element["attributes"] = attributes
+
+            elements.append(element)
+
+        return elements
+
+    def _createChat(self, configuration):
+        if configuration.get("model") is not None and configuration.get(
+            "model"
+        ).startswith("gemini"):
+            return ChatGoogleGenerativeAI(**configuration)
+        else:
+            return ChatOpenAI(**configuration)
+
+    def getImageBase64(self, url: str) -> str:
+        response = requests.get(url)
+        response.raise_for_status()
+        img = Image.open(BytesIO(response.content))
+        buffered = BytesIO()
+        img.save(buffered, format="PNG")
+        img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+        return f"data:image/png;base64,{img_base64}"
+
+    def loadPrompt(self, promptName, context=None):
+        # initialize context
+        if context is None:
+            context = {}
+
+        configuration, template = self._loadPrompt(
+            os.path.join(self.promptPath, f"{promptName}.prompt")
+        )
+
+        template = self._renderTemplate(template, context)
+
+        parsedMessages = self._parseJSXBody(template)
+
+        messages = []
+
+        for message in parsedMessages:
+            if message.get("role") == "system":
+                messages.append(SystemMessage(content=message.get("content")))
+
+            if message.get("role") == "user":
+                messages.append(HumanMessage(content=message.get("content")))
+
+            if message.get("role") == "image":
+                base64_image = message.get("content")
+
+                if base64_image.startswith("http"):
+                    base64_image = self.getImageBase64(base64_image)
+
+                content = [
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": base64_image},
+                    }
+                ]
+
+                tag = message.get("attributes", {}).get("tag")
+                if tag:
+                    content.append({"type": "text", "text": f"({tag})"})
+
+                messages.append(HumanMessage(content=content))
+
+        return configuration, messages
+
+    def createPydanticModel(self, dynamic_dict):
+        # Create a dynamic Pydantic model from the dictionary
+        fields = {
+            key: (str, Field(description=f"Description for {key}"))
+            for key in dynamic_dict.keys()
+        }
+        # Dynamically create the Pydantic model with the fields
+        return create_model("DynamicModel", **fields)
+
+    def call(self, promptName, context=None):
+        configuration, messages = self.loadPrompt(promptName, context)
+
+        output = None
+
+        if "output" in configuration:
+            output = configuration.get("output")
+            configuration.pop("output")
+
+        chat = self._createChat(configuration)
+
+        if output:
+            dynamicModel = self.createPydanticModel(output)
+            chat = chat.with_structured_output(dynamicModel)
+
+        response = chat.invoke(messages)
+
+        return response
+
+    def _create_pdf_middleware(self):
+        """Middleware to handle tool responses that contain PDF content."""
+
+        @wrap_tool_call
+        def handle_pdf_response(request, handler):
+            # Execute the actual tool
+            result = handler(request)
+
+            # Check if result content is PDF data
+            if hasattr(result, "content"):
+                content = result.content
+                # Try to parse if it's a string representation of a list
+                if isinstance(content, str) and content.startswith("["):
+                    try:
+                        content = ast.literal_eval(content)
+                    except (ValueError, SyntaxError):
+                        pass
+
+                if (
+                    isinstance(content, list)
+                    and content
+                    and isinstance(content[0], dict)
+                    and "file_data" in content[0]
+                    and "pdf" in content[0]["file_data"]
+                ):
+                    # Use Command to add both tool result and PDF to messages
+                    return Command(
+                        update={"messages": [result, HumanMessage(content=content)]}
+                    )
+
+            return result  # Return normal result
+
+        return handle_pdf_response
+
+    def _create_image_middleware(self):
+        """Middleware to handle tool responses that contain image content."""
+
+        @wrap_tool_call
+        def handle_image_response(request, handler):
+            # Execute the actual tool
+            result = handler(request)
+
+            # Check if result content is image data (list with image_url dict)
+            if hasattr(result, "content"):
+                content = result.content
+                # Try to parse if it's a string representation of a list
+                if isinstance(content, str) and content.startswith("["):
+                    try:
+                        content = ast.literal_eval(content)
+                    except (ValueError, SyntaxError):
+                        pass
+
+                if (
+                    isinstance(content, list)
+                    and content
+                    and isinstance(content[0], dict)
+                    and "image_url" in content[0]
+                ):
+                    # Use Command to add both tool result and image to messages
+                    return Command(
+                        update={"messages": [result, HumanMessage(content=content)]}
+                    )
+
+            return result  # Return normal result
+
+        return handle_image_response
+
+    def agent(
+        self, promptName, context=None, tools=None, output=None, allowed_steps=10
+    ):
+        configuration, messages = self.loadPrompt(promptName, context)
+
+        # Handle structured output from config
+        dynamicOutput = None
+        if output is None and "output" in configuration:
+            dynamicOutput = configuration.pop("output")
+
+        chat = self._createChat(configuration)
+
+        # Prepare tools
+        if tools is None:
+            tools = []
+        tools = [tool(t) for t in tools]
+
+        # Handle response format (structured output)
+        response_format = None
+        if output:
+            response_format = output
+        elif dynamicOutput:
+            response_format = self.createPydanticModel(dynamicOutput)
+
+        # Extract system message for create_agent
+        system_prompt = None
+        user_messages = []
+        for msg in messages:
+            if isinstance(msg, SystemMessage):
+                system_prompt = msg.content
+            else:
+                user_messages.append(msg)
+
+        # Create and invoke agent
+        agent_graph = create_agent(
+            model=chat,
+            tools=tools,
+            system_prompt=system_prompt,
+            response_format=response_format,
+            middleware=[
+                self._create_image_middleware(),
+                self._create_pdf_middleware(),
+            ],
+        )
+
+        result = agent_graph.invoke(
+            {"messages": user_messages}, config={"recursion_limit": allowed_steps}
+        )
+
+        # Return structured output or last message
+        if response_format and result.get("structured_response"):
+            return result["structured_response"]
+        return result["messages"][-1]
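A minimal usage sketch of the rewritten API, assuming the hypothetical `describe.prompt` above and an `OPENAI_API_KEY` in the environment; `get_weather` is an illustrative tool, not part of the package:

```python
from prompt_caller.prompt_caller import PromptCaller

pc = PromptCaller(promptPath="prompts")

# call(): the header's optional `output` mapping becomes a dynamic Pydantic
# model that is enforced through with_structured_output().
result = pc.call("describe", context={"audience": "children"})
print(result)


def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f"It is sunny in {city}."


# agent(): plain functions are wrapped with @tool and run through langchain's
# create_agent with the image and PDF middleware attached.
answer = pc.agent("describe", context={"audience": "children"}, tools=[get_weather])
print(answer)
```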
{prompt_caller-0.1.3 → prompt_caller-0.2.0}/prompt_caller.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prompt_caller
-Version: 0.1.3
+Version: 0.2.0
 Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
 Home-page: https://github.com/ThiNepo/prompt-caller
 Author: Thiago Nepomuceno
@@ -11,11 +11,13 @@ Classifier: Operating System :: OS Independent
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: pyyaml>=6.0.2
-Requires-Dist: python-dotenv>=1.
+Requires-Dist: python-dotenv>=1.2.1
 Requires-Dist: Jinja2>=3.1.4
-Requires-Dist: langchain-
-Requires-Dist: openai>=1.
-Requires-Dist:
+Requires-Dist: langchain-core>=1.2.7
+Requires-Dist: langchain-openai>=1.1.7
+Requires-Dist: langchain-google-genai>=4.2.0
+Requires-Dist: openai>=2.16.0
+Requires-Dist: pillow>=12.1.0
 
 # PromptCaller
 
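The two `wrap_tool_call` middleware in the new module exist so that multimodal tool results actually reach the model: when a tool returns a list of content blocks (or its string repr, which is recovered with `ast.literal_eval`), the middleware re-injects the blocks as a `HumanMessage` via `Command`. A sketch of a tool return value that would trigger the image path; the URL is a placeholder:

```python
# A result like this, a list whose first element carries an "image_url"
# block, is detected by handle_image_response and appended to the
# conversation as a HumanMessage the model can actually see.
def fetch_chart() -> list:
    """Return a chart as an image content block."""
    return [{"type": "image_url", "image_url": {"url": "https://example.com/chart.png"}}]
```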
{prompt_caller-0.1.3 → prompt_caller-0.2.0}/setup.py
@@ -35,7 +35,7 @@ class BdistWheelCommand(bdist_wheel):
 
 setuptools.setup(
     name="prompt_caller",
-    version="0.1.3",
+    version="0.2.0",
     author="Thiago Nepomuceno",
     author_email="thiago@neps.academy",
     description="This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API",
@@ -51,11 +51,13 @@ setuptools.setup(
     ],
     install_requires=[
         "pyyaml>=6.0.2",
-        "python-dotenv>=1.
+        "python-dotenv>=1.2.1",
         "Jinja2>=3.1.4",
-        "langchain-
-        "openai>=1.
-        "
+        "langchain-core>=1.2.7",
+        "langchain-openai>=1.1.7",
+        "langchain-google-genai>=4.2.0",
+        "openai>=2.16.0",
+        "pillow>=12.1.0",
     ],
     cmdclass={"sdist": SdistCommand, "bdist_wheel": BdistWheelCommand},
 )
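`createPydanticModel` (present in both versions) turns the header's `output` mapping into a model with one `str` field per key. For the hypothetical header shown earlier, the generated model is equivalent to this hand-written sketch:

```python
from pydantic import Field, create_model

# Equivalent of createPydanticModel({"summary": ..., "mood": ...}):
DynamicModel = create_model(
    "DynamicModel",
    summary=(str, Field(description="Description for summary")),
    mood=(str, Field(description="Description for mood")),
)
```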
prompt_caller-0.1.3/prompt_caller/prompt_caller.py
@@ -1,224 +0,0 @@
-import os
-import re
-
-import requests
-import yaml
-from dotenv import load_dotenv
-from jinja2 import Template
-from langchain_core.tools import tool
-from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
-from langchain_openai import ChatOpenAI
-from langchain_google_genai import ChatGoogleGenerativeAI
-from PIL import Image
-from pydantic import BaseModel, Field, create_model
-
-from io import BytesIO
-import base64
-
-load_dotenv()
-
-
-class PromptCaller:
-
-    def __init__(self, promptPath="prompts"):
-        self.promptPath = promptPath
-
-    def _loadPrompt(self, file_path):
-        with open(file_path, "r", encoding="utf-8") as file:
-            content = file.read()
-
-        # Split YAML header and the body
-        header, body = content.split("---", 2)[1:]
-
-        # Parse the YAML header
-        model_config = yaml.safe_load(header.strip())
-
-        # Step 2: Parse the JSX body and return it
-        return model_config, body.strip()
-
-    def _renderTemplate(self, body, context):
-        template = Template(body)
-        return template.render(context)
-
-    def _parseJSXBody(self, body):
-        elements = []
-        tag_pattern = r"<(system|user|assistant|image)>(.*?)</\1>"
-
-        matches = re.findall(tag_pattern, body, re.DOTALL)
-
-        for tag, content in matches:
-            elements.append({"role": tag, "content": content.strip()})
-
-        return elements
-
-    def _createChat(self, configuration):
-        if configuration.get("model") is not None and configuration.get(
-            "model"
-        ).startswith("gemini"):
-            return ChatGoogleGenerativeAI(**configuration)
-        else:
-            return ChatOpenAI(**configuration)
-
-    def getImageBase64(self, url: str) -> str:
-        response = requests.get(url)
-        response.raise_for_status()
-        img = Image.open(BytesIO(response.content))
-        buffered = BytesIO()
-        img.save(buffered, format="PNG")
-        img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
-        return f"data:image/png;base64,{img_base64}"
-
-    def loadPrompt(self, promptName, context=None):
-        # initialize context
-        if context is None:
-            context = {}
-
-        configuration, template = self._loadPrompt(
-            os.path.join(self.promptPath, f"{promptName}.prompt")
-        )
-
-        template = self._renderTemplate(template, context)
-
-        parsedMessages = self._parseJSXBody(template)
-
-        messages = []
-
-        for message in parsedMessages:
-            if message.get("role") == "system":
-                messages.append(SystemMessage(content=message.get("content")))
-
-            if message.get("role") == "user":
-                messages.append(HumanMessage(content=message.get("content")))
-
-            if message.get("role") == "image":
-                base64_image = message.get("content")
-
-                if base64_image.startswith("http"):
-                    base64_image = self.getImageBase64(base64_image)
-
-                messages.append(
-                    HumanMessage(
-                        content=[
-                            {
-                                "type": "image_url",
-                                "image_url": {"url": base64_image},
-                            }
-                        ]
-                    )
-                )
-
-        return configuration, messages
-
-    def createPydanticModel(self, dynamic_dict):
-        # Create a dynamic Pydantic model from the dictionary
-        fields = {
-            key: (str, Field(description=f"Description for {key}"))
-            for key in dynamic_dict.keys()
-        }
-        # Dynamically create the Pydantic model with the fields
-        return create_model("DynamicModel", **fields)
-
-    def call(self, promptName, context=None):
-
-        configuration, messages = self.loadPrompt(promptName, context)
-
-        output = None
-
-        if "output" in configuration:
-            output = configuration.get("output")
-            configuration.pop("output")
-
-        chat = self._createChat(configuration)
-
-        if output:
-            dynamicModel = self.createPydanticModel(output)
-            chat = chat.with_structured_output(dynamicModel)
-
-        response = chat.invoke(messages)
-
-        return response
-
-    def agent(
-        self, promptName, context=None, tools=None, output=None, allowed_steps=10
-    ):
-
-        configuration, messages = self.loadPrompt(promptName, context)
-
-        dynamicOutput = None
-
-        if output is None and "output" in configuration:
-            dynamicOutput = configuration.get("output")
-            configuration.pop("output")
-
-        for message in messages:
-            if isinstance(message, SystemMessage):
-                message.content += "\nOnly use the tool DynamicModel when providing an output call."
-                break
-
-        chat = self._createChat(configuration)
-
-        # Register the tools
-        if tools is None:
-            tools = []
-
-        # Transform functions in tools
-        tools = [tool(t) for t in tools]
-
-        tools_dict = {t.name.lower(): t for t in tools}
-
-        if output:
-            tools.extend([output])
-            tools_dict[output.__name__.lower()] = output
-        elif dynamicOutput:
-            dynamicModel = self.createPydanticModel(dynamicOutput)
-
-            tools.extend([dynamicModel])
-            tools_dict["dynamicmodel"] = dynamicModel
-
-        chat = chat.bind_tools(tools)
-
-        try:
-            # First LLM invocation
-            response = chat.invoke(messages)
-            messages.append(response)
-
-            steps = 0
-            while response.tool_calls and steps < allowed_steps:
-                for tool_call in response.tool_calls:
-                    tool_name = tool_call["name"].lower()
-
-                    # If it's the final formatting tool, validate and return
-                    if dynamicOutput and tool_name == "dynamicmodel":
-                        return dynamicModel.model_validate(tool_call["args"])
-
-                    if output and tool_name == output.__name__.lower():
-                        return output.model_validate(tool_call["args"])
-
-                    selected_tool = tools_dict.get(tool_name)
-                    if not selected_tool:
-                        raise ValueError(f"Unknown tool: {tool_name}")
-
-                    # Invoke the selected tool with provided arguments
-                    tool_response = selected_tool.invoke(tool_call)
-                    messages.append(tool_response)
-
-                # If the latest message is a ToolMessage, re-invoke the LLM
-                if isinstance(messages[-1], ToolMessage):
-                    response = chat.invoke(messages)
-                    messages.append(response)
-                else:
-                    break
-
-                steps += 1
-
-            # Final LLM call if the last message is still a ToolMessage
-            if isinstance(messages[-1], ToolMessage):
-                response = chat.invoke(messages)
-                messages.append(response)
-
-            return response
-
-        except Exception as e:
-            print(e)
-            # Replace with appropriate logging in production
-            raise RuntimeError("Error during agent process") from e
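The removed 0.1.3 `agent()` above hand-rolled the tool loop: it bound tools with `bind_tools`, stepped through `response.tool_calls` itself, and faked structured output by registering the output model as a pseudo-tool named `dynamicmodel`. In 0.2.0 the same request goes through `create_agent`, which runs the loop and enforces the schema via `response_format`. A sketch of the call site, unchanged across versions; `forecast.prompt` and the reused `get_weather` tool are illustrative:

```python
from pydantic import BaseModel, Field


class Weather(BaseModel):
    city: str = Field(description="City name")
    forecast: str = Field(description="Short forecast")


# 0.1.3 intercepted the model's tool call and ran Weather.model_validate()
# on its arguments; 0.2.0 returns create_agent's structured_response directly.
weather = pc.agent("forecast", tools=[get_weather], output=Weather)
```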
Files without changes: LICENSE, README.md, prompt_caller/__init__.py, prompt_caller/__main__.py, prompt_caller.egg-info/SOURCES.txt, prompt_caller.egg-info/dependency_links.txt, prompt_caller.egg-info/top_level.txt, setup.cfg.