prompt-caller 0.1.4__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: prompt_caller
3
- Version: 0.1.4
3
+ Version: 0.2.0
4
4
  Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
5
5
  Home-page: https://github.com/ThiNepo/prompt-caller
6
6
  Author: Thiago Nepomuceno
@@ -11,12 +11,13 @@ Classifier: Operating System :: OS Independent
11
11
  Description-Content-Type: text/markdown
12
12
  License-File: LICENSE
13
13
  Requires-Dist: pyyaml>=6.0.2
14
- Requires-Dist: python-dotenv>=1.0.1
14
+ Requires-Dist: python-dotenv>=1.2.1
15
15
  Requires-Dist: Jinja2>=3.1.4
16
- Requires-Dist: langchain-openai>=0.3.5
17
- Requires-Dist: langchain-google-genai==2.1.5
18
- Requires-Dist: openai>=1.63.0
19
- Requires-Dist: pillow>=11.0.0
16
+ Requires-Dist: langchain-core>=1.2.7
17
+ Requires-Dist: langchain-openai>=1.1.7
18
+ Requires-Dist: langchain-google-genai>=4.2.0
19
+ Requires-Dist: openai>=2.16.0
20
+ Requires-Dist: pillow>=12.1.0
20
21
 
21
22
  # PromptCaller
22
23
 
@@ -1,12 +1,16 @@
1
1
  import os
2
2
  import re
3
+ import ast
3
4
 
4
5
  import requests
5
6
  import yaml
6
7
  from dotenv import load_dotenv
7
8
  from jinja2 import Template
9
+ from langgraph.types import Command
8
10
  from langchain_core.tools import tool
9
11
  from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
12
+ from langchain.agents import create_agent
13
+ from langchain.agents.middleware import wrap_tool_call
10
14
  from langchain_openai import ChatOpenAI
11
15
  from langchain_google_genai import ChatGoogleGenerativeAI
12
16
  from PIL import Image
@@ -157,86 +161,123 @@ class PromptCaller:
157
161
 
158
162
  return response
159
163
 
164
def _create_pdf_middleware(self):
    """Build a tool-call middleware that surfaces PDF file parts to the model.

    Returns a `wrap_tool_call`-decorated handler that runs the tool, then
    inspects the tool result's `content`. If the content is a list whose
    first element is a dict describing a PDF file part (has an
    ``input_file`` key and ``"pdf"`` in its ``file_data`` value), the
    middleware returns a langgraph ``Command`` that appends BOTH the raw
    tool result and a ``HumanMessage`` carrying the file part, so the
    model can actually see the PDF. Otherwise the tool result is returned
    unchanged.
    """

    @wrap_tool_call
    def handle_pdf_response(request, handler):
        # Execute the actual tool
        result = handler(request)

        # Check if result content is pdf data
        if hasattr(result, "content"):
            content = result.content
            # Tool output may arrive as a stringified list; decode it safely
            # (literal_eval, never eval).
            if isinstance(content, str) and content.startswith("["):
                try:
                    content = ast.literal_eval(content)
                except (ValueError, SyntaxError):
                    pass

            if (
                isinstance(content, list)
                and content
                and isinstance(content[0], dict)
                and "input_file" in content[0]
                # BUGFIX: guard the lookup — the original indexed
                # content[0]["file_data"] directly, which raises KeyError
                # when a part has "input_file" but no "file_data".
                and "pdf" in content[0].get("file_data", "")
            ):
                # Use Command to add both tool result and file part to messages
                return Command(
                    update={"messages": [result, HumanMessage(content=content)]}
                )

        return result  # Return normal result

    return handle_pdf_response
197
+
198
def _create_image_middleware(self):
    """Build a tool-call middleware that surfaces image parts to the model.

    The returned handler runs the tool, then checks whether the result's
    `content` is a list whose first element is a dict containing an
    ``image_url`` entry. When it is, a langgraph ``Command`` is returned
    that appends both the raw tool result and a ``HumanMessage`` carrying
    the image content; otherwise the tool result passes through untouched.
    """

    @wrap_tool_call
    def handle_image_response(request, handler):
        # Run the underlying tool first.
        response = handler(request)

        if not hasattr(response, "content"):
            return response

        payload = response.content
        # A stringified list may come back from the tool; decode it
        # safely with literal_eval (never eval).
        if isinstance(payload, str) and payload.startswith("["):
            try:
                payload = ast.literal_eval(payload)
            except (ValueError, SyntaxError):
                pass

        looks_like_image = (
            isinstance(payload, list)
            and bool(payload)
            and isinstance(payload[0], dict)
            and "image_url" in payload[0]
        )
        if looks_like_image:
            # Append both the raw tool result and an image-bearing human
            # message to the conversation state via Command.
            return Command(
                update={"messages": [response, HumanMessage(content=payload)]}
            )

        return response

    return handle_image_response
230
+
160
231
def agent(
    self, promptName, context=None, tools=None, output=None, allowed_steps=10
):
    """Run the named prompt as a tool-using agent and return its final answer.

    Loads the prompt (config + messages), builds a chat model from the
    configuration, wraps the supplied callables as LangChain tools, and
    drives a `create_agent` graph with the image/PDF middlewares attached.

    Args:
        promptName: Name of the prompt to load via ``loadPrompt``.
        context: Optional template context passed through to the prompt.
        tools: Optional iterable of plain callables; each is wrapped with
            the ``tool`` decorator before being handed to the agent.
        output: Optional Pydantic model class used as the structured
            response format; takes precedence over any ``output`` section
            in the prompt configuration.
        allowed_steps: Upper bound passed as the graph ``recursion_limit``.

    Returns:
        The structured response object when a response format was set and
        the agent produced one; otherwise the last message of the run.
    """
    configuration, messages = self.loadPrompt(promptName, context)

    # Handle structured output from config
    dynamicOutput = None
    if output is None and "output" in configuration:
        # pop() so the key never reaches the chat-model constructor
        dynamicOutput = configuration.pop("output")

    chat = self._createChat(configuration)

    # Prepare tools
    if tools is None:
        tools = []
    tools = [tool(t) for t in tools]

    # Handle response format (structured output): explicit `output`
    # argument wins over the prompt-config-declared schema.
    response_format = None
    if output:
        response_format = output
    elif dynamicOutput:
        response_format = self.createPydanticModel(dynamicOutput)

    # Extract system message for create_agent; everything else is passed
    # as the conversation input. NOTE(review): if the prompt contains
    # several SystemMessages, only the last one's content survives here.
    system_prompt = None
    user_messages = []
    for msg in messages:
        if isinstance(msg, SystemMessage):
            system_prompt = msg.content
        else:
            user_messages.append(msg)

    # Create and invoke agent
    agent_graph = create_agent(
        model=chat,
        tools=tools,
        system_prompt=system_prompt,
        response_format=response_format,
        middleware=[
            self._create_image_middleware(),
            self._create_pdf_middleware(),
        ],
    )

    result = agent_graph.invoke(
        {"messages": user_messages}, config={"recursion_limit": allowed_steps}
    )

    # Return structured output or last message
    if response_format and result.get("structured_response"):
        return result["structured_response"]
    return result["messages"][-1]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: prompt_caller
3
- Version: 0.1.4
3
+ Version: 0.2.0
4
4
  Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
5
5
  Home-page: https://github.com/ThiNepo/prompt-caller
6
6
  Author: Thiago Nepomuceno
@@ -11,12 +11,13 @@ Classifier: Operating System :: OS Independent
11
11
  Description-Content-Type: text/markdown
12
12
  License-File: LICENSE
13
13
  Requires-Dist: pyyaml>=6.0.2
14
- Requires-Dist: python-dotenv>=1.0.1
14
+ Requires-Dist: python-dotenv>=1.2.1
15
15
  Requires-Dist: Jinja2>=3.1.4
16
- Requires-Dist: langchain-openai>=0.3.5
17
- Requires-Dist: langchain-google-genai==2.1.5
18
- Requires-Dist: openai>=1.63.0
19
- Requires-Dist: pillow>=11.0.0
16
+ Requires-Dist: langchain-core>=1.2.7
17
+ Requires-Dist: langchain-openai>=1.1.7
18
+ Requires-Dist: langchain-google-genai>=4.2.0
19
+ Requires-Dist: openai>=2.16.0
20
+ Requires-Dist: pillow>=12.1.0
20
21
 
21
22
  # PromptCaller
22
23
 
@@ -0,0 +1,8 @@
1
+ pyyaml>=6.0.2
2
+ python-dotenv>=1.2.1
3
+ Jinja2>=3.1.4
4
+ langchain-core>=1.2.7
5
+ langchain-openai>=1.1.7
6
+ langchain-google-genai>=4.2.0
7
+ openai>=2.16.0
8
+ pillow>=12.1.0
@@ -35,7 +35,7 @@ class BdistWheelCommand(bdist_wheel):
35
35
 
36
36
  setuptools.setup(
37
37
  name="prompt_caller",
38
- version="0.1.4",
38
+ version="0.2.0",
39
39
  author="Thiago Nepomuceno",
40
40
  author_email="thiago@neps.academy",
41
41
  description="This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API",
@@ -51,12 +51,13 @@ setuptools.setup(
51
51
  ],
52
52
  install_requires=[
53
53
  "pyyaml>=6.0.2",
54
- "python-dotenv>=1.0.1",
54
+ "python-dotenv>=1.2.1",
55
55
  "Jinja2>=3.1.4",
56
- "langchain-openai>=0.3.5",
57
- "langchain-google-genai==2.1.5",
58
- "openai>=1.63.0",
59
- "pillow>=11.0.0",
56
+ "langchain-core>=1.2.7",
57
+ "langchain-openai>=1.1.7",
58
+ "langchain-google-genai>=4.2.0",
59
+ "openai>=2.16.0",
60
+ "pillow>=12.1.0",
60
61
  ],
61
62
  cmdclass={"sdist": SdistCommand, "bdist_wheel": BdistWheelCommand},
62
63
  )
@@ -1,7 +0,0 @@
1
- pyyaml>=6.0.2
2
- python-dotenv>=1.0.1
3
- Jinja2>=3.1.4
4
- langchain-openai>=0.3.5
5
- langchain-google-genai==2.1.5
6
- openai>=1.63.0
7
- pillow>=11.0.0
File without changes
File without changes
File without changes