prompt-caller 0.1.1__tar.gz → 0.1.3__tar.gz

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: prompt_caller
3
- Version: 0.1.1
3
+ Version: 0.1.3
4
4
  Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
5
5
  Home-page: https://github.com/ThiNepo/prompt-caller
6
6
  Author: Thiago Nepomuceno
@@ -8,6 +8,7 @@ from jinja2 import Template
8
8
  from langchain_core.tools import tool
9
9
  from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
10
10
  from langchain_openai import ChatOpenAI
11
+ from langchain_google_genai import ChatGoogleGenerativeAI
11
12
  from PIL import Image
12
13
  from pydantic import BaseModel, Field, create_model
13
14
 
@@ -50,6 +51,14 @@ class PromptCaller:
50
51
 
51
52
  return elements
52
53
 
54
+ def _createChat(self, configuration):
55
+ if configuration.get("model") is not None and configuration.get(
56
+ "model"
57
+ ).startswith("gemini"):
58
+ return ChatGoogleGenerativeAI(**configuration)
59
+ else:
60
+ return ChatOpenAI(**configuration)
61
+
53
62
  def getImageBase64(self, url: str) -> str:
54
63
  response = requests.get(url)
55
64
  response.raise_for_status()
@@ -119,7 +128,7 @@ class PromptCaller:
119
128
  output = configuration.get("output")
120
129
  configuration.pop("output")
121
130
 
122
- chat = ChatOpenAI(**configuration)
131
+ chat = self._createChat(configuration)
123
132
 
124
133
  if output:
125
134
  dynamicModel = self.createPydanticModel(output)
@@ -129,14 +138,16 @@ class PromptCaller:
129
138
 
130
139
  return response
131
140
 
132
- def agent(self, promptName, context=None, tools=None, allowed_steps=3):
141
+ def agent(
142
+ self, promptName, context=None, tools=None, output=None, allowed_steps=10
143
+ ):
133
144
 
134
145
  configuration, messages = self.loadPrompt(promptName, context)
135
146
 
136
- output = None
147
+ dynamicOutput = None
137
148
 
138
- if "output" in configuration:
139
- output = configuration.get("output")
149
+ if output is None and "output" in configuration:
150
+ dynamicOutput = configuration.get("output")
140
151
  configuration.pop("output")
141
152
 
142
153
  for message in messages:
@@ -144,7 +155,7 @@ class PromptCaller:
144
155
  message.content += "\nOnly use the tool DynamicModel when providing an output call."
145
156
  break
146
157
 
147
- chat = ChatOpenAI(**configuration)
158
+ chat = self._createChat(configuration)
148
159
 
149
160
  # Register the tools
150
161
  if tools is None:
@@ -156,10 +167,13 @@ class PromptCaller:
156
167
  tools_dict = {t.name.lower(): t for t in tools}
157
168
 
158
169
  if output:
159
- dynamicModel = self.createPydanticModel(output)
170
+ tools.extend([output])
171
+ tools_dict[output.__name__.lower()] = output
172
+ elif dynamicOutput:
173
+ dynamicModel = self.createPydanticModel(dynamicOutput)
160
174
 
161
- tools.extend([dynamicModel])
162
- tools_dict["dynamicmodel"] = dynamicModel
175
+ tools.extend([dynamicModel])
176
+ tools_dict["dynamicmodel"] = dynamicModel
163
177
 
164
178
  chat = chat.bind_tools(tools)
165
179
 
@@ -174,9 +188,12 @@ class PromptCaller:
174
188
  tool_name = tool_call["name"].lower()
175
189
 
176
190
  # If it's the final formatting tool, validate and return
177
- if tool_name == "dynamicmodel":
191
+ if dynamicOutput and tool_name == "dynamicmodel":
178
192
  return dynamicModel.model_validate(tool_call["args"])
179
193
 
194
+ if output and tool_name == output.__name__.lower():
195
+ return output.model_validate(tool_call["args"])
196
+
180
197
  selected_tool = tools_dict.get(tool_name)
181
198
  if not selected_tool:
182
199
  raise ValueError(f"Unknown tool: {tool_name}")
@@ -202,5 +219,6 @@ class PromptCaller:
202
219
  return response
203
220
 
204
221
  except Exception as e:
222
+ print(e)
205
223
  # Replace with appropriate logging in production
206
224
  raise RuntimeError("Error during agent process") from e
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: prompt_caller
3
- Version: 0.1.1
3
+ Version: 0.1.3
4
4
  Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
5
5
  Home-page: https://github.com/ThiNepo/prompt-caller
6
6
  Author: Thiago Nepomuceno
@@ -35,7 +35,7 @@ class BdistWheelCommand(bdist_wheel):
35
35
 
36
36
  setuptools.setup(
37
37
  name="prompt_caller",
38
- version="0.1.1",
38
+ version="0.1.3",
39
39
  author="Thiago Nepomuceno",
40
40
  author_email="thiago@neps.academy",
41
41
  description="This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API",
File without changes
File without changes
File without changes