prompt-caller 0.0.4__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,13 +1,19 @@
1
1
  import os
2
2
  import re
3
3
 
4
+ import requests
4
5
  import yaml
5
6
  from dotenv import load_dotenv
6
7
  from jinja2 import Template
7
- from langchain_core.messages import HumanMessage, SystemMessage
8
+ from langchain_core.tools import tool
9
+ from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
8
10
  from langchain_openai import ChatOpenAI
11
+ from PIL import Image
9
12
  from pydantic import BaseModel, Field, create_model
10
13
 
14
+ from io import BytesIO
15
+ import base64
16
+
11
17
  load_dotenv()
12
18
 
13
19
 
@@ -32,7 +38,7 @@ class PromptCaller:
32
38
 
33
39
  def _parseJSXBody(self, body):
34
40
  elements = []
35
- tag_pattern = r"<(system|user|assistant)>(.*?)</\1>"
41
+ tag_pattern = r"<(system|user|assistant|image)>(.*?)</\1>"
36
42
 
37
43
  matches = re.findall(tag_pattern, body, re.DOTALL)
38
44
 
@@ -41,6 +47,15 @@ class PromptCaller:
41
47
 
42
48
  return elements
43
49
 
def getImageBase64(self, url: str) -> str:
    """Download an image and return it as a base64-encoded PNG data URL.

    Args:
        url: HTTP(S) location of the image to fetch.

    Returns:
        A ``data:image/png;base64,...`` string suitable for embedding in a
        vision-model message payload.

    Raises:
        requests.HTTPError: if the server responds with an error status.
        requests.Timeout: if the download does not complete in time.
    """
    # Bound the request so a stalled or unreachable server cannot hang the
    # caller indefinitely (requests has NO default timeout).
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    # Round-trip through PIL so any input format (JPEG, WebP, ...) is
    # normalized to PNG before encoding.
    img = Image.open(BytesIO(response.content))
    buffered = BytesIO()
    img.save(buffered, format="PNG")
    img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return f"data:image/png;base64,{img_base64}"
44
59
  def loadPrompt(self, promptName, context=None):
45
60
  # initialize context
46
61
  if context is None:
@@ -63,6 +78,23 @@ class PromptCaller:
63
78
  if message.get("role") == "user":
64
79
  messages.append(HumanMessage(content=message.get("content")))
65
80
 
81
+ if message.get("role") == "image":
82
+ base64_image = message.get("content")
83
+
84
+ if base64_image.startswith("http"):
85
+ base64_image = self.getImageBase64(base64_image)
86
+
87
+ messages.append(
88
+ HumanMessage(
89
+ content=[
90
+ {
91
+ "type": "image_url",
92
+ "image_url": {"url": base64_image},
93
+ }
94
+ ]
95
+ )
96
+ )
97
+
66
98
  return configuration, messages
67
99
 
68
100
  def createPydanticModel(self, dynamic_dict):
@@ -93,3 +125,79 @@ class PromptCaller:
93
125
  response = chat.invoke(messages)
94
126
 
95
127
  return response
def agent(self, promptName, context=None, tools=None, allowed_steps=3):
    """Run an agentic loop: invoke the model, execute requested tools, repeat.

    Loads the prompt, binds the supplied tools (plus an optional structured
    output model declared in the prompt's ``output`` section), and alternates
    between model invocations and tool executions until the model stops
    requesting tools, the structured output is produced, or ``allowed_steps``
    rounds are exhausted.

    Args:
        promptName: Name of the prompt file to load.
        context: Optional dict of template variables for the prompt.
        tools: Optional list of plain callables to expose as tools.
        allowed_steps: Maximum number of tool-execution rounds (default 3).

    Returns:
        Either a validated instance of the dynamic output model (when the
        prompt declares an ``output`` schema and the model calls it), or the
        final LLM response message.

    Raises:
        RuntimeError: wrapping any error raised during the agent loop.
    """
    configuration, messages = self.loadPrompt(promptName, context)

    # Pull the structured-output schema (if any) out of the LLM kwargs so it
    # is not passed to ChatOpenAI.
    output = configuration.pop("output", None)

    dynamicModel = None
    if output:
        # Only steer the model toward the DynamicModel tool when a structured
        # output schema is actually registered; otherwise the instruction
        # would reference a tool that does not exist.
        for message in messages:
            if isinstance(message, SystemMessage):
                message.content += "\nOnly use the tool DynamicModel when providing an output call."
                break

    chat = ChatOpenAI(**configuration)

    if tools is None:
        tools = []

    # Wrap plain callables as LangChain tools and index them by lowercase
    # name for dispatch.
    tools = [tool(t) for t in tools]
    tools_dict = {t.name.lower(): t for t in tools}

    if output:
        dynamicModel = self.createPydanticModel(output)
        tools.append(dynamicModel)
        tools_dict["dynamicmodel"] = dynamicModel

    chat = chat.bind_tools(tools)

    try:
        # First LLM invocation.
        response = chat.invoke(messages)
        messages.append(response)

        steps = 0
        while response.tool_calls and steps < allowed_steps:
            for tool_call in response.tool_calls:
                tool_name = tool_call["name"].lower()

                # Final formatting tool: validate and return the structured
                # output. Guarded so a stray "dynamicmodel" call cannot hit
                # an unbound name when no output schema was declared.
                if dynamicModel is not None and tool_name == "dynamicmodel":
                    return dynamicModel.model_validate(tool_call["args"])

                selected_tool = tools_dict.get(tool_name)
                if not selected_tool:
                    raise ValueError(f"Unknown tool: {tool_name}")

                # Invoke the selected tool with the provided arguments.
                tool_response = selected_tool.invoke(tool_call)
                messages.append(tool_response)

            # If the latest message is a ToolMessage, re-invoke the LLM.
            if isinstance(messages[-1], ToolMessage):
                response = chat.invoke(messages)
                messages.append(response)
            else:
                break

            steps += 1

        # Final LLM call if the last message is still a ToolMessage (step
        # budget exhausted mid-round).
        if isinstance(messages[-1], ToolMessage):
            response = chat.invoke(messages)
            messages.append(response)

        return response

    except Exception as e:
        # NOTE(review): replace with appropriate logging in production.
        raise RuntimeError("Error during agent process") from e
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: prompt_caller
3
- Version: 0.0.4
3
+ Version: 0.1.0
4
4
  Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
5
5
  Home-page: https://github.com/ThiNepo/prompt-caller
6
6
  Author: Thiago Nepomuceno
@@ -15,6 +15,7 @@ Requires-Dist: python-dotenv>=1.0.1
15
15
  Requires-Dist: Jinja2>=3.1.4
16
16
  Requires-Dist: langchain-openai>=0.3.5
17
17
  Requires-Dist: openai>=1.63.0
18
+ Requires-Dist: pillow>=11.0.0
18
19
 
19
20
  # PromptCaller
20
21
 
@@ -86,6 +87,36 @@ In this example:
86
87
  - The `expression` value `3+8/9` is injected into the user message.
87
88
  - The model will respond with both the result of the expression and an explanation, as specified in the `output` section of the prompt.
88
89
 
90
+ 3. **Using the agent feature:**
91
+
92
+ The `agent` method allows you to enhance the prompt's functionality by integrating external tools. Here’s an example where we evaluate a mathematical expression using Python’s `eval` with restricted builtins (note: this is illustrative only — `eval` is not a true sandbox and should never be used on untrusted input):
93
+
94
+ ```python
95
+ from prompt_caller import PromptCaller
96
+
97
+ ai = PromptCaller()
98
+
99
+ def evaluate_expression(expression: str):
100
+ """
101
+ Evaluate a math expression using eval.
102
+ """
103
+ safe_globals = {"__builtins__": None}
104
+ return eval(expression, safe_globals, {})
105
+
106
+ response = ai.agent(
107
+ "sample-agent", {"expression": "3+8/9"}, tools=[evaluate_expression]
108
+ )
109
+
110
+ print(response)
111
+ ```
112
+
113
+ In this example:
114
+
115
+ - The `agent` method is used to process the prompt while integrating external tools.
116
+ - The `evaluate_expression` function evaluates the mathematical expression with builtins disabled (a mitigation, not a full sandbox).
117
+ - The response includes the processed result based on the prompt and tool execution.
118
+
119
+
89
120
  ## How It Works
90
121
 
91
122
  1. **\_loadPrompt:** Loads the prompt file, splits the YAML header from the body, and parses them.
@@ -0,0 +1,8 @@
1
+ prompt_caller/__init__.py,sha256=4EGdeAJ_Ig7A-b-e17-nYbiXjckT7uL3to5lchMsoW4,41
2
+ prompt_caller/__main__.py,sha256=dJ0dYtVmnhZuoV79R6YiAIta1ZkUKb-TEX4VEuYbgk0,139
3
+ prompt_caller/prompt_caller.py,sha256=FlQEmNJWrxrdLMaoTxsCfPPcbr3DXWN5Oq1pp45yVQM,6644
4
+ prompt_caller-0.1.0.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
5
+ prompt_caller-0.1.0.dist-info/METADATA,sha256=0ciKS5ENrpqRA6EjrulhMu_R-7iwWwmMEbiYpjJbymk,4957
6
+ prompt_caller-0.1.0.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
7
+ prompt_caller-0.1.0.dist-info/top_level.txt,sha256=iihiDRq-0VrKB8IKjxf7Lrtv-fLMq4tvgM4fH3x0I94,14
8
+ prompt_caller-0.1.0.dist-info/RECORD,,
@@ -1,8 +0,0 @@
1
- prompt_caller/__init__.py,sha256=4EGdeAJ_Ig7A-b-e17-nYbiXjckT7uL3to5lchMsoW4,41
2
- prompt_caller/__main__.py,sha256=dJ0dYtVmnhZuoV79R6YiAIta1ZkUKb-TEX4VEuYbgk0,139
3
- prompt_caller/prompt_caller.py,sha256=KRQYxvAOPMFIqhoLgJWQYr1ahGEWefNWtIjw1W_v8BM,2842
4
- prompt_caller-0.0.4.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
5
- prompt_caller-0.0.4.dist-info/METADATA,sha256=PEL3ljVM0N5qT_SHpWmoWsDeZGlKLsy9fFXAFECOUTY,3916
6
- prompt_caller-0.0.4.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
7
- prompt_caller-0.0.4.dist-info/top_level.txt,sha256=iihiDRq-0VrKB8IKjxf7Lrtv-fLMq4tvgM4fH3x0I94,14
8
- prompt_caller-0.0.4.dist-info/RECORD,,