prompt-caller 0.0.4__tar.gz → 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: prompt_caller
- Version: 0.0.4
+ Version: 0.1.0
  Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
  Home-page: https://github.com/ThiNepo/prompt-caller
  Author: Thiago Nepomuceno
@@ -15,6 +15,7 @@ Requires-Dist: python-dotenv>=1.0.1
  Requires-Dist: Jinja2>=3.1.4
  Requires-Dist: langchain-openai>=0.3.5
  Requires-Dist: openai>=1.63.0
+ Requires-Dist: pillow>=11.0.0

  # PromptCaller

@@ -86,6 +87,36 @@ In this example:
  - The `expression` value `3+8/9` is injected into the user message.
  - The model will respond with both the result of the expression and an explanation, as specified in the `output` section of the prompt.

+ 3. **Using the agent feature:**
+
+ The `agent` method extends a prompt with external tools that the model can call. Here’s an example that evaluates a mathematical expression with Python’s `eval`, restricted by an empty builtins namespace (this limits what the expression can reach, but is not a full sandbox):
+
+ ```python
+ from prompt_caller import PromptCaller
+
+ ai = PromptCaller()
+
+ def evaluate_expression(expression: str):
+     """
+     Evaluate a math expression using eval.
+     """
+     safe_globals = {"__builtins__": None}
+     return eval(expression, safe_globals, {})
+
+ response = ai.agent(
+     "sample-agent", {"expression": "3+8/9"}, tools=[evaluate_expression]
+ )
+
+ print(response)
+ ```
+
+ In this example:
+
+ - The `agent` method processes the prompt and lets the model invoke the registered tools.
+ - The `evaluate_expression` function evaluates the mathematical expression with access to builtins removed.
+ - The response contains the result produced from the prompt and any tool executions.
+
+
  ## How It Works

  1. **\_loadPrompt:** Loads the prompt file, splits the YAML header from the body, and parses them.
@@ -68,6 +68,36 @@ In this example:
  - The `expression` value `3+8/9` is injected into the user message.
  - The model will respond with both the result of the expression and an explanation, as specified in the `output` section of the prompt.

+ 3. **Using the agent feature:**
+
+ The `agent` method extends a prompt with external tools that the model can call. Here’s an example that evaluates a mathematical expression with Python’s `eval`, restricted by an empty builtins namespace (this limits what the expression can reach, but is not a full sandbox):
+
+ ```python
+ from prompt_caller import PromptCaller
+
+ ai = PromptCaller()
+
+ def evaluate_expression(expression: str):
+     """
+     Evaluate a math expression using eval.
+     """
+     safe_globals = {"__builtins__": None}
+     return eval(expression, safe_globals, {})
+
+ response = ai.agent(
+     "sample-agent", {"expression": "3+8/9"}, tools=[evaluate_expression]
+ )
+
+ print(response)
+ ```
+
+ In this example:
+
+ - The `agent` method processes the prompt and lets the model invoke the registered tools.
+ - The `evaluate_expression` function evaluates the mathematical expression with access to builtins removed.
+ - The response contains the result produced from the prompt and any tool executions.
+
+
  ## How It Works

  1. **\_loadPrompt:** Loads the prompt file, splits the YAML header from the body, and parses them.
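The README shows the calling code but not the prompt file itself. For orientation, a minimal `prompts/sample-agent.prompt` consistent with the loader in the new source below might look like the following sketch. The header keys (`model`, `temperature`) and the message wording are illustrative assumptions: the header is passed verbatim as `ChatOpenAI` keyword arguments, and prompt files are user-authored rather than shipped with the package.

```
---
model: gpt-4o-mini
temperature: 0
output:
  result: ""
  explanation: ""
---
<system>
You are a careful math assistant. Use the available tools to compute.
</system>
<user>
Evaluate the expression {{ expression }} and explain the steps.
</user>
```

The YAML header before the second `---` becomes the model configuration (with `output` split off for structured output), the body is rendered through Jinja2 (`{{ expression }}`), and each `<system>`/`<user>` tag is parsed into one chat message.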
@@ -0,0 +1,203 @@
+ import os
+ import re
+
+ import requests
+ import yaml
+ from dotenv import load_dotenv
+ from jinja2 import Template
+ from langchain_core.tools import tool
+ from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
+ from langchain_openai import ChatOpenAI
+ from PIL import Image
+ from pydantic import BaseModel, Field, create_model
+
+ from io import BytesIO
+ import base64
+
+ load_dotenv()
+
+
+ class PromptCaller:
+
+     def _loadPrompt(self, file_path):
+         with open(file_path, "r", encoding="utf-8") as file:
+             content = file.read()
+
+         # Split YAML header and the body
+         header, body = content.split("---", 2)[1:]
+
+         # Parse the YAML header
+         model_config = yaml.safe_load(header.strip())
+
+         # Return the parsed header and the raw body (parsed into messages later)
+         return model_config, body.strip()
+
+     def _renderTemplate(self, body, context):
+         template = Template(body)
+         return template.render(context)
+
+     def _parseJSXBody(self, body):
+         elements = []
+         tag_pattern = r"<(system|user|assistant|image)>(.*?)</\1>"
+
+         matches = re.findall(tag_pattern, body, re.DOTALL)
+
+         for tag, content in matches:
+             elements.append({"role": tag, "content": content.strip()})
+
+         return elements
+
+     def getImageBase64(self, url: str) -> str:
+         response = requests.get(url)
+         response.raise_for_status()
+         img = Image.open(BytesIO(response.content))
+         buffered = BytesIO()
+         img.save(buffered, format="PNG")
+         img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+         return f"data:image/png;base64,{img_base64}"
+
+     def loadPrompt(self, promptName, context=None):
+         # initialize context
+         if context is None:
+             context = {}
+
+         configuration, template = self._loadPrompt(
+             os.path.join("prompts", f"{promptName}.prompt")
+         )
+
+         template = self._renderTemplate(template, context)
+
+         parsedMessages = self._parseJSXBody(template)
+
+         messages = []
+
+         for message in parsedMessages:
+             if message.get("role") == "system":
+                 messages.append(SystemMessage(content=message.get("content")))
+
+             if message.get("role") == "user":
+                 messages.append(HumanMessage(content=message.get("content")))
+
+             if message.get("role") == "image":
+                 base64_image = message.get("content")
+
+                 if base64_image.startswith("http"):
+                     base64_image = self.getImageBase64(base64_image)
+
+                 messages.append(
+                     HumanMessage(
+                         content=[
+                             {
+                                 "type": "image_url",
+                                 "image_url": {"url": base64_image},
+                             }
+                         ]
+                     )
+                 )
+
+         return configuration, messages
+
+     def createPydanticModel(self, dynamic_dict):
+         # Create a dynamic Pydantic model from the dictionary
+         fields = {
+             key: (str, Field(description=f"Description for {key}"))
+             for key in dynamic_dict.keys()
+         }
+         # Dynamically create the Pydantic model with the fields
+         return create_model("DynamicModel", **fields)
+
+     def call(self, promptName, context=None):
+
+         configuration, messages = self.loadPrompt(promptName, context)
+
+         output = None
+
+         if "output" in configuration:
+             output = configuration.get("output")
+             configuration.pop("output")
+
+         chat = ChatOpenAI(**configuration)
+
+         if output:
+             dynamicModel = self.createPydanticModel(output)
+             chat = chat.with_structured_output(dynamicModel)
+
+         response = chat.invoke(messages)
+
+         return response
+
+     def agent(self, promptName, context=None, tools=None, allowed_steps=3):
+
+         configuration, messages = self.loadPrompt(promptName, context)
+
+         output = None
+
+         if "output" in configuration:
+             output = configuration.get("output")
+             configuration.pop("output")
+
+         for message in messages:
+             if isinstance(message, SystemMessage):
+                 message.content += "\nOnly use the tool DynamicModel when providing an output call."
+                 break
+
+         chat = ChatOpenAI(**configuration)
+
+         # Register the tools
+         if tools is None:
+             tools = []
+
+         # Wrap plain functions as LangChain tools
+         tools = [tool(t) for t in tools]
+
+         tools_dict = {t.name.lower(): t for t in tools}
+
+         if output:
+             dynamicModel = self.createPydanticModel(output)
+
+             tools.extend([dynamicModel])
+             tools_dict["dynamicmodel"] = dynamicModel
+
+         chat = chat.bind_tools(tools)
+
+         try:
+             # First LLM invocation
+             response = chat.invoke(messages)
+             messages.append(response)
+
+             steps = 0
+             while response.tool_calls and steps < allowed_steps:
+                 for tool_call in response.tool_calls:
+                     tool_name = tool_call["name"].lower()
+
+                     # If it's the final formatting tool, validate and return
+                     if tool_name == "dynamicmodel":
+                         return dynamicModel.model_validate(tool_call["args"])
+
+                     selected_tool = tools_dict.get(tool_name)
+                     if not selected_tool:
+                         raise ValueError(f"Unknown tool: {tool_name}")
+
+                     # Invoke the selected tool with provided arguments
+                     tool_response = selected_tool.invoke(tool_call)
+                     messages.append(tool_response)
+
+                 # If the latest message is a ToolMessage, re-invoke the LLM
+                 if isinstance(messages[-1], ToolMessage):
+                     response = chat.invoke(messages)
+                     messages.append(response)
+                 else:
+                     break
+
+                 steps += 1
+
+             # Final LLM call if the last message is still a ToolMessage
+             if isinstance(messages[-1], ToolMessage):
+                 response = chat.invoke(messages)
+                 messages.append(response)
+
+             return response
+
+         except Exception as e:
+             # Replace with appropriate logging in production
+             raise RuntimeError("Error during agent process") from e
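The headline addition in 0.1.0 is the `<image>` tag handled above: `_parseJSXBody` now recognizes it, and `loadPrompt` converts any `http(s)` source via `getImageBase64`, which downloads the file, re-encodes it as a PNG with Pillow, and embeds it as a base64 `data:` URI inside an `image_url` content part. A sketch of a prompt exercising it, where the file name, model, and wording are illustrative assumptions:

```
---
model: gpt-4o-mini
---
<system>
You describe images for a product catalogue.
</system>
<image>
https://example.com/product.png
</image>
<user>
Describe the image in one sentence.
</user>
```

Saved as `prompts/describe-image.prompt`, this could be run with `PromptCaller().call("describe-image")`; content inside `<image>` that does not start with `http` is assumed to already be a data URI and is passed through untouched.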
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: prompt_caller
- Version: 0.0.4
+ Version: 0.1.0
  Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
  Home-page: https://github.com/ThiNepo/prompt-caller
  Author: Thiago Nepomuceno
@@ -15,6 +15,7 @@ Requires-Dist: python-dotenv>=1.0.1
  Requires-Dist: Jinja2>=3.1.4
  Requires-Dist: langchain-openai>=0.3.5
  Requires-Dist: openai>=1.63.0
+ Requires-Dist: pillow>=11.0.0

  # PromptCaller

@@ -86,6 +87,36 @@ In this example:
  - The `expression` value `3+8/9` is injected into the user message.
  - The model will respond with both the result of the expression and an explanation, as specified in the `output` section of the prompt.

+ 3. **Using the agent feature:**
+
+ The `agent` method extends a prompt with external tools that the model can call. Here’s an example that evaluates a mathematical expression with Python’s `eval`, restricted by an empty builtins namespace (this limits what the expression can reach, but is not a full sandbox):
+
+ ```python
+ from prompt_caller import PromptCaller
+
+ ai = PromptCaller()
+
+ def evaluate_expression(expression: str):
+     """
+     Evaluate a math expression using eval.
+     """
+     safe_globals = {"__builtins__": None}
+     return eval(expression, safe_globals, {})
+
+ response = ai.agent(
+     "sample-agent", {"expression": "3+8/9"}, tools=[evaluate_expression]
+ )
+
+ print(response)
+ ```
+
+ In this example:
+
+ - The `agent` method processes the prompt and lets the model invoke the registered tools.
+ - The `evaluate_expression` function evaluates the mathematical expression with access to builtins removed.
+ - The response contains the result produced from the prompt and any tool executions.
+
+
  ## How It Works

  1. **\_loadPrompt:** Loads the prompt file, splits the YAML header from the body, and parses them.
@@ -3,3 +3,4 @@ python-dotenv>=1.0.1
  Jinja2>=3.1.4
  langchain-openai>=0.3.5
  openai>=1.63.0
+ pillow>=11.0.0
@@ -35,7 +35,7 @@ class BdistWheelCommand(bdist_wheel):

  setuptools.setup(
      name="prompt_caller",
-     version="0.0.4",
+     version="0.1.0",
      author="Thiago Nepomuceno",
      author_email="thiago@neps.academy",
      description="This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API",
@@ -55,6 +55,7 @@ setuptools.setup(
          "Jinja2>=3.1.4",
          "langchain-openai>=0.3.5",
          "openai>=1.63.0",
+         "pillow>=11.0.0",
      ],
      cmdclass={"sdist": SdistCommand, "bdist_wheel": BdistWheelCommand},
  )
@@ -1,95 +0,0 @@
- import os
- import re
-
- import yaml
- from dotenv import load_dotenv
- from jinja2 import Template
- from langchain_core.messages import HumanMessage, SystemMessage
- from langchain_openai import ChatOpenAI
- from pydantic import BaseModel, Field, create_model
-
- load_dotenv()
-
-
- class PromptCaller:
-
-     def _loadPrompt(self, file_path):
-         with open(file_path, "r", encoding="utf-8") as file:
-             content = file.read()
-
-         # Split YAML header and the body
-         header, body = content.split("---", 2)[1:]
-
-         # Parse the YAML header
-         model_config = yaml.safe_load(header.strip())
-
-         # Return the parsed header and the raw body (parsed into messages later)
-         return model_config, body.strip()
-
-     def _renderTemplate(self, body, context):
-         template = Template(body)
-         return template.render(context)
-
-     def _parseJSXBody(self, body):
-         elements = []
-         tag_pattern = r"<(system|user|assistant)>(.*?)</\1>"
-
-         matches = re.findall(tag_pattern, body, re.DOTALL)
-
-         for tag, content in matches:
-             elements.append({"role": tag, "content": content.strip()})
-
-         return elements
-
-     def loadPrompt(self, promptName, context=None):
-         # initialize context
-         if context is None:
-             context = {}
-
-         configuration, template = self._loadPrompt(
-             os.path.join("prompts", f"{promptName}.prompt")
-         )
-
-         template = self._renderTemplate(template, context)
-
-         parsedMessages = self._parseJSXBody(template)
-
-         messages = []
-
-         for message in parsedMessages:
-             if message.get("role") == "system":
-                 messages.append(SystemMessage(content=message.get("content")))
-
-             if message.get("role") == "user":
-                 messages.append(HumanMessage(content=message.get("content")))
-
-         return configuration, messages
-
-     def createPydanticModel(self, dynamic_dict):
-         # Create a dynamic Pydantic model from the dictionary
-         fields = {
-             key: (str, Field(description=f"Description for {key}"))
-             for key in dynamic_dict.keys()
-         }
-         # Dynamically create the Pydantic model with the fields
-         return create_model("DynamicModel", **fields)
-
-     def call(self, promptName, context=None):
-
-         configuration, messages = self.loadPrompt(promptName, context)
-
-         output = None
-
-         if "output" in configuration:
-             output = configuration.get("output")
-             configuration.pop("output")
-
-         chat = ChatOpenAI(**configuration)
-
-         if output:
-             dynamicModel = self.createPydanticModel(output)
-             chat = chat.with_structured_output(dynamicModel)
-
-         response = chat.invoke(messages)
-
-         return response
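Unchanged between the two versions is the structured-output path in `call`: when the YAML header carries an `output` mapping, `createPydanticModel` builds a model whose fields mirror the mapping's keys (all typed `str`), and the chat model is wrapped with `with_structured_output` so the response comes back as an instance of that model. A usage sketch, where the prompt name and field names are assumptions borrowed from the README's calculator example:

```python
from prompt_caller import PromptCaller

ai = PromptCaller()

# Assumes prompts/sample-prompt.prompt declares an `output` mapping with
# `result` and `explanation` keys, as described in the README example.
answer = ai.call("sample-prompt", {"expression": "3+8/9"})

# `answer` is an instance of the dynamically created Pydantic model.
print(answer.result)
print(answer.explanation)
```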
File without changes
File without changes