prompt-caller 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prompt_caller/prompt_caller.py +47 -20
- {prompt_caller-0.1.2.dist-info → prompt_caller-0.1.4.dist-info}/METADATA +2 -1
- prompt_caller-0.1.4.dist-info/RECORD +8 -0
- prompt_caller-0.1.2.dist-info/RECORD +0 -8
- {prompt_caller-0.1.2.dist-info → prompt_caller-0.1.4.dist-info}/LICENSE +0 -0
- {prompt_caller-0.1.2.dist-info → prompt_caller-0.1.4.dist-info}/WHEEL +0 -0
- {prompt_caller-0.1.2.dist-info → prompt_caller-0.1.4.dist-info}/top_level.txt +0 -0
prompt_caller/prompt_caller.py
CHANGED
@@ -8,6 +8,7 @@ from jinja2 import Template
 from langchain_core.tools import tool
 from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
 from langchain_openai import ChatOpenAI
+from langchain_google_genai import ChatGoogleGenerativeAI
 from PIL import Image
 from pydantic import BaseModel, Field, create_model
 
@@ -18,7 +19,6 @@ load_dotenv()
 
 
 class PromptCaller:
-
     def __init__(self, promptPath="prompts"):
         self.promptPath = promptPath
 
@@ -39,17 +39,44 @@
         template = Template(body)
         return template.render(context)
 
+    import re
+
     def _parseJSXBody(self, body):
         elements = []
-
+        # 1. Regex to find tags, attributes string, and content
+        tag_pattern = r"<(system|user|assistant|image)([^>]*)>(.*?)</\1>"
+
+        # 2. Regex to find key="value" pairs within the attributes string
+        attr_pattern = r'(\w+)\s*=\s*"(.*?)"'
 
         matches = re.findall(tag_pattern, body, re.DOTALL)
 
-        for tag, content in matches:
-
+        for tag, attrs_string, content in matches:
+            # 3. Parse the attributes string (e.g., ' tag="image 1"') into a dict
+            attributes = {}
+            if attrs_string:
+                attr_matches = re.findall(attr_pattern, attrs_string)
+                for key, value in attr_matches:
+                    attributes[key] = value
+
+            element = {"role": tag, "content": content.strip()}
+
+            # 4. Add the attributes to our element dict if they exist
+            if attributes:
+                element["attributes"] = attributes
+
+            elements.append(element)
 
         return elements
 
+    def _createChat(self, configuration):
+        if configuration.get("model") is not None and configuration.get(
+            "model"
+        ).startswith("gemini"):
+            return ChatGoogleGenerativeAI(**configuration)
+        else:
+            return ChatOpenAI(**configuration)
+
     def getImageBase64(self, url: str) -> str:
         response = requests.get(url)
         response.raise_for_status()
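The reworked tag regex now captures the attribute string as a second group, and the new attr regex turns it into key/value pairs. As a quick illustration, the patterns and the parsing loop from the 0.1.4 code can be exercised on their own; the prompt body below is hypothetical, invented only to show the resulting element dicts:

import re

# Patterns copied from the _parseJSXBody shown above
tag_pattern = r"<(system|user|assistant|image)([^>]*)>(.*?)</\1>"
attr_pattern = r'(\w+)\s*=\s*"(.*?)"'

# Hypothetical prompt body in the JSX-like format the parser targets
body = """
<system>You are a helpful assistant.</system>
<image tag="image 1">https://example.com/cat.png</image>
<user>Describe the attached image.</user>
"""

for tag, attrs_string, content in re.findall(tag_pattern, body, re.DOTALL):
    attributes = dict(re.findall(attr_pattern, attrs_string))
    element = {"role": tag, "content": content.strip()}
    if attributes:
        element["attributes"] = attributes
    print(element)

# {'role': 'system', 'content': 'You are a helpful assistant.'}
# {'role': 'image', 'content': 'https://example.com/cat.png', 'attributes': {'tag': 'image 1'}}
# {'role': 'user', 'content': 'Describe the attached image.'}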
@@ -87,16 +114,18 @@
                 if base64_image.startswith("http"):
                     base64_image = self.getImageBase64(base64_image)
 
-
-
-
-
-
-
-
-
-
-
+                content = [
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": base64_image},
+                    }
+                ]
+
+                tag = message.get("attributes", {}).get("tag")
+                if tag:
+                    content.append({"type": "text", "text": f"({tag})"})
+
+                messages.append(HumanMessage(content=content))
 
         return configuration, messages
 
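For one parsed <image> element, the new lines build a multi-part LangChain message: an image_url part plus, when a tag attribute is present, a small text label. A minimal sketch with a hypothetical element dict (the surrounding loop and the http/base64 handling sit outside this hunk):

from langchain_core.messages import HumanMessage

# Hypothetical element, shaped like the dicts _parseJSXBody emits for <image> blocks
message = {
    "role": "image",
    "content": "https://example.com/cat.png",
    "attributes": {"tag": "image 1"},
}

base64_image = message["content"]  # the real code may first inline http URLs via getImageBase64

content = [
    {
        "type": "image_url",
        "image_url": {"url": base64_image},
    }
]

tag = message.get("attributes", {}).get("tag")
if tag:
    # The optional tag attribute becomes a text part such as "(image 1)"
    content.append({"type": "text", "text": f"({tag})"})

print(HumanMessage(content=content))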
@@ -110,7 +139,6 @@
         return create_model("DynamicModel", **fields)
 
     def call(self, promptName, context=None):
-
         configuration, messages = self.loadPrompt(promptName, context)
 
         output = None
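createPydanticModel, partially visible as context here, ends in a plain pydantic create_model call. How the fields dict is derived from the prompt's output configuration is not shown in this diff, so the field spec below is only an assumed example of what create_model accepts:

from pydantic import create_model

# Assumed field spec: name -> (type, default); "..." marks a required field
fields = {"answer": (str, ...), "confidence": (float, ...)}

DynamicModel = create_model("DynamicModel", **fields)
print(DynamicModel(answer="42", confidence=0.9))
# answer='42' confidence=0.9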
@@ -119,7 +147,7 @@
             output = configuration.get("output")
             configuration.pop("output")
 
-        chat =
+        chat = self._createChat(configuration)
 
         if output:
             dynamicModel = self.createPydanticModel(output)
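The previously truncated `chat =` assignment is now wired to the new _createChat helper, so the provider is chosen purely by the model name prefix. A standalone mirror of that branch (the function and model names here are illustrative, not part of the package):

def pick_provider(configuration: dict) -> str:
    """Mirror of _createChat's check: 'gemini*' models route to Google, everything else to OpenAI."""
    model = configuration.get("model")
    if model is not None and model.startswith("gemini"):
        return "ChatGoogleGenerativeAI"
    return "ChatOpenAI"

assert pick_provider({"model": "gemini-1.5-flash"}) == "ChatGoogleGenerativeAI"
assert pick_provider({"model": "gpt-4o-mini"}) == "ChatOpenAI"
assert pick_provider({"temperature": 0}) == "ChatOpenAI"  # missing "model" falls back to OpenAI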
@@ -132,7 +160,6 @@
     def agent(
         self, promptName, context=None, tools=None, output=None, allowed_steps=10
     ):
-
         configuration, messages = self.loadPrompt(promptName, context)
 
         dynamicOutput = None
@@ -143,10 +170,10 @@
 
         for message in messages:
             if isinstance(message, SystemMessage):
-                message.content += "\
+                message.content += "\n\nYou have to use the tool `dynamicmodel` when providing your final answer. If you don't, you have failed the task."
                 break
 
-        chat =
+        chat = self._createChat(configuration)
 
         # Register the tools
         if tools is None:
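The agent now appends an explicit instruction to the first SystemMessage so the model finishes by calling the `dynamicmodel` tool. LangChain message objects are mutable, so the append happens in place; a small sketch with a hypothetical system prompt:

from langchain_core.messages import HumanMessage, SystemMessage

# Hypothetical prompt messages; only the first SystemMessage is modified
messages = [SystemMessage(content="You are a research agent."), HumanMessage(content="Summarize X.")]

for message in messages:
    if isinstance(message, SystemMessage):
        message.content += (
            "\n\nYou have to use the tool `dynamicmodel` when providing your final answer."
            " If you don't, you have failed the task."
        )
        break

print(messages[0].content)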
@@ -161,7 +188,7 @@
             tools.extend([output])
             tools_dict[output.__name__.lower()] = output
         elif dynamicOutput:
-            dynamicModel = self.createPydanticModel(
+            dynamicModel = self.createPydanticModel(dynamicOutput)
 
             tools.extend([dynamicModel])
             tools_dict["dynamicmodel"] = dynamicModel
{prompt_caller-0.1.2.dist-info → prompt_caller-0.1.4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prompt_caller
-Version: 0.1.2
+Version: 0.1.4
 Summary: This package is responsible for calling prompts in a specific format. It uses LangChain and OpenAI API
 Home-page: https://github.com/ThiNepo/prompt-caller
 Author: Thiago Nepomuceno
@@ -14,6 +14,7 @@ Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: python-dotenv>=1.0.1
 Requires-Dist: Jinja2>=3.1.4
 Requires-Dist: langchain-openai>=0.3.5
+Requires-Dist: langchain-google-genai==2.1.5
 Requires-Dist: openai>=1.63.0
 Requires-Dist: pillow>=11.0.0
 
prompt_caller-0.1.4.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@
+prompt_caller/__init__.py,sha256=4EGdeAJ_Ig7A-b-e17-nYbiXjckT7uL3to5lchMsoW4,41
+prompt_caller/__main__.py,sha256=dJ0dYtVmnhZuoV79R6YiAIta1ZkUKb-TEX4VEuYbgk0,139
+prompt_caller/prompt_caller.py,sha256=0Rqfzp7hUgPH10auxEwJMoaASOSZNlTyDvS8Hd6X0Yk,8333
+prompt_caller-0.1.4.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+prompt_caller-0.1.4.dist-info/METADATA,sha256=0x0EKp50KvL-dYuc9n8TEvouL53jD_V1fMw2bSKGPEY,4955
+prompt_caller-0.1.4.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+prompt_caller-0.1.4.dist-info/top_level.txt,sha256=iihiDRq-0VrKB8IKjxf7Lrtv-fLMq4tvgM4fH3x0I94,14
+prompt_caller-0.1.4.dist-info/RECORD,,
prompt_caller-0.1.2.dist-info/RECORD
REMOVED
@@ -1,8 +0,0 @@
-prompt_caller/__init__.py,sha256=4EGdeAJ_Ig7A-b-e17-nYbiXjckT7uL3to5lchMsoW4,41
-prompt_caller/__main__.py,sha256=dJ0dYtVmnhZuoV79R6YiAIta1ZkUKb-TEX4VEuYbgk0,139
-prompt_caller/prompt_caller.py,sha256=zAJq_5v-ku_O9ACAw7C7JU1RmVwlIvunbd-31B0XX6E,7119
-prompt_caller-0.1.2.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-prompt_caller-0.1.2.dist-info/METADATA,sha256=IjSAGTvmJbi7X6_5f3OxvH6QA7l9H6Opx4d9wtGGRak,4909
-prompt_caller-0.1.2.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
-prompt_caller-0.1.2.dist-info/top_level.txt,sha256=iihiDRq-0VrKB8IKjxf7Lrtv-fLMq4tvgM4fH3x0I94,14
-prompt_caller-0.1.2.dist-info/RECORD,,
{prompt_caller-0.1.2.dist-info → prompt_caller-0.1.4.dist-info}/LICENSE
File without changes
{prompt_caller-0.1.2.dist-info → prompt_caller-0.1.4.dist-info}/WHEEL
File without changes
{prompt_caller-0.1.2.dist-info → prompt_caller-0.1.4.dist-info}/top_level.txt
File without changes