prompty 1.0.0a2__tar.gz → 1.0.0b1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {prompty-1.0.0a2 → prompty-1.0.0b1}/PKG-INFO +1 -1
- prompty-1.0.0b1/prompty/_version.py +1 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/azure/executor.py +155 -83
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/azure/processor.py +2 -16
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/common.py +31 -7
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/core.py +29 -23
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/invoker.py +9 -4
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/openai/executor.py +2 -0
- prompty-1.0.0b1/tests/agent/simple_agent.prompty +49 -0
- prompty-1.0.0b1/tests/agent/simple_agent.prompty.1.execution.json +76 -0
- prompty-1.0.0b1/tests/agent/simple_agent.prompty.2.execution.json +84 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/embedding.prompty +1 -0
- prompty-1.0.0b1/tests/prompts/structured_complex.prompty +54 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/threaded_chat.prompty +2 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/properties/basic_array.prompty +1 -0
- prompty-1.0.0b1/tests/response/structured_complex.prompty +52 -0
- prompty-1.0.0b1/tests/response/structured_complex.prompty.execution.json +92 -0
- prompty-1.0.0b1/tests/response/structured_complex_other.prompty +62 -0
- prompty-1.0.0b1/tests/response/structured_complex_other.prompty.execution.json +92 -0
- prompty-1.0.0a2/tests/prompts/structured_output.prompty → prompty-1.0.0b1/tests/response/structured_inline.prompty +15 -7
- prompty-1.0.0b1/tests/response/structured_inline.prompty.execution.json +84 -0
- prompty-1.0.0b1/tests/test_agent.py +80 -0
- prompty-1.0.0b1/tests/test_common.py +136 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/test_core.py +2 -2
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/test_execute.py +29 -14
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/test_tracing.py +0 -23
- prompty-1.0.0a2/prompty/_version.py +0 -1
- prompty-1.0.0a2/prompty/azure_beta/__init__.py +0 -9
- prompty-1.0.0a2/prompty/azure_beta/executor.py +0 -296
- prompty-1.0.0a2/tests/test_common.py +0 -66
- {prompty-1.0.0a2 → prompty-1.0.0b1}/.gitignore +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/.vscode/settings.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/LICENSE +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/README.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/__init__.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/azure/__init__.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/cli.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/openai/__init__.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/openai/processor.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/parsers.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/py.typed +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/renderers.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/serverless/__init__.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/serverless/executor.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/serverless/processor.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/tracer.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/prompty/utils.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/pyproject.toml +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/__init__.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/coverage.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/fake_azure_executor.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/fake_serverless_executor.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/1contoso.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/2contoso.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/3contoso.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/4contoso.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/basic.prompty.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/camping.jpg +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/context.prompty.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/contoso_multi.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/faithfulness.prompty.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/generated/groundedness.prompty.md +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/hello_world.embedding.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/mustache/basic.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/__init__.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/agent.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/basic.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/basic.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/basic_json_output.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/basic_mustache.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/camping.jpg +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/chat.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/context.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/context.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/context.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/embedding.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/evaluation.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/faithfulness.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/faithfulness.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/fake.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/func_inline.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/funcfile.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/funcfile.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/functions.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/functions.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/groundedness.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/groundedness.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/prompty.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/serverless.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/serverless.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/serverless_stream.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/streaming.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/streaming.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/structured_inline.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/structured_output.prompty.execution.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/structured_output_schema.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/sub/__init__.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/sub/basic.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/sub/sub/__init__.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/sub/sub/basic.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/sub/sub/prompty.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/sub/sub/test.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompts/test.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/prompty.json +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/properties/basic_dictionary.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/properties/basic_mixed.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/properties/thread_split.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/test_conversion.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/test_factory_invoker.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/test_mustache.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/test_parser.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/test_path_exec.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/test_tools.py +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/tools/basic.prompty +0 -0
- {prompty-1.0.0a2 → prompty-1.0.0b1}/tests/tools/dynamic.prompty +0 -0
--- prompty-1.0.0a2/PKG-INFO
+++ prompty-1.0.0b1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompty
-Version: 1.0.0a2
+Version: 1.0.0b1
 Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
 Author-email: Seth Juarez <seth.juarez@microsoft.com>
 Requires-Python: >=3.9
--- /dev/null
+++ prompty-1.0.0b1/prompty/_version.py
@@ -0,0 +1 @@
+VERSION = "1.0.0b1"
--- prompty-1.0.0a2/prompty/azure/executor.py
+++ prompty-1.0.0b1/prompty/azure/executor.py
@@ -1,16 +1,17 @@
+import inspect
 import json
 import typing
 from collections.abc import AsyncIterator, Iterator
 
 import azure.identity
-from openai import APIResponse, AsyncAzureOpenAI, AzureOpenAI
+from openai import AsyncAzureOpenAI, AzureOpenAI
 from openai.types.chat.chat_completion import ChatCompletion
 
 from prompty.tracer import Tracer
 
 from .._version import VERSION
 from ..common import convert_function_tools, convert_output_props
-from ..core import AsyncPromptyStream, Prompty, PromptyStream
+from ..core import AsyncPromptyStream, InputProperty, Prompty, PromptyStream, ToolProperty
 from ..invoker import Invoker, InvokerFactory
 
 
@@ -146,6 +147,22 @@ class AzureOpenAIExecutor(Invoker):
 
         return args
 
+    def _execute_chat_completion(self, client: AzureOpenAI, args: dict, trace) -> typing.Any:
+        if "stream" in args and args["stream"]:
+            response = client.chat.completions.create(**args)
+        else:
+            raw = client.chat.completions.with_raw_response.create(**args)
+
+            response = ChatCompletion.model_validate_json(raw.text)
+
+            for k, v in raw.headers.raw:
+                trace(k.decode("utf-8"), v.decode("utf-8"))
+
+            trace("request_id", raw.request_id)
+            trace("retries_taken", raw.retries_taken)
+
+        return response
+
     def _create_chat(self, client: AzureOpenAI, data: typing.Any, ignore_thread_content=False) -> typing.Any:
         with Tracer.start("create") as trace:
             trace("type", "LLM")
@@ -153,20 +170,25 @@ class AzureOpenAIExecutor(Invoker):
             trace("signature", "AzureOpenAI.chat.completions.create")
             args = self._resolve_chat_args(data, ignore_thread_content)
             trace("inputs", args)
-            if "stream" in args and args["stream"]:
-                response = client.chat.completions.create(**args)
-            else:
-                raw = client.chat.completions.with_raw_response.create(**args)
+            response = self._execute_chat_completion(client, args, trace)
+            trace("result", response)
+            return response
 
-                response = ChatCompletion.model_validate_json(raw.text)
+    async def _execute_chat_completion_async(self, client: AsyncAzureOpenAI, args: dict, trace) -> typing.Any:
+        if "stream" in args and args["stream"]:
+            response = await client.chat.completions.create(**args)
+        else:
+            raw = await client.chat.completions.with_raw_response.create(**args)
 
-                for k, v in raw.headers.raw:
-                    trace(k.decode("utf-8"), v.decode("utf-8"))
+            response = ChatCompletion.model_validate_json(raw.text)
 
-                trace("request_id", raw.request_id)
-                trace("retries_taken", raw.retries_taken)
-            trace("result", response)
-            return response
+            for k, v in raw.headers.raw:
+                trace(k.decode("utf-8"), v.decode("utf-8"))
+
+            trace("request_id", raw.request_id)
+            trace("retries_taken", raw.retries_taken)
+
+        return response
 
     async def _create_chat_async(
         self, client: AsyncAzureOpenAI, data: typing.Any, ignore_thread_content=False
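The two helpers above centralize what was previously duplicated in `_create_chat` and `_create_chat_async`: streaming requests go through the plain `create` call, while non-streaming requests use `with_raw_response` so the response headers, request id, and retry count can be traced. A minimal sketch of that same pattern against a bare client (illustrative only; not part of the package, and it assumes Azure OpenAI credentials are configured via environment variables):

    from openai import AzureOpenAI
    from openai.types.chat.chat_completion import ChatCompletion

    client = AzureOpenAI()  # assumes AZURE_OPENAI_* / OPENAI_API_VERSION env vars

    raw = client.chat.completions.with_raw_response.create(
        model="gpt-4o-mini",  # hypothetical deployment name
        messages=[{"role": "user", "content": "hello"}],
    )

    # re-validate the body from the raw text, as _execute_chat_completion does
    response = ChatCompletion.model_validate_json(raw.text)

    # headers and request metadata are available for tracing
    print(raw.request_id, raw.headers.get("x-ratelimit-remaining-requests"))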
@@ -178,82 +200,130 @@ class AzureOpenAIExecutor(Invoker):
             trace("signature", "AzureOpenAIAsync.chat.completions.create")
             args = self._resolve_chat_args(data, ignore_thread_content)
             trace("inputs", args)
-            if "stream" in args and args["stream"]:
-                response = await client.chat.completions.create(**args)
-            else:
-                raw: APIResponse = await client.chat.completions.with_raw_response.create(**args)
-                if raw is not None and raw.text is not None and isinstance(raw.text, str):
-                    response = ChatCompletion.model_validate_json(raw.text)
-
-                for k, v in raw.headers.raw:
-                    trace(k.decode("utf-8"), v.decode("utf-8"))
-
-                trace("request_id", raw.request_id)
-                trace("retries_taken", raw.retries_taken)
+            response = await self._execute_chat_completion_async(client, args, trace)
             trace("result", response)
 
             return response
 
+    def _get_thread(self) -> InputProperty:
+        thread = self.prompty.get_input("thread")
+        if thread is None:
+            raise ValueError("thread requires thread input")
+
+        return thread
+
+    def _retrieve_tool(self, tool_name: str) -> ToolProperty:
+        tool = self.prompty.get_tool(tool_name)
+        if tool is None:
+            raise ValueError(f"Tool {tool_name} does not exist")
+
+        if tool.type != "function":
+            raise ValueError(f"Server tool ({tool_name}) is currently not supported")
+
+        if tool.value is None:
+            raise ValueError(f"Tool {tool_name} has not been initialized")
+
+        return tool
+
     def _execute_agent(self, client: AzureOpenAI, data: typing.Any) -> typing.Any:
         with Tracer.start("create") as trace:
             trace("type", "LLM")
             trace("description", "Azure OpenAI Client")
-
             trace("signature", "AzureOpenAI.chat.agent.create")
+
             trace("inputs", data)
 
             response = self._create_chat(client, data)
-            if isinstance(response, ChatCompletion):
-                message = response.choices[0].message
-                if message.tool_calls:
-                    thread = self.prompty.get_input("thread")
-                    if thread is None:
-                        raise ValueError("thread requires thread input")
 
-                    thread.value.append(
-                        {
-                            "role": "assistant",
-                            "tool_calls": [t.model_dump() for t in message.tool_calls],
-                        }
-                    )
+            # execute tool calls if any (until no more tool calls)
+            while (
+                isinstance(response, ChatCompletion)
+                and response.choices[0].finish_reason == "tool_calls"
+                and response.choices[0].message.tool_calls is not None
+                and len(response.choices[0].message.tool_calls) > 0
+            ):
+
+                tool_calls = response.choices[0].message.tool_calls
+                thread = self._get_thread()
+                thread.value.append(
+                    {
+                        "role": "assistant",
+                        "tool_calls": [t.model_dump() for t in tool_calls],
+                    }
+                )
 
-                    for tool_call in message.tool_calls:
-                        tool = self.prompty.get_tool(tool_call.function.name)
-                        if tool is None:
-                            raise ValueError(f"Tool {tool_call.function.name} does not exist")
+                for tool_call in tool_calls:
+                    tool = self._retrieve_tool(tool_call.function.name)
+                    function_args = json.loads(tool_call.function.arguments)
 
-                        function_args = json.loads(tool_call.function.arguments)
+                    if inspect.iscoroutinefunction(tool.value):
+                        raise ValueError("Cannot execute async tool in sync mode")
 
-                        if tool.value is None:
-                            raise ValueError(f"Tool {tool_call.function.name} does not have a value")
+                    r = tool.value(**function_args)
 
-                        r = tool.value(**function_args)
-
-                        thread.value.append(
-                            {
-                                "role": "tool",
-                                "tool_call_id": tool_call.id,
-                                "name": tool_call.function.name,
-                                "content": r,
-                            }
-                        )
-                else:
-                    trace("result", response)
-                    return response
+                    thread.value.append(
+                        {
+                            "role": "tool",
+                            "tool_call_id": tool_call.id,
+                            "name": tool_call.function.name,
+                            "content": r,
+                        }
+                    )
 
-            response = self._create_chat(client, data, True)
+                response = self._create_chat(client, data, True)
 
             trace("result", response)
-
             return response
 
     async def _execute_agent_async(self, client: AsyncAzureOpenAI, data: typing.Any) -> typing.Any:
         with Tracer.start("create") as trace:
             trace("type", "LLM")
             trace("description", "Azure OpenAI Client")
-            trace("signature", "AzureOpenAIAsync.chat.agent.create")
-
-            trace("inputs", data)
-            response = await self._create_chat_async(client, data)
+            trace("signature", "AzureOpenAIAsync.chat.agent.create")
+
+            trace("inputs", data)
+
+            response = await self._create_chat_async(client, data)
+
+            # execute tool calls if any (until no more tool calls)
+            while (
+                isinstance(response, ChatCompletion)
+                and response.choices[0].finish_reason == "tool_calls"
+                and response.choices[0].message.tool_calls is not None
+                and len(response.choices[0].message.tool_calls) > 0
+            ):
+
+                tool_calls = response.choices[0].message.tool_calls
+                thread = self._get_thread()
+                thread.value.append(
+                    {
+                        "role": "assistant",
+                        "tool_calls": [t.model_dump() for t in tool_calls],
+                    }
+                )
+
+                for tool_call in tool_calls:
+                    tool = self._retrieve_tool(tool_call.function.name)
+                    function_args = json.loads(tool_call.function.arguments)
+
+                    if inspect.iscoroutinefunction(tool.value):
+                        # if the tool is async, we need to await it
+                        r = await tool.value(**function_args)
+                    else:
+                        # if the tool is not async, we can call it directly
+                        r = tool.value(**function_args)
+
+                    thread.value.append(
+                        {
+                            "role": "tool",
+                            "tool_call_id": tool_call.id,
+                            "name": tool_call.function.name,
+                            "content": r,
+                        }
+                    )
+
+                response = await self._create_chat_async(client, data, True)
+
             trace("result", response)
             return response
 
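The agent loop above replaces the single-pass tool handling from 1.0.0a2: the executor now re-invokes the model until `finish_reason` is no longer "tool_calls", appending the assistant's tool calls and each tool's result to the `thread` input between rounds. A hypothetical function tool matching the shape this loop expects (the weather example from tests/agent/simple_agent.prompty); the comments show the message dictionaries the loop appends:

    def get_current_weather(city: str, unit: str = "Celsius") -> str:
        # a real implementation would call a weather service
        return f"It is 22 degrees {unit} and sunny in {city}."

    # per iteration, the thread input accumulates entries shaped like:
    #   {"role": "assistant", "tool_calls": [{"id": "...", "type": "function",
    #       "function": {"name": "get_current_weather", "arguments": "{...}"}}]}
    #   {"role": "tool", "tool_call_id": "...", "name": "get_current_weather",
    #       "content": "It is 22 degrees Celsius and sunny in Tokyo."}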
@@ -358,7 +428,7 @@ class AzureOpenAIExecutor(Invoker):
 
         return response
 
-    def invoke(self, data: typing.Any) -> typing.
+    def invoke(self, data: typing.Any) -> typing.Any:
         """Invoke the Azure OpenAI API
 
         Parameters
@@ -374,28 +444,29 @@ class AzureOpenAIExecutor(Invoker):
 
         client = self._get_ctor()
 
+        r = None
         if self.api == "chat":
-            response = self._create_chat(client, data)
+            r = self._create_chat(client, data)
         elif self.api == "agent":
-            response = self._execute_agent(client, data)
+            r = self._execute_agent(client, data)
         elif self.api == "completion":
-            response = self._create_completion(client, data)
+            r = self._create_completion(client, data)
         elif self.api == "embedding":
-            response = self._create_embedding(client, data)
+            r = self._create_embedding(client, data)
         elif self.api == "image":
-            response = self._create_image(client, data)
+            r = self._create_image(client, data)
 
         # stream response
-        if isinstance(response, Iterator):
+        if isinstance(r, Iterator):
             if self.api == "chat":
                 # TODO: handle the case where there might be no usage in the stream
-                return PromptyStream("AzureOpenAIExecutor", response)
+                return PromptyStream("AzureOpenAIExecutor", r)
             else:
-                return PromptyStream("AzureOpenAIExecutor", response)
+                return PromptyStream("AzureOpenAIExecutor", r)
         else:
-            return response
+            return r
 
-    async def invoke_async(self, data: str) -> typing.
+    async def invoke_async(self, data: str) -> typing.Any:
         """Invoke the Prompty Chat Parser (Async)
 
         Parameters
@@ -410,23 +481,24 @@ class AzureOpenAIExecutor(Invoker):
         """
         client = self._get_async_ctor()
 
+        r = None
         if self.api == "chat":
-            response = await self._create_chat_async(client, data)
+            r = await self._create_chat_async(client, data)
         elif self.api == "agent":
-            response = await self._execute_agent_async(client, data)
+            r = await self._execute_agent_async(client, data)
         elif self.api == "completion":
-            response = await self._create_completion_async(client, data)
+            r = await self._create_completion_async(client, data)
         elif self.api == "embedding":
-            response = await self._create_embedding_async(client, data)
+            r = await self._create_embedding_async(client, data)
         elif self.api == "image":
-            response = await self._create_image_async(client, data)
+            r = await self._create_image_async(client, data)
 
         # stream response
-        if isinstance(response, AsyncIterator):
+        if isinstance(r, AsyncIterator):
             if self.api == "chat":
                 # TODO: handle the case where there might be no usage in the stream
-                return AsyncPromptyStream("AzureOpenAIExecutorAsync", response)
+                return AsyncPromptyStream("AzureOpenAIExecutorAsync", r)
             else:
-                return AsyncPromptyStream("AzureOpenAIExecutorAsync", response)
+                return AsyncPromptyStream("AzureOpenAIExecutorAsync", r)
         else:
-            return response
+            return r
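`invoke` and `invoke_async` now collect the result in a local `r` (initialized to `None`, so an unrecognized `api` value falls through and returns `None`) before wrapping iterator results in `PromptyStream`/`AsyncPromptyStream`. A hedged end-to-end sketch of what routes through this dispatch; the exact top-level signature may differ in 1.0.0b1, this mirrors the pattern from earlier releases:

    import prompty
    import prompty.azure  # registers the Azure executor and processor

    # executes render -> parse -> AzureOpenAIExecutor.invoke -> processor
    result = prompty.execute(
        "tests/prompts/basic.prompty",
        inputs={"firstName": "Seth", "question": "What can you do?"},
    )
    print(result)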
--- prompty-1.0.0a2/prompty/azure/processor.py
+++ prompty-1.0.0b1/prompty/azure/processor.py
@@ -42,13 +42,6 @@ class AzureOpenAIProcessor(Invoker):
         any
             The response from the OpenAI/Azure API
         """
-        # agent invocations return the thread
-        # and the last message is the response
-        if self.prompty.model.api == "agent" and isinstance(data, list):
-            if isinstance(data[-1], dict):
-                return data[-1]["content"]
-            else:
-                return data
 
         if isinstance(data, ChatCompletion):
             response = data.choices[0].message
@@ -113,7 +106,7 @@ class AzureOpenAIProcessor(Invoker):
         else:
             return data
 
-    async def invoke_async(self, data: str) -> typing.
+    async def invoke_async(self, data: str) -> typing.Any:
         """Invoke the Prompty Chat Parser (Async)
 
         Parameters
@@ -126,13 +119,6 @@ class AzureOpenAIProcessor(Invoker):
         str
             The parsed data
         """
-        # agent invocations return the thread
-        # and the last message is the response
-        if self.prompty.model.api == "agent" and isinstance(data, list):
-            if isinstance(data[-1], dict):
-                return data[-1]["content"]
-            else:
-                return data
 
         if isinstance(data, ChatCompletion):
             response = data.choices[0].message
@@ -179,7 +165,7 @@ class AzureOpenAIProcessor(Invoker):
             elif len(data.data) == 1:
                 return data.data[0].url if item.data[0].url else item.data[0].b64_json
             else:
-                return [item.url if item.url else item.b64_json for item in data.data]
+                return [str(item.url) if item.url else item.b64_json for item in data.data]
 
         elif isinstance(data, AsyncIterator):
--- prompty-1.0.0a2/prompty/common.py
+++ prompty-1.0.0b1/prompty/common.py
@@ -60,15 +60,39 @@ def convert_output_props(name: str, outputs: list[OutputProperty]) -> dict[str,
             "strict": True,
             "schema": {
                 "type": "object",
-                "properties": {
-                    p.name: {
-                        "type": p.type,
-                    }
-                    for p in outputs
-                },
-                "required": [p.name for p in outputs],
+                "properties": {p.name: _convert_output_object(p) for p in outputs},
+                "required": [p.name for p in outputs if p.required],
                 "additionalProperties": False,
             },
         },
     }
     return {}
+
+
+def _convert_output_object(output: OutputProperty) -> dict[str, typing.Any]:
+    """Convert an OutputProperty to a dictionary"""
+    if output.type == "array":
+        if output.items is None:
+            raise ValueError("Array type must have items defined")
+
+        o = _convert_output_object(output.items)
+        if "name" in o:
+            o.pop("name")
+
+        return {
+            "type": "array",
+            "items": o,
+        }
+    elif output.type == "object":
+        return {
+            "type": "object",
+            "properties": {prop.name: _convert_output_object(prop) for prop in output.properties},
+            "required": [prop.name for prop in output.properties if prop.required],
+            "additionalProperties": False,
+        }
+    else:
+        return {
+            "type": output.type,
+            "description": output.description,
+            **({"enum": output.enum} if output.enum else {}),
+        }
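`_convert_output_object` is what makes nested structured outputs possible: arrays recurse through `items`, objects recurse through `properties` (emitting `required` and `additionalProperties: False` for strict mode), and scalars carry their `description` plus an optional `enum`. A sketch that calls the private helper directly, with illustrative field names:

    from prompty.common import _convert_output_object
    from prompty.core import OutputProperty

    # hypothetical output shape: an array of objects with an optional field
    prop = OutputProperty(
        type="array",
        name="cities",
        items=OutputProperty(
            type="object",
            properties=[
                OutputProperty(type="string", name="name"),
                OutputProperty(type="number", name="temperature", required=False),
            ],
        ),
    )

    # -> {"type": "array", "items": {"type": "object", "properties": {...},
    #     "required": ["name"], "additionalProperties": False}}
    print(_convert_output_object(prop))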
--- prompty-1.0.0a2/prompty/core.py
+++ prompty-1.0.0b1/prompty/core.py
@@ -71,10 +71,13 @@ class OutputProperty:
     type: Literal["string", "number", "array", "object", "boolean"]
     name: str = field(default="")
     description: str = field(default="")
-
-
+    required: bool = field(default=True)
+    enum: list[typing.Any] = field(default_factory=list)
 
-
+    # for array types, items is a type of OutputProperty
+    items: Optional["OutputProperty"] = field(default=None)
+    # for object types, properties is a list of OutputProperty
+    properties: list["OutputProperty"] = field(default_factory=list)
 
 
 @dataclass
@@ -266,25 +269,6 @@ class Prompty:
 
         raise ValueError(f"Tool {name} not found")
 
-    def get_output(self, name: str) -> OutputProperty:
-        """Get the output property of the prompty
-
-        Parameters
-        ----------
-        name : str
-            The name of the property
-
-        Returns
-        -------
-        OutputProperty
-            The property of the prompty
-        """
-
-        for i in self.outputs:
-            if i.name == name:
-                return i
-        raise ValueError(f"Property {name} not found")
-
     def to_safe_dict(self) -> dict[str, typing.Any]:
         d: dict[str, typing.Any] = {}
         for items in fields(self):
@@ -598,13 +582,35 @@ class Prompty:
 
         return {**attributes, **prompty, "content": content}
 
+    @staticmethod
+    def _load_output(attributes: dict) -> OutputProperty:
+        if "type" in attributes and attributes["type"] == "array":
+            items = attributes.pop("items", [])
+            attributes["items"] = Prompty._load_output({"name": "item", **items})
+
+        elif "type" in attributes and attributes["type"] == "object":
+            p = attributes.pop("properties", [])
+            if isinstance(p, dict):
+                p = [{"name": k, **v} for k, v in p.items()]
+
+            properties = [Prompty._load_output(i) for i in p]
+            attributes["properties"] = properties
+
+        return OutputProperty(**attributes)
+
     @staticmethod
     def load_raw(attributes: dict, file: Path) -> "Prompty":
+        # normalize outputs
+        outputs = []
+        if "outputs" in attributes:
+            outputs = attributes.pop("outputs")
+            if isinstance(outputs, dict):
+                outputs = [{"name": k, **v} for k, v in outputs.items()]
 
         prompty = Prompty(
             model=ModelProperty(**attributes.pop("model")),
             inputs=[InputProperty(**i) for i in attributes.pop("inputs", [])],
-            outputs=[OutputProperty(**i) for i in attributes.pop("outputs", [])],
+            outputs=[Prompty._load_output(i) for i in outputs],
             tools=Prompty.load_tools(attributes.pop("tools", [])),
             template=TemplateProperty(**attributes.pop("template")),
             file=file,
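`load_raw` now accepts `outputs` in either list form or the more natural mapping form: a mapping is normalized into a list of dicts with each key folded in as `name`, and `_load_output` then recurses into `items` and `properties`. Roughly (a sketch of the normalization, using illustrative field names):

    # frontmatter mapping form:
    #   outputs:
    #     answer:
    #       type: string
    #     tags:
    #       type: array
    #       items:
    #         type: string
    outputs = {"answer": {"type": "string"},
               "tags": {"type": "array", "items": {"type": "string"}}}

    # normalized to the list form that OutputProperty objects are built from
    normalized = [{"name": k, **v} for k, v in outputs.items()]
    # -> [{"name": "answer", "type": "string"},
    #     {"name": "tags", "type": "array", "items": {"type": "string"}}]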
--- prompty-1.0.0a2/prompty/invoker.py
+++ prompty-1.0.0b1/prompty/invoker.py
@@ -270,6 +270,7 @@ class Parser(Invoker):
         """
         pass
 
+    @trace
     def run(self, data: typing.Any) -> typing.Any:
         """Method to run the invoker
 
@@ -287,6 +288,7 @@ class Parser(Invoker):
         parsed = self.invoke(data)
         return self.process(parsed)
 
+    @trace
     async def run_async(self, data: typing.Any) -> typing.Any:
         """Method to run the invoker asynchronously
 
@@ -304,6 +306,9 @@ class Parser(Invoker):
         return self.process(parsed)
 
 
+InvokerTypes = Literal["renderer", "parser", "executor", "processor"]
+
+
 class InvokerFactory:
     """Factory class for Invoker"""
 
@@ -367,7 +372,7 @@ class InvokerFactory:
     @classmethod
     def _get_name(
         cls,
-        type: Literal["renderer", "parser", "executor", "processor"],
+        type: InvokerTypes,
         prompty: Prompty,
     ) -> str:
         if type == "renderer":
@@ -384,7 +389,7 @@ class InvokerFactory:
     @classmethod
     def _get_invoker(
         cls,
-        type: Literal["renderer", "parser", "executor", "processor"],
+        type: InvokerTypes,
         prompty: Prompty,
     ) -> Invoker:
         if type == "renderer":
@@ -421,7 +426,7 @@ class InvokerFactory:
     @classmethod
     def run(
         cls,
-        type: Literal["renderer", "parser", "executor", "processor"],
+        type: InvokerTypes,
         prompty: Prompty,
         data: typing.Any,
         default: typing.Any = None,
@@ -439,7 +444,7 @@ class InvokerFactory:
     @classmethod
     async def run_async(
         cls,
-        type: Literal["renderer", "parser", "executor", "processor"],
+        type: InvokerTypes,
         prompty: Prompty,
         data: typing.Any,
         default: typing.Any = None,
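The repeated inline `Literal[...]` annotations are replaced by the single `InvokerTypes` alias across `_get_name`, `_get_invoker`, `run`, and `run_async`. Registration itself is unchanged; a hedged sketch of a custom invoker plugging into the factory (the decorator name follows earlier prompty releases and is an assumption for 1.0.0b1, and the class is purely illustrative):

    import typing

    from prompty.core import Prompty
    from prompty.invoker import Invoker, InvokerFactory


    @InvokerFactory.register_renderer("noop")  # assumed registration API
    class NoOpRenderer(Invoker):
        def __init__(self, prompty: Prompty) -> None:
            super().__init__(prompty)

        def invoke(self, data: typing.Any) -> typing.Any:
            # a real renderer would substitute template variables into the content
            return data

        async def invoke_async(self, data: typing.Any) -> typing.Any:
            return self.invoke(data)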
--- /dev/null
+++ prompty-1.0.0b1/tests/agent/simple_agent.prompty
@@ -0,0 +1,49 @@
+---
+name: Researcher Agent
+description: A basic prompt that uses the GPT-3 chat API to answer questions
+metadata:
+  authors:
+    - sethjuarez
+    - jietong
+
+model:
+  api: agent
+  connection:
+    type: azure
+    azure_deployment: gpt-4o-mini
+
+tools:
+  - id: get_current_weather
+    type: function
+    description: Get the current weather for a given city.
+    parameters:
+      city:
+        type: string
+        description: The name of the city to get the weather for.
+        required: true
+      unit:
+        type: string
+        description: The unit of measurement for the temperature (Celsius or Fahrenheit).
+        enum:
+          - Celsius
+          - Fahrenheit
+
+inputs:
+  firstName: Seth
+  lastName: Juarez
+  question: What's the weather like in San Francisco, Tokyo, and Paris?
+---
+system:
+You are a helpful assistant that helps the user with the help of some functions.
+If you are using multiple tools to solve a user's task, make sure to communicate
+information learned from one tool to the next tool.
+For instance, if the user ask to draw a picture of the current weather in NYC,
+you can use the weather API to get the current weather in NYC and then pass that information
+to the image generation tool.
+
+# Customer
+You are helping {{firstName}} {{lastName}} to find answers to their questions.
+Use their name to address them in your responses.
+
+![thread]
+{{question}}