prompty 0.1.33__tar.gz → 0.1.34__tar.gz
- {prompty-0.1.33 → prompty-0.1.34}/PKG-INFO +2 -1
- {prompty-0.1.33 → prompty-0.1.34}/prompty/serverless/executor.py +91 -4
- {prompty-0.1.33 → prompty-0.1.34}/prompty/serverless/processor.py +38 -3
- {prompty-0.1.33 → prompty-0.1.34}/prompty/tracer.py +16 -20
- {prompty-0.1.33 → prompty-0.1.34}/pyproject.toml +2 -1
- {prompty-0.1.33 → prompty-0.1.34}/tests/test_tracing.py +2 -2
- {prompty-0.1.33 → prompty-0.1.34}/LICENSE +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/README.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/__init__.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/azure/__init__.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/azure/executor.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/azure/processor.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/cli.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/core.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/invoker.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/openai/__init__.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/openai/executor.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/openai/processor.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/parsers.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/renderers.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/serverless/__init__.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/prompty/utils.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/fake_azure_executor.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/fake_serverless_executor.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/1contoso.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/2contoso.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/3contoso.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/4contoso.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/basic.prompty.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/camping.jpg +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/context.prompty.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/contoso_multi.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/faithfulness.prompty.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/generated/groundedness.prompty.md +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/hello_world.embedding.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/__init__.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/basic.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/basic.prompty.execution.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/basic_json_output.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/camping.jpg +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/chat.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/context.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/context.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/context.prompty.execution.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/embedding.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/embedding.prompty.execution.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/evaluation.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/faithfulness.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/faithfulness.prompty.execution.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/fake.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/funcfile.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/funcfile.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/functions.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/functions.prompty.execution.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/groundedness.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/groundedness.prompty.execution.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/prompty.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/serverless.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/serverless.prompty.execution.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/serverless_stream.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/streaming.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/streaming.prompty.execution.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/sub/__init__.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/sub/basic.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/sub/sub/__init__.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/sub/sub/basic.prompty +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/sub/sub/prompty.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/sub/sub/test.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompts/test.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/prompty.json +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/test_common.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/test_execute.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/test_factory_invoker.py +0 -0
- {prompty-0.1.33 → prompty-0.1.34}/tests/test_path_exec.py +0 -0
{prompty-0.1.33 → prompty-0.1.34}/PKG-INFO CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prompty
-Version: 0.1.33
+Version: 0.1.34
 Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
 Author-Email: Seth Juarez <seth.juarez@microsoft.com>
 License: MIT
@@ -17,6 +17,7 @@ Requires-Dist: openai>=1.35.10; extra == "azure"
 Provides-Extra: openai
 Requires-Dist: openai>=1.35.10; extra == "openai"
 Provides-Extra: serverless
+Requires-Dist: azure-identity>=1.17.1; extra == "serverless"
 Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
 Description-Content-Type: text/markdown

```
{prompty-0.1.33 → prompty-0.1.34}/prompty/serverless/executor.py CHANGED

```diff
@@ -1,3 +1,4 @@
+import azure.identity
 import importlib.metadata
 from typing import Iterator
 from azure.core.credentials import AzureKeyCredential
@@ -5,6 +6,11 @@ from azure.ai.inference import (
     ChatCompletionsClient,
     EmbeddingsClient,
 )
+
+from azure.ai.inference.aio import (
+    ChatCompletionsClient as AsyncChatCompletionsClient,
+    EmbeddingsClient as AsyncEmbeddingsClient,
+)
 from azure.ai.inference.models import (
     StreamingChatCompletions,
     AsyncStreamingChatCompletions,
@@ -24,10 +30,18 @@ class ServerlessExecutor(Invoker):
     def __init__(self, prompty: Prompty) -> None:
         super().__init__(prompty)

-        # serverless configuration
         self.endpoint = self.prompty.model.configuration["endpoint"]
         self.model = self.prompty.model.configuration["model"]
-
+
+        # no key, use default credentials
+        if "key" not in self.kwargs:
+            self.credential = azure.identity.DefaultAzureCredential(
+                exclude_shared_token_cache_credential=True
+            )
+        else:
+            self.credential = AzureKeyCredential(
+                self.prompty.model.configuration["key"]
+            )

         # api type
         self.api = self.prompty.model.api
@@ -64,7 +78,7 @@ class ServerlessExecutor(Invoker):

         cargs = {
             "endpoint": self.endpoint,
-            "credential": AzureKeyCredential(self.prompty.model.configuration["key"]),
+            "credential": self.credential,
         }

         if self.api == "chat":
@@ -150,4 +164,77 @@ class ServerlessExecutor(Invoker):
         str
             The parsed data
         """
-
+        cargs = {
+            "endpoint": self.endpoint,
+            "credential": self.credential,
+        }
+
+        if self.api == "chat":
+            with Tracer.start("ChatCompletionsClient") as trace:
+                trace("type", "LLM")
+                trace("signature", "azure.ai.inference.aio.ChatCompletionsClient.ctor")
+                trace(
+                    "description", "Azure Unified Inference SDK Async Chat Completions Client"
+                )
+                trace("inputs", cargs)
+                client = AsyncChatCompletionsClient(
+                    user_agent=f"prompty/{VERSION}",
+                    **cargs,
+                )
+                trace("result", client)
+
+            with Tracer.start("complete") as trace:
+                trace("type", "LLM")
+                trace("signature", "azure.ai.inference.ChatCompletionsClient.complete")
+                trace(
+                    "description", "Azure Unified Inference SDK Async Chat Completions Client"
+                )
+                eargs = {
+                    "model": self.model,
+                    "messages": data if isinstance(data, list) else [data],
+                    **self.prompty.model.parameters,
+                }
+                trace("inputs", eargs)
+                r = await client.complete(**eargs)
+                trace("result", r)
+
+            response = self._response(r)
+
+        elif self.api == "completion":
+            raise NotImplementedError(
+                "Serverless Completions API is not implemented yet"
+            )
+
+        elif self.api == "embedding":
+            with Tracer.start("EmbeddingsClient") as trace:
+                trace("type", "LLM")
+                trace("signature", "azure.ai.inference.aio.EmbeddingsClient.ctor")
+                trace("description", "Azure Unified Inference SDK Async Embeddings Client")
+                trace("inputs", cargs)
+                client = AsyncEmbeddingsClient(
+                    user_agent=f"prompty/{VERSION}",
+                    **cargs,
+                )
+                trace("result", client)
+
+            with Tracer.start("complete") as trace:
+                trace("type", "LLM")
+                trace("signature", "azure.ai.inference.ChatCompletionsClient.complete")
+                trace(
+                    "description", "Azure Unified Inference SDK Chat Completions Client"
+                )
+                eargs = {
+                    "model": self.model,
+                    "input": data if isinstance(data, list) else [data],
+                    **self.prompty.model.parameters,
+                }
+                trace("inputs", eargs)
+                r = await client.complete(**eargs)
+                trace("result", r)
+
+            response = self._response(r)
+
+        elif self.api == "image":
+            raise NotImplementedError("Azure OpenAI Image API is not implemented yet")
+
+        return response
```
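The behavioral change in `__init__` is credential selection: when no `key` is supplied, the executor now falls back to `azure.identity.DefaultAzureCredential` instead of always requiring an API key. A minimal sketch of that selection logic, assuming the `azure-identity` and `azure-core` packages (the `pick_credential` helper and its dict arguments are illustrative, not part of the package):

```python
import azure.identity
from azure.core.credentials import AzureKeyCredential


def pick_credential(configuration: dict, kwargs: dict):
    """Mirror the executor's fallback: an explicit key wins, else ambient identity."""
    if "key" not in kwargs:
        # No key supplied: walk the default credential chain
        # (environment variables, managed identity, Azure CLI login, ...).
        return azure.identity.DefaultAzureCredential(
            exclude_shared_token_cache_credential=True
        )
    # Explicit key provided: authenticate with it directly.
    return AzureKeyCredential(configuration["key"])
```

This lets serverless endpoints work with `az login` or managed identity out of the box, which is why `azure-identity` joins the `serverless` extra in the metadata above.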
{prompty-0.1.33 → prompty-0.1.34}/prompty/serverless/processor.py CHANGED

```diff
@@ -1,6 +1,6 @@
-from typing import Iterator
+from typing import AsyncIterator, Iterator
 from ..invoker import Invoker, InvokerFactory
-from ..core import Prompty, PromptyStream, ToolCall
+from ..core import AsyncPromptyStream, Prompty, PromptyStream, ToolCall

 from azure.ai.inference.models import ChatCompletions, EmbeddingsResult

@@ -75,4 +75,39 @@ class ServerlessProcessor(Invoker):
         str
             The parsed data
         """
-
+        if isinstance(data, ChatCompletions):
+            response = data.choices[0].message
+            # tool calls available in response
+            if response.tool_calls:
+                return [
+                    ToolCall(
+                        id=tool_call.id,
+                        name=tool_call.function.name,
+                        arguments=tool_call.function.arguments,
+                    )
+                    for tool_call in response.tool_calls
+                ]
+            else:
+                return response.content
+
+        elif isinstance(data, EmbeddingsResult):
+            if len(data.data) == 0:
+                raise ValueError("Invalid data")
+            elif len(data.data) == 1:
+                return data.data[0].embedding
+            else:
+                return [item.embedding for item in data.data]
+        elif isinstance(data, AsyncIterator):
+
+            async def generator():
+                async for chunk in data:
+                    if (
+                        len(chunk.choices) == 1
+                        and chunk.choices[0].delta.content != None
+                    ):
+                        content = chunk.choices[0].delta.content
+                        yield content
+
+            return AsyncPromptyStream("ServerlessProcessor", generator())
+        else:
+            return data
```
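With `AsyncIterator` handling and `AsyncPromptyStream` wired in, streamed serverless responses can now be consumed with `async for`. A hedged usage sketch, assuming a streaming-enabled `.prompty` file (the path and the string/stream type check are illustrative):

```python
import asyncio

import prompty


async def main():
    # execute_async is the async entry point exercised in tests/test_tracing.py.
    result = await prompty.execute_async("prompts/streaming.prompty")
    if isinstance(result, str):
        # Non-streaming responses come back as plain text.
        print(result)
    else:
        # Streaming responses arrive as an async iterator of content chunks.
        async for chunk in result:
            print(chunk, end="")


asyncio.run(main())
```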
{prompty-0.1.33 → prompty-0.1.34}/prompty/tracer.py CHANGED

```diff
@@ -93,7 +93,9 @@ def _name(func: Callable, args):
     if core_invoker:
         name = type(args[0]).__name__
         if signature.endswith("async"):
-            signature = f"{args[0].__module__}.{args[0].__class__.__name__}.invoke_async"
+            signature = (
+                f"{args[0].__module__}.{args[0].__class__.__name__}.invoke_async"
+            )
         else:
             signature = f"{args[0].__module__}.{args[0].__class__.__name__}.invoke"
     else:
@@ -116,20 +118,19 @@ def _results(result: Any) -> dict:


 def _trace_sync(
-    func: Callable = None, *, description: str = None, itemtype: str = None
+    func: Callable = None, **okwargs: Any
 ) -> Callable:
-    description = description or ""

     @wraps(func)
     def wrapper(*args, **kwargs):
         name, signature = _name(func, args)
         with Tracer.start(name) as trace:
             trace("signature", signature)
-            if description and description != "":
-                trace("description", description)

-
-
+            # support arbitrary keyword
+            # arguments for trace decorator
+            for k, v in okwargs.items():
+                trace(k, to_dict(v))

             inputs = _inputs(func, args, kwargs)
             trace("inputs", inputs)
@@ -161,20 +162,19 @@ def _trace_sync(


 def _trace_async(
-    func: Callable = None, *, description: str = None, itemtype: str = None
+    func: Callable = None, **okwargs: Any
 ) -> Callable:
-    description = description or ""

     @wraps(func)
     async def wrapper(*args, **kwargs):
         name, signature = _name(func, args)
         with Tracer.start(name) as trace:
             trace("signature", signature)
-            if description and description != "":
-                trace("description", description)

-
-
+            # support arbitrary keyword
+            # arguments for trace decorator
+            for k, v in okwargs.items():
+                trace(k, to_dict(v))

             inputs = _inputs(func, args, kwargs)
             trace("inputs", inputs)
@@ -204,15 +204,11 @@ def _trace_async(
     return wrapper


-def trace(
-    func: Callable = None, *, description: str = None, itemtype: str = None
-) -> Callable:
+def trace(func: Callable = None, **kwargs: Any) -> Callable:
     if func is None:
-        return partial(trace, description=description, itemtype=itemtype)
-
+        return partial(trace, **kwargs)
     wrapped_method = _trace_async if inspect.iscoroutinefunction(func) else _trace_sync
-
-    return wrapped_method(func, description=description, itemtype=itemtype)
+    return wrapped_method(func, **kwargs)


 class PromptyTracer:
```
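`trace` now forwards arbitrary keyword arguments instead of only `description` and `itemtype`: every keyword becomes an attribute on the span via `trace(k, to_dict(v))`. A sketch of the new contract (the function bodies are illustrative; the `streaming=True, other="test"` usage mirrors the updated tests below):

```python
from prompty.tracer import trace


# Each decorator keyword is recorded on the function's trace span.
@trace(streaming=True, other="test")
def generate(topic: str) -> str:
    return f"a story about {topic}"


# Bare usage still works: when func is passed directly, no partial is needed.
@trace
def summarize(text: str) -> str:
    return text[:100]
```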
{prompty-0.1.33 → prompty-0.1.34}/pyproject.toml CHANGED

```diff
@@ -15,7 +15,7 @@ dependencies = [
     "click>=8.1.7",
     "aiofiles>=24.1.0",
 ]
-version = "0.1.33"
+version = "0.1.34"

 [project.license]
 text = "MIT"
@@ -29,6 +29,7 @@ openai = [
     "openai>=1.35.10",
 ]
 serverless = [
+    "azure-identity>=1.17.1",
     "azure-ai-inference>=1.0.0b3",
 ]

```
|
@@ -241,7 +241,7 @@ async def test_function_calling_async():
|
|
241
241
|
# need to add trace attribute to
|
242
242
|
# materialize stream into the function
|
243
243
|
# trace decorator
|
244
|
-
@trace
|
244
|
+
@trace(streaming=True, other="test")
|
245
245
|
def test_streaming():
|
246
246
|
result = prompty.execute(
|
247
247
|
"prompts/streaming.prompty",
|
@@ -254,7 +254,7 @@ def test_streaming():
|
|
254
254
|
|
255
255
|
|
256
256
|
@pytest.mark.asyncio
|
257
|
-
@trace
|
257
|
+
@trace(streaming=True)
|
258
258
|
async def test_streaming_async():
|
259
259
|
result = await prompty.execute_async(
|
260
260
|
"prompts/streaming.prompty",
|