prompty 0.1.9__py3-none-any.whl → 0.1.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prompty/__init__.py +312 -117
- prompty/azure/__init__.py +10 -0
- prompty/azure/executor.py +218 -0
- prompty/azure/processor.py +142 -0
- prompty/cli.py +74 -28
- prompty/core.py +138 -221
- prompty/invoker.py +297 -0
- prompty/openai/__init__.py +10 -0
- prompty/openai/executor.py +114 -0
- prompty/{processors.py → openai/processor.py} +25 -15
- prompty/parsers.py +18 -1
- prompty/renderers.py +19 -2
- prompty/serverless/__init__.py +8 -0
- prompty/serverless/executor.py +153 -0
- prompty/serverless/processor.py +78 -0
- prompty/tracer.py +162 -22
- prompty/utils.py +105 -0
- prompty-0.1.33.dist-info/METADATA +218 -0
- prompty-0.1.33.dist-info/RECORD +22 -0
- {prompty-0.1.9.dist-info → prompty-0.1.33.dist-info}/WHEEL +1 -1
- prompty-0.1.33.dist-info/entry_points.txt +5 -0
- prompty/executors.py +0 -94
- prompty-0.1.9.dist-info/METADATA +0 -136
- prompty-0.1.9.dist-info/RECORD +0 -12
- {prompty-0.1.9.dist-info → prompty-0.1.33.dist-info}/licenses/LICENSE +0 -0
# __init__.py
from prompty.invoker import InvokerException

try:
    from .executor import OpenAIExecutor
    from .processor import OpenAIProcessor
except ImportError as e:
    # Chain the original ImportError so the underlying cause (typically the
    # `openai` package not being installed) remains visible in the traceback.
    raise InvokerException(
        "Error registering OpenAIExecutor and OpenAIProcessor", "openai"
    ) from e
import importlib.metadata
from typing import Any, Iterator

from openai import OpenAI

from prompty.tracer import Tracer
from ..core import Prompty, PromptyStream
from ..invoker import Invoker, InvokerFactory

VERSION = importlib.metadata.version("prompty")


@InvokerFactory.register_executor("openai")
class OpenAIExecutor(Invoker):
    """OpenAI Executor: sends rendered prompt data to the OpenAI API."""

    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)
        # Everything in the model configuration except "type" is forwarded
        # verbatim to the OpenAI client constructor.
        self.kwargs = {
            key: value
            for key, value in self.prompty.model.configuration.items()
            if key != "type"
        }

        self.api = self.prompty.model.api
        # NOTE(review): this reads the "azure_deployment" key even though this
        # is the plain-OpenAI executor (and the key is also left inside
        # self.kwargs passed to OpenAI(...)) — confirm against the prompty
        # model-configuration schema.
        self.deployment = self.prompty.model.configuration["azure_deployment"]
        self.parameters = self.prompty.model.parameters

    def invoke(self, data: Any) -> Any:
        """Invoke the OpenAI API

        Parameters
        ----------
        data : any
            The data to send to the OpenAI API (messages, prompt text,
            or embedding input depending on ``self.api``)

        Returns
        -------
        any
            The response from the OpenAI API (or a PromptyStream when the
            API returns a streaming iterator)

        Raises
        ------
        NotImplementedError
            For the "image" api and any unrecognized api value.
        """
        with Tracer.start("OpenAI") as trace:
            trace("type", "LLM")
            trace("signature", "OpenAI.ctor")
            trace("description", "OpenAI Constructor")
            trace("inputs", self.kwargs)
            client = OpenAI(
                default_headers={
                    "User-Agent": f"prompty/{VERSION}",
                    "x-ms-useragent": f"prompty/{VERSION}",
                },
                **self.kwargs,
            )
            trace("result", client)

        with Tracer.start("create") as trace:
            trace("type", "LLM")
            trace("description", "OpenAI Prompty Execution Invoker")

            if self.api == "chat":
                trace("signature", "OpenAI.chat.completions.create")
                args = {
                    "model": self.deployment,
                    "messages": data if isinstance(data, list) else [data],
                    **self.parameters,
                }
                trace("inputs", args)
                response = client.chat.completions.create(**args)

            elif self.api == "completion":
                trace("signature", "OpenAI.completions.create")
                args = {
                    "prompt": data.item,
                    "model": self.deployment,
                    **self.parameters,
                }
                trace("inputs", args)
                response = client.completions.create(**args)

            elif self.api == "embedding":
                trace("signature", "OpenAI.embeddings.create")
                args = {
                    "input": data if isinstance(data, list) else [data],
                    "model": self.deployment,
                    **self.parameters,
                }
                trace("inputs", args)
                response = client.embeddings.create(**args)

            elif self.api == "image":
                raise NotImplementedError("OpenAI Image API is not implemented yet")

            else:
                # Previously an unrecognized api fell through to
                # `return response` with `response` unbound (NameError);
                # fail explicitly instead.
                raise NotImplementedError(f"{self.api} api is not supported")

            # stream response: wrap iterators so consumers can trace chunks
            if isinstance(response, Iterator):
                stream = PromptyStream("OpenAIExecutor", response)
                trace("result", stream)
                return stream
            else:
                trace("result", response)
                return response

    async def invoke_async(self, data: str) -> str:
        """Invoke the OpenAI API (async wrapper)

        NOTE(review): delegates to the synchronous ``invoke`` — no work is
        actually awaited here.

        Parameters
        ----------
        data : str
            The data to send to the OpenAI API

        Returns
        -------
        str
            The response from the OpenAI API
        """
        return self.invoke(data)
@@ -1,38 +1,30 @@
|
|
1
1
|
from typing import Iterator
|
2
|
-
from pydantic import BaseModel
|
3
2
|
from openai.types.completion import Completion
|
4
3
|
from openai.types.chat.chat_completion import ChatCompletion
|
5
|
-
from
|
4
|
+
from ..invoker import Invoker, InvokerFactory
|
5
|
+
from ..core import Prompty, PromptyStream, ToolCall
|
6
6
|
from openai.types.create_embedding_response import CreateEmbeddingResponse
|
7
7
|
|
8
8
|
|
9
|
-
class ToolCall(BaseModel):
|
10
|
-
id: str
|
11
|
-
name: str
|
12
|
-
arguments: str
|
13
|
-
|
14
|
-
|
15
9
|
@InvokerFactory.register_processor("openai")
|
16
|
-
@InvokerFactory.register_processor("azure")
|
17
|
-
@InvokerFactory.register_processor("azure_openai")
|
18
10
|
class OpenAIProcessor(Invoker):
|
19
|
-
"""OpenAI
|
11
|
+
"""OpenAI Processor"""
|
20
12
|
|
21
13
|
def __init__(self, prompty: Prompty) -> None:
|
22
14
|
super().__init__(prompty)
|
23
15
|
|
24
16
|
def invoke(self, data: any) -> any:
|
25
|
-
"""Invoke the OpenAI
|
17
|
+
"""Invoke the OpenAI API
|
26
18
|
|
27
19
|
Parameters
|
28
20
|
----------
|
29
21
|
data : any
|
30
|
-
The data to send to the OpenAI
|
22
|
+
The data to send to the OpenAI API
|
31
23
|
|
32
24
|
Returns
|
33
25
|
-------
|
34
26
|
any
|
35
|
-
The response from the OpenAI
|
27
|
+
The response from the OpenAI API
|
36
28
|
"""
|
37
29
|
if isinstance(data, ChatCompletion):
|
38
30
|
response = data.choices[0].message
|
@@ -62,10 +54,28 @@ class OpenAIProcessor(Invoker):
|
|
62
54
|
|
63
55
|
def generator():
|
64
56
|
for chunk in data:
|
65
|
-
if
|
57
|
+
if (
|
58
|
+
len(chunk.choices) == 1
|
59
|
+
and chunk.choices[0].delta.content != None
|
60
|
+
):
|
66
61
|
content = chunk.choices[0].delta.content
|
67
62
|
yield content
|
68
63
|
|
69
64
|
return PromptyStream("OpenAIProcessor", generator())
|
70
65
|
else:
|
71
66
|
return data
|
67
|
+
|
68
|
+
async def invoke_async(self, data: str) -> str:
|
69
|
+
"""Invoke the Prompty Chat Parser (Async)
|
70
|
+
|
71
|
+
Parameters
|
72
|
+
----------
|
73
|
+
data : str
|
74
|
+
The data to parse
|
75
|
+
|
76
|
+
Returns
|
77
|
+
-------
|
78
|
+
str
|
79
|
+
The parsed data
|
80
|
+
"""
|
81
|
+
return self.invoke(data)
|
prompty/parsers.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1
1
|
import re
|
2
2
|
import base64
|
3
|
-
from .core import
|
3
|
+
from .core import Prompty
|
4
|
+
from .invoker import Invoker, InvokerFactory
|
4
5
|
|
5
6
|
|
6
7
|
@InvokerFactory.register_parser("prompty.chat")
|
@@ -137,3 +138,19 @@ class PromptyChatParser(Invoker):
|
|
137
138
|
messages.append({"role": role, "content": self.parse_content(content)})
|
138
139
|
|
139
140
|
return messages
|
141
|
+
|
142
|
+
|
143
|
+
async def invoke_async(self, data: str) -> str:
|
144
|
+
""" Invoke the Prompty Chat Parser (Async)
|
145
|
+
|
146
|
+
Parameters
|
147
|
+
----------
|
148
|
+
data : str
|
149
|
+
The data to parse
|
150
|
+
|
151
|
+
Returns
|
152
|
+
-------
|
153
|
+
str
|
154
|
+
The parsed data
|
155
|
+
"""
|
156
|
+
return self.invoke(data)
|
prompty/renderers.py
CHANGED
@@ -1,10 +1,12 @@
|
|
1
|
+
from .core import Prompty
|
1
2
|
from jinja2 import DictLoader, Environment
|
2
|
-
from .
|
3
|
+
from .invoker import Invoker, InvokerFactory
|
3
4
|
|
4
5
|
|
5
6
|
@InvokerFactory.register_renderer("jinja2")
|
6
7
|
class Jinja2Renderer(Invoker):
|
7
|
-
"""
|
8
|
+
"""Jinja2 Renderer"""
|
9
|
+
|
8
10
|
def __init__(self, prompty: Prompty) -> None:
|
9
11
|
super().__init__(prompty)
|
10
12
|
self.templates = {}
|
@@ -21,3 +23,18 @@ class Jinja2Renderer(Invoker):
|
|
21
23
|
t = env.get_template(self.name)
|
22
24
|
generated = t.render(**data)
|
23
25
|
return generated
|
26
|
+
|
27
|
+
async def invoke_async(self, data: str) -> str:
|
28
|
+
"""Invoke the Prompty Chat Parser (Async)
|
29
|
+
|
30
|
+
Parameters
|
31
|
+
----------
|
32
|
+
data : str
|
33
|
+
The data to parse
|
34
|
+
|
35
|
+
Returns
|
36
|
+
-------
|
37
|
+
str
|
38
|
+
The parsed data
|
39
|
+
"""
|
40
|
+
return self.invoke(data)
|
# __init__.py
from prompty.invoker import InvokerException

try:
    from .executor import ServerlessExecutor
    from .processor import ServerlessProcessor
except ImportError as e:
    # Chain the original ImportError so the underlying cause (typically the
    # `azure-ai-inference` package not being installed) remains visible.
    raise InvokerException(
        "Error registering ServerlessExecutor and ServerlessProcessor", "serverless"
    ) from e
import importlib.metadata
from typing import Any, Iterator

from azure.core.credentials import AzureKeyCredential
from azure.ai.inference import (
    ChatCompletionsClient,
    EmbeddingsClient,
)
from azure.ai.inference.models import (
    StreamingChatCompletions,
    AsyncStreamingChatCompletions,
)

from ..tracer import Tracer
from ..invoker import Invoker, InvokerFactory
from ..core import Prompty, PromptyStream, AsyncPromptyStream

VERSION = importlib.metadata.version("prompty")


@InvokerFactory.register_executor("serverless")
class ServerlessExecutor(Invoker):
    """Serverless Executor: invokes models through the Azure AI unified
    inference SDK (azure-ai-inference)."""

    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)

        # serverless configuration
        self.endpoint = self.prompty.model.configuration["endpoint"]
        self.model = self.prompty.model.configuration["model"]
        self.key = self.prompty.model.configuration["key"]

        # api type ("chat", "completion", "embedding", or "image")
        self.api = self.prompty.model.api

    def _response(self, response: Any) -> Any:
        """Wrap streaming responses in the matching Prompty stream type;
        pass non-streaming responses through unchanged."""
        if isinstance(response, Iterator):
            if isinstance(response, StreamingChatCompletions):
                return PromptyStream("ServerlessExecutor", response)
            elif isinstance(response, AsyncStreamingChatCompletions):
                return AsyncPromptyStream("ServerlessExecutor", response)
            else:
                # Unknown iterator type — default to the sync stream wrapper.
                return PromptyStream("ServerlessExecutor", response)
        else:
            return response

    def invoke(self, data: Any) -> Any:
        """Invoke the Serverless SDK

        Parameters
        ----------
        data : any
            The data to send to the Serverless SDK (chat messages or
            embedding input depending on ``self.api``)

        Returns
        -------
        any
            The response from the Serverless SDK (or a Prompty stream
            wrapper for streaming responses)

        Raises
        ------
        NotImplementedError
            For the "completion" and "image" apis and any unrecognized
            api value.
        """

        # Shared client-constructor arguments for both client types.
        cargs = {
            "endpoint": self.endpoint,
            "credential": AzureKeyCredential(self.key),
        }

        if self.api == "chat":
            with Tracer.start("ChatCompletionsClient") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.ChatCompletionsClient.ctor")
                trace(
                    "description", "Azure Unified Inference SDK Chat Completions Client"
                )
                trace("inputs", cargs)
                client = ChatCompletionsClient(
                    user_agent=f"prompty/{VERSION}",
                    **cargs,
                )
                trace("result", client)

            with Tracer.start("complete") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.ChatCompletionsClient.complete")
                trace(
                    "description", "Azure Unified Inference SDK Chat Completions Client"
                )
                eargs = {
                    "model": self.model,
                    "messages": data if isinstance(data, list) else [data],
                    **self.prompty.model.parameters,
                }
                trace("inputs", eargs)
                r = client.complete(**eargs)
                trace("result", r)

            response = self._response(r)

        elif self.api == "completion":
            raise NotImplementedError(
                "Serverless Completions API is not implemented yet"
            )

        elif self.api == "embedding":
            with Tracer.start("EmbeddingsClient") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.EmbeddingsClient.ctor")
                trace("description", "Azure Unified Inference SDK Embeddings Client")
                trace("inputs", cargs)
                client = EmbeddingsClient(
                    user_agent=f"prompty/{VERSION}",
                    **cargs,
                )
                trace("result", client)

            # Trace labels were copy-pasted from the chat branch; they now
            # reflect the embeddings call actually being made.
            with Tracer.start("embed") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.EmbeddingsClient.embed")
                trace(
                    "description", "Azure Unified Inference SDK Embeddings Client"
                )
                eargs = {
                    "model": self.model,
                    "input": data if isinstance(data, list) else [data],
                    **self.prompty.model.parameters,
                }
                trace("inputs", eargs)
                # EmbeddingsClient exposes `embed`, not `complete` — the
                # original `client.complete(**eargs)` would raise
                # AttributeError at runtime.
                r = client.embed(**eargs)
                trace("result", r)

            response = self._response(r)

        elif self.api == "image":
            raise NotImplementedError("Serverless Image API is not implemented yet")

        else:
            # Previously an unrecognized api fell through to
            # `return response` with `response` unbound (NameError).
            raise NotImplementedError(f"{self.api} api is not supported")

        return response

    async def invoke_async(self, data: str) -> str:
        """Invoke the Serverless SDK (async wrapper)

        NOTE(review): delegates to the synchronous ``invoke`` — no work is
        actually awaited here.

        Parameters
        ----------
        data : str
            The data to send to the Serverless SDK

        Returns
        -------
        str
            The response from the Serverless SDK
        """
        return self.invoke(data)
from typing import Any, Iterator
from ..invoker import Invoker, InvokerFactory
from ..core import Prompty, PromptyStream, ToolCall

from azure.ai.inference.models import ChatCompletions, EmbeddingsResult


@InvokerFactory.register_processor("serverless")
class ServerlessProcessor(Invoker):
    """Serverless Processor: unwraps responses produced by the serverless
    (azure-ai-inference) executor."""

    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)

    def invoke(self, data: Any) -> Any:
        """Process a serverless inference response

        Parameters
        ----------
        data : any
            The response returned by the serverless executor

        Returns
        -------
        any
            The message content, a list of ToolCall, the embedding(s),
            or a PromptyStream for streaming responses; anything else is
            passed through unchanged.
        """
        if isinstance(data, ChatCompletions):
            response = data.choices[0].message
            # tool calls available in response
            if response.tool_calls:
                return [
                    ToolCall(
                        id=tool_call.id,
                        name=tool_call.function.name,
                        arguments=tool_call.function.arguments,
                    )
                    for tool_call in response.tool_calls
                ]
            else:
                return response.content

        elif isinstance(data, EmbeddingsResult):
            if len(data.data) == 0:
                raise ValueError("Invalid data")
            elif len(data.data) == 1:
                # single input -> return the bare embedding vector
                return data.data[0].embedding
            else:
                return [item.embedding for item in data.data]
        elif isinstance(data, Iterator):

            def generator():
                # Yield only chunks that actually carry content; skips
                # empty/heartbeat chunks. (`is not None` instead of the
                # original `!= None`.)
                for chunk in data:
                    if (
                        len(chunk.choices) == 1
                        and chunk.choices[0].delta.content is not None
                    ):
                        content = chunk.choices[0].delta.content
                        yield content

            return PromptyStream("ServerlessProcessor", generator())
        else:
            return data

    async def invoke_async(self, data: str) -> str:
        """Process a serverless inference response (async wrapper)

        NOTE(review): delegates to the synchronous ``invoke`` — no work is
        actually awaited here.

        Parameters
        ----------
        data : str
            The response data to process

        Returns
        -------
        str
            The processed data
        """
        return self.invoke(data)