prompty 0.1.10__py3-none-any.whl → 0.1.34__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,114 @@
1
+ import importlib.metadata
2
+ from openai import OpenAI
3
+ from typing import Iterator
4
+
5
+ from prompty.tracer import Tracer
6
+ from ..core import Prompty, PromptyStream
7
+ from ..invoker import Invoker, InvokerFactory
8
+
9
+ VERSION = importlib.metadata.version("prompty")
10
+
11
+
12
@InvokerFactory.register_executor("openai")
class OpenAIExecutor(Invoker):
    """Prompty executor that sends rendered prompts to the OpenAI API."""

    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)
        # Everything in the model configuration except "type" is forwarded
        # verbatim to the OpenAI client constructor.
        self.kwargs = {
            key: value
            for key, value in self.prompty.model.configuration.items()
            if key != "type"
        }

        self.api = self.prompty.model.api
        # NOTE(review): reading "azure_deployment" looks copy-pasted from the
        # Azure executor — plain OpenAI configurations normally carry a model
        # name, and this key is NOT excluded from self.kwargs above, so it is
        # also forwarded to OpenAI(**kwargs). Confirm the configuration schema
        # before changing.
        self.deployment = self.prompty.model.configuration["azure_deployment"]
        self.parameters = self.prompty.model.parameters

    def invoke(self, data: any) -> any:
        """Invoke the OpenAI API

        Parameters
        ----------
        data : any
            The data to send to the OpenAI API

        Returns
        -------
        any
            The response from the OpenAI API (or a PromptyStream when the
            API returns a streaming iterator)

        Raises
        ------
        NotImplementedError
            If the configured api kind is "image" or unrecognized.
        """
        with Tracer.start("OpenAI") as trace:
            trace("type", "LLM")
            trace("signature", "OpenAI.ctor")
            trace("description", "OpenAI Constructor")
            trace("inputs", self.kwargs)
            client = OpenAI(
                default_headers={
                    "User-Agent": f"prompty/{VERSION}",
                    "x-ms-useragent": f"prompty/{VERSION}",
                },
                **self.kwargs,
            )
            trace("result", client)

        with Tracer.start("create") as trace:
            trace("type", "LLM")
            trace("description", "OpenAI Prompty Execution Invoker")

            if self.api == "chat":
                trace("signature", "OpenAI.chat.completions.create")
                args = {
                    "model": self.deployment,
                    "messages": data if isinstance(data, list) else [data],
                    **self.parameters,
                }
                trace("inputs", args)
                response = client.chat.completions.create(**args)

            elif self.api == "completion":
                trace("signature", "OpenAI.completions.create")
                args = {
                    "prompt": data.item,
                    "model": self.deployment,
                    **self.parameters,
                }
                trace("inputs", args)
                response = client.completions.create(**args)

            elif self.api == "embedding":
                trace("signature", "OpenAI.embeddings.create")
                args = {
                    "input": data if isinstance(data, list) else [data],
                    "model": self.deployment,
                    **self.parameters,
                }
                trace("inputs", args)
                response = client.embeddings.create(**args)

            elif self.api == "image":
                raise NotImplementedError("OpenAI Image API is not implemented yet")

            else:
                # bug fix: an unrecognized api kind previously fell through to
                # the code below and raised UnboundLocalError on `response`.
                raise NotImplementedError(
                    f"OpenAI API type '{self.api}' is not supported"
                )

            # stream response
            if isinstance(response, Iterator):
                # bug fix: the stream was mislabeled "AzureOpenAIExecutor"
                # (copy-paste from the azure executor).
                stream = PromptyStream("OpenAIExecutor", response)
                trace("result", stream)
                return stream
            else:
                trace("result", response)
                return response

    async def invoke_async(self, data: str) -> str:
        """Invoke the OpenAI API (Async)

        NOTE(review): this delegates to the synchronous ``invoke`` and will
        block the event loop; a true async OpenAI client would be needed to
        make this genuinely asynchronous.

        Parameters
        ----------
        data : str
            The data to send to the OpenAI API

        Returns
        -------
        str
            The response from the OpenAI API
        """
        return self.invoke(data)
@@ -1,38 +1,30 @@
1
1
  from typing import Iterator
2
- from pydantic import BaseModel
3
2
  from openai.types.completion import Completion
4
3
  from openai.types.chat.chat_completion import ChatCompletion
5
- from .core import Invoker, InvokerFactory, Prompty, PromptyStream
4
+ from ..invoker import Invoker, InvokerFactory
5
+ from ..core import Prompty, PromptyStream, ToolCall
6
6
  from openai.types.create_embedding_response import CreateEmbeddingResponse
7
7
 
8
8
 
9
- class ToolCall(BaseModel):
10
- id: str
11
- name: str
12
- arguments: str
13
-
14
-
15
9
  @InvokerFactory.register_processor("openai")
16
- @InvokerFactory.register_processor("azure")
17
- @InvokerFactory.register_processor("azure_openai")
18
10
  class OpenAIProcessor(Invoker):
19
- """OpenAI/Azure Processor"""
11
+ """OpenAI Processor"""
20
12
 
21
13
  def __init__(self, prompty: Prompty) -> None:
22
14
  super().__init__(prompty)
23
15
 
24
16
  def invoke(self, data: any) -> any:
25
- """Invoke the OpenAI/Azure API
17
+ """Invoke the OpenAI API
26
18
 
27
19
  Parameters
28
20
  ----------
29
21
  data : any
30
- The data to send to the OpenAI/Azure API
22
+ The data to send to the OpenAI API
31
23
 
32
24
  Returns
33
25
  -------
34
26
  any
35
- The response from the OpenAI/Azure API
27
+ The response from the OpenAI API
36
28
  """
37
29
  if isinstance(data, ChatCompletion):
38
30
  response = data.choices[0].message
@@ -62,10 +54,28 @@ class OpenAIProcessor(Invoker):
62
54
 
63
55
  def generator():
64
56
  for chunk in data:
65
- if len(chunk.choices) == 1 and chunk.choices[0].delta.content != None:
57
+ if (
58
+ len(chunk.choices) == 1
59
+ and chunk.choices[0].delta.content != None
60
+ ):
66
61
  content = chunk.choices[0].delta.content
67
62
  yield content
68
63
 
69
64
  return PromptyStream("OpenAIProcessor", generator())
70
65
  else:
71
66
  return data
67
+
68
async def invoke_async(self, data: str) -> str:
    """Invoke the OpenAI Processor (Async)

    NOTE(review): delegates to the synchronous ``invoke`` and therefore
    blocks the event loop while processing. The original docstring said
    "Prompty Chat Parser" — a copy-paste from parsers.py.

    Parameters
    ----------
    data : str
        The data to process

    Returns
    -------
    str
        The processed data
    """
    return self.invoke(data)
prompty/parsers.py CHANGED
@@ -1,6 +1,7 @@
1
1
  import re
2
2
  import base64
3
- from .core import Invoker, InvokerFactory, Prompty
3
+ from .core import Prompty
4
+ from .invoker import Invoker, InvokerFactory
4
5
 
5
6
 
6
7
  @InvokerFactory.register_parser("prompty.chat")
@@ -137,3 +138,19 @@ class PromptyChatParser(Invoker):
137
138
  messages.append({"role": role, "content": self.parse_content(content)})
138
139
 
139
140
  return messages
141
+
142
+
143
async def invoke_async(self, data: str) -> str:
    """Invoke the Prompty Chat Parser (Async)

    NOTE(review): delegates to the synchronous ``invoke`` and therefore
    blocks the event loop while parsing.

    Parameters
    ----------
    data : str
        The data to parse

    Returns
    -------
    str
        The parsed data
    """
    return self.invoke(data)
prompty/renderers.py CHANGED
@@ -1,10 +1,12 @@
1
+ from .core import Prompty
1
2
  from jinja2 import DictLoader, Environment
2
- from .core import Invoker, InvokerFactory, Prompty
3
+ from .invoker import Invoker, InvokerFactory
3
4
 
4
5
 
5
6
  @InvokerFactory.register_renderer("jinja2")
6
7
  class Jinja2Renderer(Invoker):
7
- """ Jinja2 Renderer """
8
+ """Jinja2 Renderer"""
9
+
8
10
  def __init__(self, prompty: Prompty) -> None:
9
11
  super().__init__(prompty)
10
12
  self.templates = {}
@@ -21,3 +23,18 @@ class Jinja2Renderer(Invoker):
21
23
  t = env.get_template(self.name)
22
24
  generated = t.render(**data)
23
25
  return generated
26
+
27
async def invoke_async(self, data: str) -> str:
    """Invoke the Jinja2 Renderer (Async)

    NOTE(review): delegates to the synchronous ``invoke`` and therefore
    blocks the event loop while rendering. The original docstring said
    "Prompty Chat Parser" — a copy-paste from parsers.py.

    Parameters
    ----------
    data : str
        The data to render the template with

    Returns
    -------
    str
        The rendered template
    """
    return self.invoke(data)
@@ -0,0 +1,8 @@
1
# __init__.py
# Registers the serverless executor/processor pair; converts a missing
# optional dependency (azure-ai-inference) into an InvokerException.
from prompty.invoker import InvokerException

try:
    from .executor import ServerlessExecutor
    from .processor import ServerlessProcessor
except ImportError as e:
    # bug fix: chain the original ImportError (`from e`) so the actual
    # missing module is visible in the traceback instead of being swallowed.
    raise InvokerException(
        "Error registering ServerlessExecutor and ServerlessProcessor", "serverless"
    ) from e
@@ -0,0 +1,240 @@
1
+ import azure.identity
2
+ import importlib.metadata
3
+ from typing import Iterator
4
+ from azure.core.credentials import AzureKeyCredential
5
+ from azure.ai.inference import (
6
+ ChatCompletionsClient,
7
+ EmbeddingsClient,
8
+ )
9
+
10
+ from azure.ai.inference.aio import (
11
+ ChatCompletionsClient as AsyncChatCompletionsClient,
12
+ EmbeddingsClient as AsyncEmbeddingsClient,
13
+ )
14
+ from azure.ai.inference.models import (
15
+ StreamingChatCompletions,
16
+ AsyncStreamingChatCompletions,
17
+ )
18
+
19
+ from ..tracer import Tracer
20
+ from ..invoker import Invoker, InvokerFactory
21
+ from ..core import Prompty, PromptyStream, AsyncPromptyStream
22
+
23
+ VERSION = importlib.metadata.version("prompty")
24
+
25
+
26
@InvokerFactory.register_executor("serverless")
class ServerlessExecutor(Invoker):
    """Prompty executor for the Azure Unified Inference (serverless) SDK."""

    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)

        self.endpoint = self.prompty.model.configuration["endpoint"]
        self.model = self.prompty.model.configuration["model"]

        # bug fix: this test previously read `self.kwargs`, which is never
        # assigned on this class (it exists only on the OpenAI executor) and
        # raised AttributeError — inspect the model configuration instead.
        if "key" not in self.prompty.model.configuration:
            # no key, use default credentials
            self.credential = azure.identity.DefaultAzureCredential(
                exclude_shared_token_cache_credential=True
            )
        else:
            self.credential = AzureKeyCredential(
                self.prompty.model.configuration["key"]
            )

        # api type ("chat" or "embedding"; "completion"/"image" raise)
        self.api = self.prompty.model.api

    def _response(self, response: any) -> any:
        """Wrap streaming responses in a Prompty stream; pass others through.

        NOTE(review): AsyncStreamingChatCompletions is an *async* iterator
        and may not satisfy isinstance(..., Iterator); confirm async
        streaming responses actually reach the AsyncPromptyStream branch.
        """
        if isinstance(response, Iterator):
            if isinstance(response, StreamingChatCompletions):
                return PromptyStream("ServerlessExecutor", response)
            elif isinstance(response, AsyncStreamingChatCompletions):
                return AsyncPromptyStream("ServerlessExecutor", response)
            else:
                # any other iterator is treated as a synchronous stream
                return PromptyStream("ServerlessExecutor", response)
        else:
            return response

    def invoke(self, data: any) -> any:
        """Invoke the Serverless SDK

        Parameters
        ----------
        data : any
            The data to send to the Serverless SDK

        Returns
        -------
        any
            The response from the Serverless SDK

        Raises
        ------
        NotImplementedError
            If the configured api kind is "completion", "image", or
            unrecognized.
        """

        cargs = {
            "endpoint": self.endpoint,
            "credential": self.credential,
        }

        if self.api == "chat":
            with Tracer.start("ChatCompletionsClient") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.ChatCompletionsClient.ctor")
                trace(
                    "description", "Azure Unified Inference SDK Chat Completions Client"
                )
                trace("inputs", cargs)
                client = ChatCompletionsClient(
                    user_agent=f"prompty/{VERSION}",
                    **cargs,
                )
                trace("result", client)

            with Tracer.start("complete") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.ChatCompletionsClient.complete")
                trace(
                    "description", "Azure Unified Inference SDK Chat Completions Client"
                )
                eargs = {
                    "model": self.model,
                    "messages": data if isinstance(data, list) else [data],
                    **self.prompty.model.parameters,
                }
                trace("inputs", eargs)
                r = client.complete(**eargs)
                trace("result", r)

            response = self._response(r)

        elif self.api == "completion":
            raise NotImplementedError(
                "Serverless Completions API is not implemented yet"
            )

        elif self.api == "embedding":
            with Tracer.start("EmbeddingsClient") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.EmbeddingsClient.ctor")
                trace("description", "Azure Unified Inference SDK Embeddings Client")
                trace("inputs", cargs)
                client = EmbeddingsClient(
                    user_agent=f"prompty/{VERSION}",
                    **cargs,
                )
                trace("result", client)

            with Tracer.start("embed") as trace:
                trace("type", "LLM")
                # bug fix: these trace labels were copy-pasted from the chat
                # client ("ChatCompletionsClient.complete").
                trace("signature", "azure.ai.inference.EmbeddingsClient.embed")
                trace("description", "Azure Unified Inference SDK Embeddings Client")
                eargs = {
                    "model": self.model,
                    "input": data if isinstance(data, list) else [data],
                    **self.prompty.model.parameters,
                }
                trace("inputs", eargs)
                # bug fix: EmbeddingsClient has no `complete` method — the
                # embeddings entry point in azure-ai-inference is `embed`.
                r = client.embed(**eargs)
                trace("result", r)

            response = self._response(r)

        elif self.api == "image":
            raise NotImplementedError("Azure OpenAI Image API is not implemented yet")

        else:
            # bug fix: an unrecognized api kind previously fell through and
            # raised UnboundLocalError on `response`.
            raise NotImplementedError(
                f"Serverless API type '{self.api}' is not supported"
            )

        return response

    async def invoke_async(self, data: str) -> str:
        """Invoke the Serverless SDK (Async)

        Parameters
        ----------
        data : str
            The data to send to the Serverless SDK

        Returns
        -------
        str
            The response from the Serverless SDK

        Raises
        ------
        NotImplementedError
            If the configured api kind is "completion", "image", or
            unrecognized.
        """
        cargs = {
            "endpoint": self.endpoint,
            "credential": self.credential,
        }

        if self.api == "chat":
            with Tracer.start("ChatCompletionsClient") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.aio.ChatCompletionsClient.ctor")
                trace(
                    "description", "Azure Unified Inference SDK Async Chat Completions Client"
                )
                trace("inputs", cargs)
                client = AsyncChatCompletionsClient(
                    user_agent=f"prompty/{VERSION}",
                    **cargs,
                )
                trace("result", client)

            with Tracer.start("complete") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.ChatCompletionsClient.complete")
                trace(
                    "description", "Azure Unified Inference SDK Async Chat Completions Client"
                )
                eargs = {
                    "model": self.model,
                    "messages": data if isinstance(data, list) else [data],
                    **self.prompty.model.parameters,
                }
                trace("inputs", eargs)
                r = await client.complete(**eargs)
                trace("result", r)

            response = self._response(r)

        elif self.api == "completion":
            raise NotImplementedError(
                "Serverless Completions API is not implemented yet"
            )

        elif self.api == "embedding":
            with Tracer.start("EmbeddingsClient") as trace:
                trace("type", "LLM")
                trace("signature", "azure.ai.inference.aio.EmbeddingsClient.ctor")
                trace("description", "Azure Unified Inference SDK Async Embeddings Client")
                trace("inputs", cargs)
                client = AsyncEmbeddingsClient(
                    user_agent=f"prompty/{VERSION}",
                    **cargs,
                )
                trace("result", client)

            with Tracer.start("embed") as trace:
                trace("type", "LLM")
                # bug fix: trace labels were copy-pasted from the chat client.
                trace("signature", "azure.ai.inference.aio.EmbeddingsClient.embed")
                trace("description", "Azure Unified Inference SDK Async Embeddings Client")
                eargs = {
                    "model": self.model,
                    "input": data if isinstance(data, list) else [data],
                    **self.prompty.model.parameters,
                }
                trace("inputs", eargs)
                # bug fix: the async EmbeddingsClient also exposes `embed`,
                # not `complete`.
                r = await client.embed(**eargs)
                trace("result", r)

            response = self._response(r)

        elif self.api == "image":
            raise NotImplementedError("Azure OpenAI Image API is not implemented yet")

        else:
            raise NotImplementedError(
                f"Serverless API type '{self.api}' is not supported"
            )

        return response
@@ -0,0 +1,113 @@
1
+ from typing import AsyncIterator, Iterator
2
+ from ..invoker import Invoker, InvokerFactory
3
+ from ..core import AsyncPromptyStream, Prompty, PromptyStream, ToolCall
4
+
5
+ from azure.ai.inference.models import ChatCompletions, EmbeddingsResult
6
+
7
+
8
@InvokerFactory.register_processor("serverless")
class ServerlessProcessor(Invoker):
    """Serverless (Azure Unified Inference) response processor."""
    # bug fix: the docstring previously said "OpenAI Processor" (copy-paste).

    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)

    def invoke(self, data: any) -> any:
        """Process a serverless SDK response into plain Python values.

        Parameters
        ----------
        data : any
            A ChatCompletions result, an EmbeddingsResult, a streaming
            iterator of chat chunks, or anything else (passed through).

        Returns
        -------
        any
            Message content (str), a list of ToolCall, embedding vector(s),
            a PromptyStream for streaming input, or `data` unchanged.

        Raises
        ------
        ValueError
            If an EmbeddingsResult carries no embeddings.
        """
        if isinstance(data, ChatCompletions):
            response = data.choices[0].message
            # tool calls available in response
            if response.tool_calls:
                return [
                    ToolCall(
                        id=tool_call.id,
                        name=tool_call.function.name,
                        arguments=tool_call.function.arguments,
                    )
                    for tool_call in response.tool_calls
                ]
            else:
                return response.content

        elif isinstance(data, EmbeddingsResult):
            if len(data.data) == 0:
                raise ValueError("Invalid data")
            elif len(data.data) == 1:
                # single input: return the bare vector rather than a list of one
                return data.data[0].embedding
            else:
                return [item.embedding for item in data.data]
        elif isinstance(data, Iterator):

            def generator():
                for chunk in data:
                    # idiom fix: `is not None` instead of `!= None`
                    if (
                        len(chunk.choices) == 1
                        and chunk.choices[0].delta.content is not None
                    ):
                        content = chunk.choices[0].delta.content
                        yield content

            return PromptyStream("ServerlessProcessor", generator())
        else:
            return data

    async def invoke_async(self, data: str) -> str:
        """Process a serverless SDK response (Async).

        Mirrors ``invoke`` but wraps async streaming responses in an
        AsyncPromptyStream. The non-streaming branches are duplicated
        deliberately so that a *sync* Iterator passed here keeps the
        original pass-through behavior.

        Parameters
        ----------
        data : str
            The data to process

        Returns
        -------
        str
            The processed data
        """
        if isinstance(data, ChatCompletions):
            response = data.choices[0].message
            # tool calls available in response
            if response.tool_calls:
                return [
                    ToolCall(
                        id=tool_call.id,
                        name=tool_call.function.name,
                        arguments=tool_call.function.arguments,
                    )
                    for tool_call in response.tool_calls
                ]
            else:
                return response.content

        elif isinstance(data, EmbeddingsResult):
            if len(data.data) == 0:
                raise ValueError("Invalid data")
            elif len(data.data) == 1:
                return data.data[0].embedding
            else:
                return [item.embedding for item in data.data]
        elif isinstance(data, AsyncIterator):

            async def generator():
                async for chunk in data:
                    # idiom fix: `is not None` instead of `!= None`
                    if (
                        len(chunk.choices) == 1
                        and chunk.choices[0].delta.content is not None
                    ):
                        content = chunk.choices[0].delta.content
                        yield content

            return AsyncPromptyStream("ServerlessProcessor", generator())
        else:
            return data