prompty-0.1.10-py3-none-any.whl → prompty-0.1.33-py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only.
prompty/azure/executor.py ADDED
@@ -0,0 +1,218 @@
+ import azure.identity
+ import importlib.metadata
+ from typing import AsyncIterator, Iterator
+ from openai import AzureOpenAI, AsyncAzureOpenAI
+
+ from prompty.tracer import Tracer
+ from ..core import AsyncPromptyStream, Prompty, PromptyStream
+ from ..invoker import Invoker, InvokerFactory
+
+ VERSION = importlib.metadata.version("prompty")
+
+
+ @InvokerFactory.register_executor("azure")
+ @InvokerFactory.register_executor("azure_openai")
+ class AzureOpenAIExecutor(Invoker):
+     """Azure OpenAI Executor"""
+
+     def __init__(self, prompty: Prompty) -> None:
+         super().__init__(prompty)
+         self.kwargs = {
+             key: value
+             for key, value in self.prompty.model.configuration.items()
+             if key != "type"
+         }
+
+         # no key, use default credentials
+         if "api_key" not in self.kwargs:
+             # managed identity if client id
+             if "client_id" in self.kwargs:
+                 default_credential = azure.identity.ManagedIdentityCredential(
+                     client_id=self.kwargs.pop("client_id"),
+                 )
+             # default credential
+             else:
+                 default_credential = azure.identity.DefaultAzureCredential(
+                     exclude_shared_token_cache_credential=True
+                 )
+
+             self.kwargs["azure_ad_token_provider"] = (
+                 azure.identity.get_bearer_token_provider(
+                     default_credential, "https://cognitiveservices.azure.com/.default"
+                 )
+             )
+
+         self.api = self.prompty.model.api
+         self.deployment = self.prompty.model.configuration["azure_deployment"]
+         self.parameters = self.prompty.model.parameters
+
+     def invoke(self, data: any) -> any:
+         """Invoke the Azure OpenAI API
+
+         Parameters
+         ----------
+         data : any
+             The data to send to the Azure OpenAI API
+
+         Returns
+         -------
+         any
+             The response from the Azure OpenAI API
+         """
+
+         with Tracer.start("AzureOpenAI") as trace:
+             trace("type", "LLM")
+             trace("signature", "AzureOpenAI.ctor")
+             trace("description", "Azure OpenAI Constructor")
+             trace("inputs", self.kwargs)
+             client = AzureOpenAI(
+                 default_headers={
+                     "User-Agent": f"prompty/{VERSION}",
+                     "x-ms-useragent": f"prompty/{VERSION}",
+                 },
+                 **self.kwargs,
+             )
+             trace("result", client)
+
+         with Tracer.start("create") as trace:
+             trace("type", "LLM")
+             trace("description", "Azure OpenAI Client")
+
+             if self.api == "chat":
+                 trace("signature", "AzureOpenAI.chat.completions.create")
+                 args = {
+                     "model": self.deployment,
+                     "messages": data if isinstance(data, list) else [data],
+                     **self.parameters,
+                 }
+                 trace("inputs", args)
+                 response = client.chat.completions.create(**args)
+                 trace("result", response)
+
+             elif self.api == "completion":
+                 trace("signature", "AzureOpenAI.completions.create")
+                 args = {
+                     "prompt": data,
+                     "model": self.deployment,
+                     **self.parameters,
+                 }
+                 trace("inputs", args)
+                 response = client.completions.create(**args)
+                 trace("result", response)
+
+             elif self.api == "embedding":
+                 trace("signature", "AzureOpenAI.embeddings.create")
+                 args = {
+                     "input": data if isinstance(data, list) else [data],
+                     "model": self.deployment,
+                     **self.parameters,
+                 }
+                 trace("inputs", args)
+                 response = client.embeddings.create(**args)
+                 trace("result", response)
+
+             elif self.api == "image":
+                 trace("signature", "AzureOpenAI.images.generate")
+                 args = {
+                     "prompt": data,
+                     "model": self.deployment,
+                     **self.parameters,
+                 }
+                 trace("inputs", args)
+                 response = client.images.generate(**args)
+                 trace("result", response)
+
+         # stream response
+         if isinstance(response, Iterator):
+             if self.api == "chat":
+                 # TODO: handle the case where there might be no usage in the stream
+                 return PromptyStream("AzureOpenAIExecutor", response)
+             else:
+                 return PromptyStream("AzureOpenAIExecutor", response)
+         else:
+             return response
+
+     async def invoke_async(self, data: str) -> str:
+ """Invoke the Prompty Chat Parser (Async)
137
+
138
+ Parameters
139
+ ----------
140
+ data : str
141
+ The data to parse
142
+
143
+ Returns
144
+ -------
145
+ str
146
+ The parsed data
147
+ """
+         with Tracer.start("AzureOpenAIAsync") as trace:
+             trace("type", "LLM")
+             trace("signature", "AzureOpenAIAsync.ctor")
+             trace("description", "Async Azure OpenAI Constructor")
+             trace("inputs", self.kwargs)
+             client = AsyncAzureOpenAI(
+                 default_headers={
+                     "User-Agent": f"prompty/{VERSION}",
+                     "x-ms-useragent": f"prompty/{VERSION}",
+                 },
+                 **self.kwargs,
+             )
+             trace("result", client)
+
+         with Tracer.start("create") as trace:
+             trace("type", "LLM")
+             trace("description", "Azure OpenAI Client")
+
+             if self.api == "chat":
+                 trace("signature", "AzureOpenAIAsync.chat.completions.create")
+                 args = {
+                     "model": self.deployment,
+                     "messages": data if isinstance(data, list) else [data],
+                     **self.parameters,
+                 }
+                 trace("inputs", args)
+                 response = await client.chat.completions.create(**args)
+                 trace("result", response)
+
+             elif self.api == "completion":
+                 trace("signature", "AzureOpenAIAsync.completions.create")
+                 args = {
+                     "prompt": data,
+                     "model": self.deployment,
+                     **self.parameters,
+                 }
+                 trace("inputs", args)
+                 response = await client.completions.create(**args)
+                 trace("result", response)
+
+             elif self.api == "embedding":
+                 trace("signature", "AzureOpenAIAsync.embeddings.create")
+                 args = {
+                     "input": data if isinstance(data, list) else [data],
+                     "model": self.deployment,
+                     **self.parameters,
+                 }
+                 trace("inputs", args)
+                 response = await client.embeddings.create(**args)
+                 trace("result", response)
+
+             elif self.api == "image":
+                 trace("signature", "AzureOpenAIAsync.images.generate")
+                 args = {
+                     "prompt": data,
+                     "model": self.deployment,
+                     **self.parameters,
+                 }
+                 trace("inputs", args)
+                 response = await client.images.generate(**args)
+                 trace("result", response)
+
+         # stream response
+         if isinstance(response, AsyncIterator):
+             if self.api == "chat":
+                 # TODO: handle the case where there might be no usage in the stream
+                 return AsyncPromptyStream("AzureOpenAIExecutorAsync", response)
+             else:
+                 return AsyncPromptyStream("AzureOpenAIExecutorAsync", response)
+         else:
+             return response
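
For orientation, a minimal usage sketch of the new executor (illustrative, not part of the diff; the chat.prompty file and its inputs are assumptions, while load/execute and the credential fallback come from the code above):

import prompty
import prompty.azure  # importing runs the @InvokerFactory.register_executor decorators

# chat.prompty is assumed to set type: azure_openai, azure_endpoint, and
# azure_deployment in its model configuration; with no api_key present,
# the executor falls back to DefaultAzureCredential as shown above.
p = prompty.load("chat.prompty")
result = prompty.execute(p, inputs={"question": "What does this executor do?"})
print(result)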
prompty/azure/processor.py ADDED
@@ -0,0 +1,142 @@
+ from typing import AsyncIterator, Iterator
+ from openai.types.completion import Completion
+ from openai.types.images_response import ImagesResponse
+ from openai.types.chat.chat_completion import ChatCompletion
+ from ..core import AsyncPromptyStream, Prompty, PromptyStream, ToolCall
+ from ..invoker import Invoker, InvokerFactory
+ from openai.types.create_embedding_response import CreateEmbeddingResponse
+
+
+ @InvokerFactory.register_processor("azure")
+ @InvokerFactory.register_processor("azure_openai")
+ class AzureOpenAIProcessor(Invoker):
+     """Azure OpenAI Processor"""
+
+     def __init__(self, prompty: Prompty) -> None:
+         super().__init__(prompty)
+
+     def invoke(self, data: any) -> any:
+         """Invoke the OpenAI/Azure API
+
+         Parameters
+         ----------
+         data : any
+             The data to send to the OpenAI/Azure API
+
+         Returns
+         -------
+         any
+             The response from the OpenAI/Azure API
+         """
+         if isinstance(data, ChatCompletion):
+             response = data.choices[0].message
+             # tool calls available in response
+             if response.tool_calls:
+                 return [
+                     ToolCall(
+                         id=tool_call.id,
+                         name=tool_call.function.name,
+                         arguments=tool_call.function.arguments,
+                     )
+                     for tool_call in response.tool_calls
+                 ]
+             else:
+                 return response.content
+
+         elif isinstance(data, Completion):
+             return data.choices[0].text
+         elif isinstance(data, CreateEmbeddingResponse):
+             if len(data.data) == 0:
+                 raise ValueError("Invalid data")
+             elif len(data.data) == 1:
+                 return data.data[0].embedding
+             else:
+                 return [item.embedding for item in data.data]
+         elif isinstance(data, ImagesResponse):
+             self.prompty.model.parameters
+             item: ImagesResponse = data
+
+             if len(data.data) == 0:
+                 raise ValueError("Invalid data")
+             elif len(data.data) == 1:
+                 return data.data[0].url if item.data[0].url else item.data[0].b64_json
+             else:
+                 return [item.url if item.url else item.b64_json for item in data.data]
+
+         elif isinstance(data, Iterator):
+
+             def generator():
+                 for chunk in data:
+                     if (
+                         len(chunk.choices) == 1
+                         and chunk.choices[0].delta.content != None
+                     ):
+                         content = chunk.choices[0].delta.content
+                         yield content
+
+             return PromptyStream("AzureOpenAIProcessor", generator())
+         else:
+             return data
+
+     async def invoke_async(self, data: str) -> str:
+ """Invoke the Prompty Chat Parser (Async)
83
+
84
+ Parameters
85
+ ----------
86
+ data : str
87
+ The data to parse
88
+
89
+ Returns
90
+ -------
91
+ str
92
+ The parsed data
93
+ """
+         if isinstance(data, ChatCompletion):
+             response = data.choices[0].message
+             # tool calls available in response
+             if response.tool_calls:
+                 return [
+                     ToolCall(
+                         id=tool_call.id,
+                         name=tool_call.function.name,
+                         arguments=tool_call.function.arguments,
+                     )
+                     for tool_call in response.tool_calls
+                 ]
+             else:
+                 return response.content
+
+         elif isinstance(data, Completion):
+             return data.choices[0].text
+         elif isinstance(data, CreateEmbeddingResponse):
+             if len(data.data) == 0:
+                 raise ValueError("Invalid data")
+             elif len(data.data) == 1:
+                 return data.data[0].embedding
+             else:
+                 return [item.embedding for item in data.data]
+         elif isinstance(data, ImagesResponse):
+             self.prompty.model.parameters
+             item: ImagesResponse = data
+
+             if len(data.data) == 0:
+                 raise ValueError("Invalid data")
+             elif len(data.data) == 1:
+                 return data.data[0].url if item.data[0].url else item.data[0].b64_json
+             else:
+                 return [item.url if item.url else item.b64_json for item in data.data]
+
+         elif isinstance(data, AsyncIterator):
+
+             async def generator():
+                 async for chunk in data:
+                     if (
+                         len(chunk.choices) == 1
+                         and chunk.choices[0].delta.content != None
+                     ):
+                         content = chunk.choices[0].delta.content
+                         yield content
+
+             return AsyncPromptyStream("AsyncAzureOpenAIProcessor", generator())
+         else:
+             return data
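
For orientation, a sketch of how the streaming branch above surfaces to callers (illustrative, not part of the diff; the file name and stream parameter are assumptions): when the model parameters include stream: true, the executor returns an iterator, the processor wraps each delta.content in generator(), and the caller receives an iterable PromptyStream.

import prompty
import prompty.azure

p = prompty.load("chat.prompty")  # assumed to set stream: true in its model parameters
for chunk in prompty.execute(p, inputs={"question": "hi"}):
    print(chunk, end="", flush=True)  # each chunk is one delta.content string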
prompty/cli.py CHANGED
@@ -1,17 +1,15 @@
  import os
  import json
  import click
-
+ import importlib

  from pathlib import Path
  from pydantic import BaseModel

- from . import load, execute
- from .tracer import trace, Trace, PromptyTracer
+ import prompty
+ from prompty.tracer import trace, PromptyTracer, console_tracer, Tracer

  from dotenv import load_dotenv

- load_dotenv()
- Trace.add_tracer("prompty", PromptyTracer())

  def normalize_path(p, create_dir=False) -> Path:
      path = Path(p)
@@ -28,57 +26,105 @@ def normalize_path(p, create_dir=False) -> Path:
      return path


+ def dynamic_import(module: str):
+     # built in modules
+     if module == "azure" or module == "azure_openai":
+         t = "prompty.azure"
+     elif module == "serverless":
+         t = "prompty.serverless"
+     else:
+         t = module
+
+     print(f"Loading invokers from {t}")
+     importlib.import_module(t)
+
+
  @trace
  def chat_mode(prompt_path: str):
-     W = "\033[0m" # white (normal)
+     W = "\033[0m"  # white (normal)
      R = "\033[31m" # red
      G = "\033[32m" # green
      O = "\033[33m" # orange
      B = "\033[34m" # blue
      P = "\033[35m" # purple
      print(f"Executing {str(prompt_path)} in chat mode...")
-     prompty = load(str(prompt_path))
-     if "chat_history" not in prompty.sample:
+     p = prompty.load(str(prompt_path))
+     if "chat_history" not in p.sample:
          print(
              f"{R}{str(prompt_path)} needs to have a chat_history input to work in chat mode{W}"
          )
          return
      else:
-         chat_history = prompty.sample["chat_history"]
-         while True:
-             user_input = input(f"{B}User:{W} ")
-             if user_input == "exit":
-                 break
-             chat_history.append({"role": "user", "content": user_input})
-             # reloadable prompty file
-             result = execute(prompt_path, inputs={"chat_history": chat_history})
-             print(f"\n{G}Assistant:{W} {result}\n")
-             chat_history.append({"role": "assistant", "content": result})
-     print("Goodbye!")
+
+         try:
+             # load executor / processor types
+             dynamic_import(p.model.configuration["type"])
+             chat_history = p.sample["chat_history"]
+             while True:
+                 user_input = input(f"\n{B}User:{W} ")
+                 if user_input == "exit":
+                     break
+                 # reloadable prompty file
+                 chat_history.append({"role": "user", "content": user_input})
+                 result = prompty.execute(
+                     prompt_path, inputs={"chat_history": chat_history}
+                 )
+                 print(f"\n{G}Assistant:{W} {result}")
+                 chat_history.append({"role": "assistant", "content": result})
+         except Exception as e:
+             print(f"{type(e).__qualname__}: {e}")
+
+     print(f"\n{R}Goodbye!{W}\n")
+
+
+ @trace
+ def execute(prompt_path: str, raw=False):
+     p = prompty.load(prompt_path)
+
+     try:
+         # load executor / processor types
+         dynamic_import(p.model.configuration["type"])
+
+         result = prompty.execute(p, raw=raw)
+         if issubclass(type(result), BaseModel):
+             print("\n", json.dumps(result.model_dump(), indent=4), "\n")
+         elif isinstance(result, list):
+             print(
+                 "\n", json.dumps([item.model_dump() for item in result], indent=4), "\n"
+             )
+         else:
+             print("\n", result, "\n")
+     except Exception as e:
+         print(f"{type(e).__qualname__}: {e}", "\n")


  @click.command()
  @click.option("--source", "-s", required=True)
+ @click.option("--env", "-e", required=False)
  @click.option("--verbose", "-v", is_flag=True)
  @click.option("--chat", "-c", is_flag=True)
  @click.version_option()
- @trace
- def run(source, verbose, chat):
+ def run(source, env, verbose, chat):
+     # load external env file
+     if env:
+         print(f"Loading environment variables from {env}")
+         load_dotenv(env)
+
      prompt_path = normalize_path(source)
      if not prompt_path.exists():
          print(f"{str(prompt_path)} does not exist")
          return

+     if verbose:
+         Tracer.add("console", console_tracer)
+
+     ptrace = PromptyTracer()
+     Tracer.add("prompty", ptrace.tracer)
+
      if chat:
          chat_mode(str(prompt_path))
      else:
-         result = execute(str(prompt_path), raw=verbose)
-         if issubclass(type(result), BaseModel):
-             print(json.dumps(result.model_dump(), indent=4))
-         elif isinstance(result, list):
-             print(json.dumps([item.model_dump() for item in result], indent=4))
-         else:
-             print(result)
+         execute(str(prompt_path), raw=verbose)


  if __name__ == "__main__":
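
For orientation, a sketch of driving the updated command, including the new --env option (illustrative, not part of the diff; the .prompty and .env paths are assumptions). click's test runner calls run() the same way the console entry point would:

from click.testing import CliRunner

from prompty.cli import run

# --env loads a dotenv file before execution; --verbose adds the console
# tracer and passes raw=True through to execute().
result = CliRunner().invoke(
    run, ["--source", "./chat.prompty", "--env", ".env", "--verbose"]
)
print(result.output)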