prompty 0.1.18__tar.gz → 0.1.20__tar.gz

Sign up to get free protection for your applications and to get access to all the features.
Files changed (77) hide show
  1. {prompty-0.1.18 → prompty-0.1.20}/PKG-INFO +4 -4
  2. {prompty-0.1.18 → prompty-0.1.20}/README.md +3 -3
  3. prompty-0.1.20/prompty/azure/executor.py +132 -0
  4. {prompty-0.1.18 → prompty-0.1.20}/prompty/azure/processor.py +12 -0
  5. {prompty-0.1.18 → prompty-0.1.20}/prompty/core.py +3 -1
  6. prompty-0.1.20/prompty/openai/executor.py +98 -0
  7. prompty-0.1.20/prompty/serverless/executor.py +131 -0
  8. {prompty-0.1.18 → prompty-0.1.20}/prompty/tracer.py +42 -10
  9. {prompty-0.1.18 → prompty-0.1.20}/pyproject.toml +1 -1
  10. {prompty-0.1.18 → prompty-0.1.20}/tests/test_execute.py +2 -1
  11. {prompty-0.1.18 → prompty-0.1.20}/tests/test_tracing.py +4 -1
  12. prompty-0.1.18/prompty/azure/executor.py +0 -95
  13. prompty-0.1.18/prompty/openai/executor.py +0 -74
  14. prompty-0.1.18/prompty/serverless/executor.py +0 -84
  15. {prompty-0.1.18 → prompty-0.1.20}/LICENSE +0 -0
  16. {prompty-0.1.18 → prompty-0.1.20}/prompty/__init__.py +0 -0
  17. {prompty-0.1.18 → prompty-0.1.20}/prompty/azure/__init__.py +0 -0
  18. {prompty-0.1.18 → prompty-0.1.20}/prompty/cli.py +0 -0
  19. {prompty-0.1.18 → prompty-0.1.20}/prompty/openai/__init__.py +0 -0
  20. {prompty-0.1.18 → prompty-0.1.20}/prompty/openai/processor.py +0 -0
  21. {prompty-0.1.18 → prompty-0.1.20}/prompty/parsers.py +0 -0
  22. {prompty-0.1.18 → prompty-0.1.20}/prompty/renderers.py +0 -0
  23. {prompty-0.1.18 → prompty-0.1.20}/prompty/serverless/__init__.py +0 -0
  24. {prompty-0.1.18 → prompty-0.1.20}/prompty/serverless/processor.py +0 -0
  25. {prompty-0.1.18 → prompty-0.1.20}/tests/fake_azure_executor.py +0 -0
  26. {prompty-0.1.18 → prompty-0.1.20}/tests/fake_serverless_executor.py +0 -0
  27. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/1contoso.md +0 -0
  28. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/2contoso.md +0 -0
  29. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/3contoso.md +0 -0
  30. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/4contoso.md +0 -0
  31. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/basic.prompty.md +0 -0
  32. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/camping.jpg +0 -0
  33. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/context.prompty.md +0 -0
  34. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/contoso_multi.md +0 -0
  35. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/faithfulness.prompty.md +0 -0
  36. {prompty-0.1.18 → prompty-0.1.20}/tests/generated/groundedness.prompty.md +0 -0
  37. {prompty-0.1.18 → prompty-0.1.20}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  38. {prompty-0.1.18 → prompty-0.1.20}/tests/hello_world.embedding.json +0 -0
  39. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/__init__.py +0 -0
  40. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/basic.prompty +0 -0
  41. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/basic.prompty.execution.json +0 -0
  42. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/basic_json_output.prompty +0 -0
  43. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/camping.jpg +0 -0
  44. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/chat.prompty +0 -0
  45. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/context.json +0 -0
  46. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/context.prompty +0 -0
  47. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/context.prompty.execution.json +0 -0
  48. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/embedding.prompty +0 -0
  49. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/embedding.prompty.execution.json +0 -0
  50. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/evaluation.prompty +0 -0
  51. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/faithfulness.prompty +0 -0
  52. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  53. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/fake.prompty +0 -0
  54. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/funcfile.json +0 -0
  55. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/funcfile.prompty +0 -0
  56. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/functions.prompty +0 -0
  57. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/functions.prompty.execution.json +0 -0
  58. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/groundedness.prompty +0 -0
  59. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/groundedness.prompty.execution.json +0 -0
  60. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/prompty.json +0 -0
  61. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/serverless.prompty +0 -0
  62. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/serverless.prompty.execution.json +0 -0
  63. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/serverless_stream.prompty +0 -0
  64. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
  65. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/streaming.prompty +0 -0
  66. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/streaming.prompty.execution.json +0 -0
  67. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/sub/__init__.py +0 -0
  68. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/sub/basic.prompty +0 -0
  69. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/sub/sub/__init__.py +0 -0
  70. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/sub/sub/basic.prompty +0 -0
  71. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/sub/sub/prompty.json +0 -0
  72. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/sub/sub/test.py +0 -0
  73. {prompty-0.1.18 → prompty-0.1.20}/tests/prompts/test.py +0 -0
  74. {prompty-0.1.18 → prompty-0.1.20}/tests/prompty.json +0 -0
  75. {prompty-0.1.18 → prompty-0.1.20}/tests/test_common.py +0 -0
  76. {prompty-0.1.18 → prompty-0.1.20}/tests/test_factory_invoker.py +0 -0
  77. {prompty-0.1.18 → prompty-0.1.20}/tests/test_path_exec.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: prompty
3
- Version: 0.1.18
3
+ Version: 0.1.20
4
4
  Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
5
5
  Author-Email: Seth Juarez <seth.juarez@microsoft.com>
6
6
  Requires-Dist: pyyaml>=6.0.1
@@ -177,7 +177,7 @@ def get_response(customerId, prompt):
177
177
 
178
178
  ```
179
179
 
180
- In this case, whenever this code is executed, a `.ptrace` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
180
+ In this case, whenever this code is executed, a `.tracy` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
181
181
 
182
182
  ## OpenTelemetry Tracing
183
183
  You can add OpenTelemetry tracing to your application using the same hook mechanism. In your application, you might create something like `trace_span` to trace the execution of your prompts:
@@ -206,10 +206,10 @@ This will produce spans during the execution of the prompt that can be sent to a
206
206
  The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
207
207
 
208
208
  ```bash
209
- prompty -s path/to/prompty/file
209
+ prompty -s path/to/prompty/file -e .env
210
210
  ```
211
211
 
212
- This will execute the prompt and print the response to the console. It also has default tracing enabled.
212
+ This will execute the prompt and print the response to the console. If there are any environment variables the CLI should take into account, you can pass those in via the `-e` flag. It also has default tracing enabled.
213
213
 
214
214
  ## Contributing
215
215
  We welcome contributions to the Prompty project! This community led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
@@ -158,7 +158,7 @@ def get_response(customerId, prompt):
158
158
 
159
159
  ```
160
160
 
161
- In this case, whenever this code is executed, a `.ptrace` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
161
+ In this case, whenever this code is executed, a `.tracy` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
162
162
 
163
163
  ## OpenTelemetry Tracing
164
164
  You can add OpenTelemetry tracing to your application using the same hook mechanism. In your application, you might create something like `trace_span` to trace the execution of your prompts:
@@ -187,10 +187,10 @@ This will produce spans during the execution of the prompt that can be sent to a
187
187
  The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
188
188
 
189
189
  ```bash
190
- prompty -s path/to/prompty/file
190
+ prompty -s path/to/prompty/file -e .env
191
191
  ```
192
192
 
193
- This will execute the prompt and print the response to the console. It also has default tracing enabled.
193
+ This will execute the prompt and print the response to the console. If there are any environment variables the CLI should take into account, you can pass those in via the `-e` flag. It also has default tracing enabled.
194
194
 
195
195
  ## Contributing
196
196
  We welcome contributions to the Prompty project! This community led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
@@ -0,0 +1,132 @@
1
+ import azure.identity
2
+ import importlib.metadata
3
+ from typing import Iterator
4
+ from openai import AzureOpenAI
5
+
6
+ from prompty.tracer import Tracer
7
+ from ..core import Invoker, InvokerFactory, Prompty, PromptyStream
8
+
9
+ VERSION = importlib.metadata.version("prompty")
10
+
11
+
12
+ @InvokerFactory.register_executor("azure")
13
+ @InvokerFactory.register_executor("azure_openai")
14
+ class AzureOpenAIExecutor(Invoker):
15
+ """Azure OpenAI Executor"""
16
+
17
+ def __init__(self, prompty: Prompty) -> None:
18
+ super().__init__(prompty)
19
+ self.kwargs = {
20
+ key: value
21
+ for key, value in self.prompty.model.configuration.items()
22
+ if key != "type"
23
+ }
24
+
25
+ # no key, use default credentials
26
+ if "api_key" not in self.kwargs:
27
+ # managed identity if client id
28
+ if "client_id" in self.kwargs:
29
+ default_credential = azure.identity.ManagedIdentityCredential(
30
+ client_id=self.kwargs.pop("client_id"),
31
+ )
32
+ # default credential
33
+ else:
34
+ default_credential = azure.identity.DefaultAzureCredential(
35
+ exclude_shared_token_cache_credential=True
36
+ )
37
+
38
+ self.kwargs["azure_ad_token_provider"] = (
39
+ azure.identity.get_bearer_token_provider(
40
+ default_credential, "https://cognitiveservices.azure.com/.default"
41
+ )
42
+ )
43
+
44
+ self.api = self.prompty.model.api
45
+ self.deployment = self.prompty.model.configuration["azure_deployment"]
46
+ self.parameters = self.prompty.model.parameters
47
+
48
+ def invoke(self, data: any) -> any:
49
+ """Invoke the Azure OpenAI API
50
+
51
+ Parameters
52
+ ----------
53
+ data : any
54
+ The data to send to the Azure OpenAI API
55
+
56
+ Returns
57
+ -------
58
+ any
59
+ The response from the Azure OpenAI API
60
+ """
61
+
62
+ with Tracer.start("AzureOpenAI") as trace:
63
+ trace("type", "LLM")
64
+ trace("signature", "AzureOpenAI.ctor")
65
+ trace("description", "Azure OpenAI Constructor")
66
+ trace("inputs", self.kwargs)
67
+ client = AzureOpenAI(
68
+ default_headers={
69
+ "User-Agent": f"prompty/{VERSION}",
70
+ "x-ms-useragent": f"prompty/{VERSION}",
71
+ },
72
+ **self.kwargs,
73
+ )
74
+ trace("result", client)
75
+
76
+ with Tracer.start("create") as trace:
77
+ trace("type", "LLM")
78
+ trace("description", "Azure OpenAI Client")
79
+
80
+ if self.api == "chat":
81
+ trace("signature", "AzureOpenAI.chat.completions.create")
82
+ args = {
83
+ "model": self.deployment,
84
+ "messages": data if isinstance(data, list) else [data],
85
+ **self.parameters,
86
+ }
87
+ trace("inputs", args)
88
+ response = client.chat.completions.create(**args)
89
+ trace("result", response)
90
+
91
+ elif self.api == "completion":
92
+ trace("signature", "AzureOpenAI.completions.create")
93
+ args = {
94
+ "prompt": data,
95
+ "model": self.deployment,
96
+ **self.parameters,
97
+ }
98
+ trace("inputs", args)
99
+ response = client.completions.create(**args)
100
+ trace("result", response)
101
+
102
+ elif self.api == "embedding":
103
+ trace("signature", "AzureOpenAI.embeddings.create")
104
+ args = {
105
+ "input": data if isinstance(data, list) else [data],
106
+ "model": self.deployment,
107
+ **self.parameters,
108
+ }
109
+ trace("inputs", args)
110
+ response = client.embeddings.create(**args)
111
+ trace("result", response)
112
+
113
+ elif self.api == "image":
114
+ trace("signature", "AzureOpenAI.images.generate")
115
+ args = {
116
+ "prompt": data,
117
+ "model": self.deployment,
118
+ **self.parameters,
119
+ }
120
+ trace("inputs", args)
121
+ response = client.images.generate(**args)
122
+ trace("result", response)
123
+
124
+ # stream response
125
+ if isinstance(response, Iterator):
126
+ if self.api == "chat":
127
+ # TODO: handle the case where there might be no usage in the stream
128
+ return PromptyStream("AzureOpenAIExecutor", response)
129
+ else:
130
+ return PromptyStream("AzureOpenAIExecutor", response)
131
+ else:
132
+ return response
@@ -1,5 +1,6 @@
1
1
  from typing import Iterator
2
2
  from openai.types.completion import Completion
3
+ from openai.types.images_response import ImagesResponse
3
4
  from openai.types.chat.chat_completion import ChatCompletion
4
5
  from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
5
6
  from openai.types.create_embedding_response import CreateEmbeddingResponse
@@ -50,6 +51,17 @@ class AzureOpenAIProcessor(Invoker):
50
51
  return data.data[0].embedding
51
52
  else:
52
53
  return [item.embedding for item in data.data]
54
+ elif isinstance(data, ImagesResponse):
55
+ self.prompty.model.parameters
56
+ item: ImagesResponse = data
57
+
58
+ if len(data.data) == 0:
59
+ raise ValueError("Invalid data")
60
+ elif len(data.data) == 1:
61
+ return data.data[0].url if item.data[0].url else item.data[0].b64_json
62
+ else:
63
+ return [item.url if item.url else item.b64_json for item in data.data]
64
+
53
65
  elif isinstance(data, Iterator):
54
66
 
55
67
  def generator():
@@ -561,7 +561,9 @@ class AsyncPromptyStream(AsyncIterator):
561
561
  # StopIteration is raised
562
562
  # contents are exhausted
563
563
  if len(self.items) > 0:
564
- with Tracer.start(f"{self.name}.AsyncPromptyStream") as trace:
564
+ with Tracer.start("AsyncPromptyStream") as trace:
565
+ trace("signature", f"{self.name}.AsyncPromptyStream")
566
+ trace("inputs", "None")
565
567
  trace("result", [to_dict(s) for s in self.items])
566
568
 
567
569
  raise StopIteration
@@ -0,0 +1,98 @@
1
+ import importlib.metadata
2
+ from openai import OpenAI
3
+ from typing import Iterator
4
+
5
+ from prompty.tracer import Tracer
6
+ from ..core import Invoker, InvokerFactory, Prompty, PromptyStream
7
+
8
+ VERSION = importlib.metadata.version("prompty")
9
+
10
+
11
+ @InvokerFactory.register_executor("openai")
12
+ class OpenAIExecutor(Invoker):
13
+ """OpenAI Executor"""
14
+
15
+ def __init__(self, prompty: Prompty) -> None:
16
+ super().__init__(prompty)
17
+ self.kwargs = {
18
+ key: value
19
+ for key, value in self.prompty.model.configuration.items()
20
+ if key != "type"
21
+ }
22
+
23
+ self.api = self.prompty.model.api
24
+ self.deployment = self.prompty.model.configuration["azure_deployment"]
25
+ self.parameters = self.prompty.model.parameters
26
+
27
+ def invoke(self, data: any) -> any:
28
+ """Invoke the OpenAI API
29
+
30
+ Parameters
31
+ ----------
32
+ data : any
33
+ The data to send to the OpenAI API
34
+
35
+ Returns
36
+ -------
37
+ any
38
+ The response from the OpenAI API
39
+ """
40
+ with Tracer.start("OpenAI") as trace:
41
+ trace("type", "LLM")
42
+ trace("signature", "OpenAI.ctor")
43
+ trace("description", "OpenAI Constructor")
44
+ trace("inputs", self.kwargs)
45
+ client = OpenAI(
46
+ default_headers={
47
+ "User-Agent": f"prompty/{VERSION}",
48
+ "x-ms-useragent": f"prompty/{VERSION}",
49
+ },
50
+ **self.kwargs,
51
+ )
52
+ trace("result", client)
53
+
54
+ with Tracer.start("create") as trace:
55
+ trace("type", "LLM")
56
+ trace("description", "OpenAI Prompty Execution Invoker")
57
+
58
+ if self.api == "chat":
59
+ trace("signature", "OpenAI.chat.completions.create")
60
+ args = {
61
+ "model": self.deployment,
62
+ "messages": data if isinstance(data, list) else [data],
63
+ **self.parameters,
64
+ }
65
+ trace("inputs", args)
66
+ response = client.chat.completions.create(**args)
67
+
68
+ elif self.api == "completion":
69
+ trace("signature", "OpenAI.completions.create")
70
+ args = {
71
+ "prompt": data.item,
72
+ "model": self.deployment,
73
+ **self.parameters,
74
+ }
75
+ trace("inputs", args)
76
+ response = client.completions.create(**args)
77
+
78
+ elif self.api == "embedding":
79
+ trace("signature", "OpenAI.embeddings.create")
80
+ args = {
81
+ "input": data if isinstance(data, list) else [data],
82
+ "model": self.deployment,
83
+ **self.parameters,
84
+ }
85
+ trace("inputs", args)
86
+ response = client.embeddings.create(**args)
87
+
88
+ elif self.api == "image":
89
+ raise NotImplementedError("OpenAI Image API is not implemented yet")
90
+
91
+ # stream response
92
+ if isinstance(response, Iterator):
93
+ stream = PromptyStream("OpenAIExecutor", response)
94
+ trace("result", stream)
95
+ return stream
96
+ else:
97
+ trace("result", response)
98
+ return response
@@ -0,0 +1,131 @@
1
+ import importlib.metadata
2
+ from typing import Iterator
3
+ from azure.core.credentials import AzureKeyCredential
4
+ from azure.ai.inference import (
5
+ ChatCompletionsClient,
6
+ EmbeddingsClient,
7
+ )
8
+ from azure.ai.inference.models import (
9
+ StreamingChatCompletions,
10
+ AsyncStreamingChatCompletions,
11
+ )
12
+
13
+ from prompty.tracer import Tracer
14
+ from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, AsyncPromptyStream
15
+
16
+ VERSION = importlib.metadata.version("prompty")
17
+
18
+
19
+ @InvokerFactory.register_executor("serverless")
20
+ class ServerlessExecutor(Invoker):
21
+ """Azure OpenAI Executor"""
22
+
23
+ def __init__(self, prompty: Prompty) -> None:
24
+ super().__init__(prompty)
25
+
26
+ # serverless configuration
27
+ self.endpoint = self.prompty.model.configuration["endpoint"]
28
+ self.model = self.prompty.model.configuration["model"]
29
+ self.key = self.prompty.model.configuration["key"]
30
+
31
+ # api type
32
+ self.api = self.prompty.model.api
33
+
34
+ def _response(self, response: any) -> any:
35
+ # stream response
36
+ if isinstance(response, Iterator):
37
+ if isinstance(response, StreamingChatCompletions):
38
+ stream = PromptyStream("ServerlessExecutor", response)
39
+ return stream
40
+ elif isinstance(response, AsyncStreamingChatCompletions):
41
+ stream = AsyncPromptyStream("ServerlessExecutor", response)
42
+ return stream
43
+ else:
44
+ stream = PromptyStream("ServerlessExecutor", response)
45
+
46
+ return stream
47
+ else:
48
+ return response
49
+
50
+ def invoke(self, data: any) -> any:
51
+ """Invoke the Serverless SDK
52
+
53
+ Parameters
54
+ ----------
55
+ data : any
56
+ The data to send to the Serverless SDK
57
+
58
+ Returns
59
+ -------
60
+ any
61
+ The response from the Serverless SDK
62
+ """
63
+
64
+ cargs = {
65
+ "endpoint": self.endpoint,
66
+ "credential": AzureKeyCredential(self.key),
67
+ }
68
+
69
+ if self.api == "chat":
70
+ with Tracer.start("ChatCompletionsClient") as trace:
71
+ trace("type", "LLM")
72
+ trace("signature", "azure.ai.inference.ChatCompletionsClient.ctor")
73
+ trace("description", "Azure Unified Inference SDK Chat Completions Client")
74
+ trace("inputs", cargs)
75
+ client = ChatCompletionsClient(
76
+ user_agent=f"prompty/{VERSION}",
77
+ **cargs,
78
+ )
79
+ trace("result", client)
80
+
81
+ with Tracer.start("complete") as trace:
82
+ trace("type", "LLM")
83
+ trace("signature", "azure.ai.inference.ChatCompletionsClient.complete")
84
+ trace("description", "Azure Unified Inference SDK Chat Completions Client")
85
+ eargs = {
86
+ "model": self.model,
87
+ "messages": data if isinstance(data, list) else [data],
88
+ **self.prompty.model.parameters,
89
+ }
90
+ trace("inputs", eargs)
91
+ r = client.complete(**eargs)
92
+ trace("result", r)
93
+
94
+ response = self._response(r)
95
+
96
+ elif self.api == "completion":
97
+ raise NotImplementedError(
98
+ "Serverless Completions API is not implemented yet"
99
+ )
100
+
101
+ elif self.api == "embedding":
102
+ with Tracer.start("EmbeddingsClient") as trace:
103
+ trace("type", "LLM")
104
+ trace("signature", "azure.ai.inference.EmbeddingsClient.ctor")
105
+ trace("description", "Azure Unified Inference SDK Embeddings Client")
106
+ trace("inputs", cargs)
107
+ client = EmbeddingsClient(
108
+ user_agent=f"prompty/{VERSION}",
109
+ **cargs,
110
+ )
111
+ trace("result", client)
112
+
113
+ with Tracer.start("complete") as trace:
114
+ trace("type", "LLM")
115
+ trace("signature", "azure.ai.inference.ChatCompletionsClient.complete")
116
+ trace("description", "Azure Unified Inference SDK Chat Completions Client")
117
+ eargs = {
118
+ "model": self.model,
119
+ "input": data if isinstance(data, list) else [data],
120
+ **self.prompty.model.parameters,
121
+ }
122
+ trace("inputs", eargs)
123
+ r = client.complete(**eargs)
124
+ trace("result", r)
125
+
126
+ response = self._response(r)
127
+
128
+ elif self.api == "image":
129
+ raise NotImplementedError("Azure OpenAI Image API is not implemented yet")
130
+
131
+ return response
@@ -1,6 +1,7 @@
1
1
  import os
2
2
  import json
3
3
  import inspect
4
+ import traceback
4
5
  import importlib
5
6
  import contextlib
6
7
  from pathlib import Path
@@ -11,6 +12,18 @@ from functools import wraps, partial
11
12
  from typing import Any, Callable, Dict, Iterator, List
12
13
 
13
14
 
15
+ # clean up key value pairs for sensitive values
16
+ def sanitize(key: str, value: Any) -> Any:
17
+ if isinstance(value, str) and any(
18
+ [s in key.lower() for s in ["key", "token", "secret", "password", "credential"]]
19
+ ):
20
+ return len(str(value)) * "*"
21
+ elif isinstance(value, dict):
22
+ return {k: sanitize(k, v) for k, v in value.items()}
23
+ else:
24
+ return value
25
+
26
+
14
27
  class Tracer:
15
28
  _tracers: Dict[str, Callable[[str], Iterator[Callable[[str, Any], None]]]] = {}
16
29
 
@@ -31,7 +44,11 @@ class Tracer:
31
44
  traces = [
32
45
  stack.enter_context(tracer(name)) for tracer in cls._tracers.values()
33
46
  ]
34
- yield lambda key, value: [trace(key, value) for trace in traces]
47
+ yield lambda key, value: [
48
+ # normalize and sanitize any trace values
49
+ trace(key, sanitize(key, to_dict(value)))
50
+ for trace in traces
51
+ ]
35
52
 
36
53
 
37
54
  def to_dict(obj: Any) -> Dict[str, Any]:
@@ -94,7 +111,9 @@ def _results(result: Any) -> dict:
94
111
  return to_dict(result) if result is not None else "None"
95
112
 
96
113
 
97
- def _trace_sync(func: Callable = None, *, description: str = None) -> Callable:
114
+ def _trace_sync(
115
+ func: Callable = None, *, description: str = None, type: str = None
116
+ ) -> Callable:
98
117
  description = description or ""
99
118
 
100
119
  @wraps(func)
@@ -105,6 +124,9 @@ def _trace_sync(func: Callable = None, *, description: str = None) -> Callable:
105
124
  if description and description != "":
106
125
  trace("description", description)
107
126
 
127
+ if type and type != "":
128
+ trace("type", type)
129
+
108
130
  inputs = _inputs(func, args, kwargs)
109
131
  trace("inputs", inputs)
110
132
 
@@ -118,7 +140,7 @@ def _trace_sync(func: Callable = None, *, description: str = None) -> Callable:
118
140
  "exception": {
119
141
  "type": type(e).__name__,
120
142
  "message": str(e),
121
- "args": e.args,
143
+ "args": to_dict(e.args),
122
144
  }
123
145
  },
124
146
  )
@@ -129,7 +151,9 @@ def _trace_sync(func: Callable = None, *, description: str = None) -> Callable:
129
151
  return wrapper
130
152
 
131
153
 
132
- def _trace_async(func: Callable = None, *, description: str = None) -> Callable:
154
+ def _trace_async(
155
+ func: Callable = None, *, description: str = None, type: str = None
156
+ ) -> Callable:
133
157
  description = description or ""
134
158
 
135
159
  @wraps(func)
@@ -140,6 +164,9 @@ def _trace_async(func: Callable = None, *, description: str = None) -> Callable:
140
164
  if description and description != "":
141
165
  trace("description", description)
142
166
 
167
+ if type and type != "":
168
+ trace("type", type)
169
+
143
170
  inputs = _inputs(func, args, kwargs)
144
171
  trace("inputs", inputs)
145
172
  try:
@@ -150,9 +177,10 @@ def _trace_async(func: Callable = None, *, description: str = None) -> Callable:
150
177
  "result",
151
178
  {
152
179
  "exception": {
153
- "type": type(e).__name__,
180
+ "type": e.__class__.__name__,
181
+ "traceback": traceback.format_tb(e.__traceback__),
154
182
  "message": str(e),
155
- "args": e.args,
183
+ "args": to_dict(e.args),
156
184
  }
157
185
  },
158
186
  )
@@ -163,13 +191,15 @@ def _trace_async(func: Callable = None, *, description: str = None) -> Callable:
163
191
  return wrapper
164
192
 
165
193
 
166
- def trace(func: Callable = None, *, description: str = None) -> Callable:
194
+ def trace(
195
+ func: Callable = None, *, description: str = None, type: str = None
196
+ ) -> Callable:
167
197
  if func is None:
168
- return partial(trace, description=description)
198
+ return partial(trace, description=description, type=type)
169
199
 
170
200
  wrapped_method = _trace_async if inspect.iscoroutinefunction(func) else _trace_sync
171
201
 
172
- return wrapped_method(func, description=description)
202
+ return wrapped_method(func, description=description, type=type)
173
203
 
174
204
 
175
205
  class PromptyTracer:
@@ -280,6 +310,8 @@ class PromptyTracer:
280
310
  def console_tracer(name: str) -> Iterator[Callable[[str, Any], None]]:
281
311
  try:
282
312
  print(f"Starting {name}")
283
- yield lambda key, value: print(f"{key}:\n{json.dumps(value, indent=4)}")
313
+ yield lambda key, value: print(
314
+ f"{key}:\n{json.dumps(to_dict(value), indent=4)}"
315
+ )
284
316
  finally:
285
317
  print(f"Ending {name}")
@@ -13,7 +13,7 @@ dependencies = [
13
13
  "python-dotenv>=1.0.1",
14
14
  "click>=8.1.7",
15
15
  ]
16
- version = "0.1.18"
16
+ version = "0.1.20"
17
17
 
18
18
  [project.optional-dependencies]
19
19
  azure = [
@@ -3,7 +3,6 @@ import pytest
3
3
  import prompty
4
4
  from prompty.core import InvokerFactory
5
5
 
6
-
7
6
  from tests.fake_azure_executor import FakeAzureExecutor
8
7
  from tests.fake_serverless_executor import FakeServerlessExecutor
9
8
  from prompty.azure import AzureOpenAIProcessor
@@ -24,6 +23,7 @@ def fake_azure_executor():
24
23
  InvokerFactory.add_processor("serverless", ServerlessProcessor)
25
24
 
26
25
 
26
+
27
27
  @pytest.mark.parametrize(
28
28
  "prompt",
29
29
  [
@@ -151,6 +151,7 @@ def test_streaming():
151
151
 
152
152
 
153
153
  def test_serverless():
154
+
154
155
  result = prompty.execute(
155
156
  "prompts/serverless.prompty",
156
157
  configuration={"key": os.environ.get("SERVERLESS_KEY", "key")},
@@ -151,5 +151,8 @@ def test_streaming():
151
151
  result = prompty.execute(
152
152
  "prompts/streaming.prompty",
153
153
  )
154
+ r = []
154
155
  for item in result:
155
- print(item)
156
+ r.append(item)
157
+
158
+ return ' '.join(r)
@@ -1,95 +0,0 @@
1
- import azure.identity
2
- import importlib.metadata
3
- from typing import Iterator
4
- from openai import AzureOpenAI
5
- from ..core import Invoker, InvokerFactory, Prompty, PromptyStream
6
-
7
- VERSION = importlib.metadata.version("prompty")
8
-
9
-
10
- @InvokerFactory.register_executor("azure")
11
- @InvokerFactory.register_executor("azure_openai")
12
- class AzureOpenAIExecutor(Invoker):
13
- """Azure OpenAI Executor"""
14
-
15
- def __init__(self, prompty: Prompty) -> None:
16
- super().__init__(prompty)
17
- kwargs = {
18
- key: value
19
- for key, value in self.prompty.model.configuration.items()
20
- if key != "type"
21
- }
22
-
23
- # no key, use default credentials
24
- if "api_key" not in kwargs:
25
- # managed identity if client id
26
- if "client_id" in kwargs:
27
- default_credential = azure.identity.ManagedIdentityCredential(
28
- client_id=kwargs.pop("client_id"),
29
- )
30
- # default credential
31
- else:
32
- default_credential = azure.identity.DefaultAzureCredential(
33
- exclude_shared_token_cache_credential=True
34
- )
35
-
36
- kwargs["azure_ad_token_provider"] = (
37
- azure.identity.get_bearer_token_provider(
38
- default_credential, "https://cognitiveservices.azure.com/.default"
39
- )
40
- )
41
-
42
- self.client = AzureOpenAI(
43
- default_headers={
44
- "User-Agent": f"prompty/{VERSION}",
45
- "x-ms-useragent": f"prompty/{VERSION}",
46
- },
47
- **kwargs,
48
- )
49
-
50
- self.api = self.prompty.model.api
51
- self.deployment = self.prompty.model.configuration["azure_deployment"]
52
- self.parameters = self.prompty.model.parameters
53
-
54
- def invoke(self, data: any) -> any:
55
- """Invoke the Azure OpenAI API
56
-
57
- Parameters
58
- ----------
59
- data : any
60
- The data to send to the Azure OpenAI API
61
-
62
- Returns
63
- -------
64
- any
65
- The response from the Azure OpenAI API
66
- """
67
- if self.api == "chat":
68
- response = self.client.chat.completions.create(
69
- model=self.deployment,
70
- messages=data if isinstance(data, list) else [data],
71
- **self.parameters,
72
- )
73
-
74
- elif self.api == "completion":
75
- response = self.client.completions.create(
76
- prompt=data.item,
77
- model=self.deployment,
78
- **self.parameters,
79
- )
80
-
81
- elif self.api == "embedding":
82
- response = self.client.embeddings.create(
83
- input=data if isinstance(data, list) else [data],
84
- model=self.deployment,
85
- **self.parameters,
86
- )
87
-
88
- elif self.api == "image":
89
- raise NotImplementedError("Azure OpenAI Image API is not implemented yet")
90
-
91
- # stream response
92
- if isinstance(response, Iterator):
93
- return PromptyStream("AzureOpenAIExecutor", response)
94
- else:
95
- return response
@@ -1,74 +0,0 @@
1
- import importlib.metadata
2
- from openai import OpenAI
3
- from typing import Iterator
4
- from ..core import Invoker, InvokerFactory, Prompty, PromptyStream
5
-
6
- VERSION = importlib.metadata.version("prompty")
7
-
8
-
9
- @InvokerFactory.register_executor("openai")
10
- class OpenAIExecutor(Invoker):
11
- """OpenAI Executor"""
12
-
13
- def __init__(self, prompty: Prompty) -> None:
14
- super().__init__(prompty)
15
- kwargs = {
16
- key: value
17
- for key, value in self.prompty.model.configuration.items()
18
- if key != "type"
19
- }
20
-
21
- self.client = OpenAI(
22
- default_headers={
23
- "User-Agent": f"prompty/{VERSION}",
24
- "x-ms-useragent": f"prompty/{VERSION}",
25
- },
26
- **kwargs,
27
- )
28
-
29
- self.api = self.prompty.model.api
30
- self.deployment = self.prompty.model.configuration["azure_deployment"]
31
- self.parameters = self.prompty.model.parameters
32
-
33
- def invoke(self, data: any) -> any:
34
- """Invoke the OpenAI API
35
-
36
- Parameters
37
- ----------
38
- data : any
39
- The data to send to the OpenAI API
40
-
41
- Returns
42
- -------
43
- any
44
- The response from the OpenAI API
45
- """
46
- if self.api == "chat":
47
- response = self.client.chat.completions.create(
48
- model=self.deployment,
49
- messages=data if isinstance(data, list) else [data],
50
- **self.parameters,
51
- )
52
-
53
- elif self.api == "completion":
54
- response = self.client.completions.create(
55
- prompt=data.item,
56
- model=self.deployment,
57
- **self.parameters,
58
- )
59
-
60
- elif self.api == "embedding":
61
- response = self.client.embeddings.create(
62
- input=data if isinstance(data, list) else [data],
63
- model=self.deployment,
64
- **self.parameters,
65
- )
66
-
67
- elif self.api == "image":
68
- raise NotImplementedError("OpenAI Image API is not implemented yet")
69
-
70
- # stream response
71
- if isinstance(response, Iterator):
72
- return PromptyStream("OpenAIExecutor", response)
73
- else:
74
- return response
@@ -1,84 +0,0 @@
1
- import importlib.metadata
2
- from typing import Iterator
3
- from azure.core.credentials import AzureKeyCredential
4
- from azure.ai.inference import (
5
- ChatCompletionsClient,
6
- EmbeddingsClient,
7
- )
8
- from azure.ai.inference.models import (
9
- StreamingChatCompletions,
10
- AsyncStreamingChatCompletions,
11
- )
12
- from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, AsyncPromptyStream
13
-
14
- VERSION = importlib.metadata.version("prompty")
15
-
16
-
17
- @InvokerFactory.register_executor("serverless")
18
- class ServerlessExecutor(Invoker):
19
- """Azure OpenAI Executor"""
20
-
21
- def __init__(self, prompty: Prompty) -> None:
22
- super().__init__(prompty)
23
-
24
- # serverless configuration
25
- self.endpoint = self.prompty.model.configuration["endpoint"]
26
- self.model = self.prompty.model.configuration["model"]
27
- self.key = self.prompty.model.configuration["key"]
28
-
29
- # api type
30
- self.api = self.prompty.model.api
31
-
32
- def invoke(self, data: any) -> any:
33
- """Invoke the Serverless SDK
34
-
35
- Parameters
36
- ----------
37
- data : any
38
- The data to send to the Serverless SDK
39
-
40
- Returns
41
- -------
42
- any
43
- The response from the Serverless SDK
44
- """
45
- if self.api == "chat":
46
- response = ChatCompletionsClient(
47
- endpoint=self.endpoint,
48
- credential=AzureKeyCredential(self.key),
49
- user_agent=f"prompty/{VERSION}"
50
- ).complete(
51
- model=self.model,
52
- messages=data if isinstance(data, list) else [data],
53
- **self.prompty.model.parameters,
54
- )
55
-
56
- elif self.api == "completion":
57
- raise NotImplementedError(
58
- "Serverless Completions API is not implemented yet"
59
- )
60
-
61
- elif self.api == "embedding":
62
- response = EmbeddingsClient(
63
- endpoint=self.endpoint,
64
- credential=AzureKeyCredential(self.key),
65
- user_agent=f"prompty/{VERSION}",
66
- ).complete(
67
- model=self.model,
68
- input=data if isinstance(data, list) else [data],
69
- **self.prompty.model.parameters,
70
- )
71
-
72
- elif self.api == "image":
73
- raise NotImplementedError("Azure OpenAI Image API is not implemented yet")
74
-
75
- # stream response
76
- if isinstance(response, Iterator):
77
- if isinstance(response, StreamingChatCompletions):
78
- return PromptyStream("ServerlessExecutor", response)
79
- elif isinstance(response, AsyncStreamingChatCompletions):
80
- return AsyncPromptyStream("ServerlessExecutor", response)
81
- return PromptyStream("ServerlessExecutor", response)
82
- else:
83
-
84
- return response
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes