prompty 0.1.10__tar.gz → 0.1.13__tar.gz

Files changed (77)
  1. prompty-0.1.13/PKG-INFO +216 -0
  2. prompty-0.1.13/README.md +197 -0
  3. {prompty-0.1.10 → prompty-0.1.13}/prompty/__init__.py +2 -4
  4. prompty-0.1.13/prompty/azure/__init__.py +3 -0
  5. prompty-0.1.10/prompty/executors.py → prompty-0.1.13/prompty/azure/executor.py +5 -4
  6. prompty-0.1.10/prompty/processors.py → prompty-0.1.13/prompty/azure/processor.py +8 -13
  7. prompty-0.1.13/prompty/cli.py +117 -0
  8. {prompty-0.1.10 → prompty-0.1.13}/prompty/core.py +35 -7
  9. prompty-0.1.13/prompty/openai/__init__.py +3 -0
  10. prompty-0.1.13/prompty/openai/executor.py +74 -0
  11. prompty-0.1.13/prompty/openai/processor.py +65 -0
  12. prompty-0.1.13/prompty/serverless/__init__.py +3 -0
  13. prompty-0.1.13/prompty/serverless/executor.py +82 -0
  14. prompty-0.1.13/prompty/serverless/processor.py +62 -0
  15. {prompty-0.1.10 → prompty-0.1.13}/prompty/tracer.py +59 -1
  16. {prompty-0.1.10 → prompty-0.1.13}/pyproject.toml +25 -12
  17. prompty-0.1.10/tests/__init__.py → prompty-0.1.13/tests/fake_azure_executor.py +1 -3
  18. prompty-0.1.13/tests/fake_serverless_executor.py +45 -0
  19. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/chat.prompty +4 -3
  20. prompty-0.1.13/tests/prompts/serverless.prompty +38 -0
  21. prompty-0.1.13/tests/prompts/serverless.prompty.execution.json +22 -0
  22. prompty-0.1.13/tests/prompts/serverless_stream.prompty +39 -0
  23. prompty-0.1.13/tests/prompts/serverless_stream.prompty.execution.json +1432 -0
  24. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/streaming.prompty +2 -0
  25. prompty-0.1.10/tests/test_tracing.py → prompty-0.1.13/tests/test_execute.py +44 -23
  26. {prompty-0.1.10 → prompty-0.1.13}/tests/test_factory_invoker.py +11 -0
  27. {prompty-0.1.10 → prompty-0.1.13}/tests/test_path_exec.py +2 -2
  28. prompty-0.1.10/tests/test_execute.py → prompty-0.1.13/tests/test_tracing.py +22 -1
  29. prompty-0.1.10/PKG-INFO +0 -136
  30. prompty-0.1.10/README.md +0 -120
  31. prompty-0.1.10/prompty/cli.py +0 -85
  32. {prompty-0.1.10 → prompty-0.1.13}/LICENSE +0 -0
  33. {prompty-0.1.10 → prompty-0.1.13}/prompty/parsers.py +0 -0
  34. {prompty-0.1.10 → prompty-0.1.13}/prompty/renderers.py +0 -0
  35. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/1contoso.md +0 -0
  36. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/2contoso.md +0 -0
  37. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/3contoso.md +0 -0
  38. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/4contoso.md +0 -0
  39. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/basic.prompty.md +0 -0
  40. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/camping.jpg +0 -0
  41. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/context.prompty.md +0 -0
  42. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/contoso_multi.md +0 -0
  43. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/faithfulness.prompty.md +0 -0
  44. {prompty-0.1.10 → prompty-0.1.13}/tests/generated/groundedness.prompty.md +0 -0
  45. {prompty-0.1.10 → prompty-0.1.13}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  46. {prompty-0.1.10 → prompty-0.1.13}/tests/hello_world.embedding.json +0 -0
  47. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/__init__.py +0 -0
  48. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/basic.prompty +0 -0
  49. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/basic.prompty.execution.json +0 -0
  50. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/basic_json_output.prompty +0 -0
  51. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/camping.jpg +0 -0
  52. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/context.json +0 -0
  53. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/context.prompty +0 -0
  54. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/context.prompty.execution.json +0 -0
  55. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/embedding.prompty +0 -0
  56. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/embedding.prompty.execution.json +0 -0
  57. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/evaluation.prompty +0 -0
  58. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/faithfulness.prompty +0 -0
  59. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  60. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/fake.prompty +0 -0
  61. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/funcfile.json +0 -0
  62. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/funcfile.prompty +0 -0
  63. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/functions.prompty +0 -0
  64. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/functions.prompty.execution.json +0 -0
  65. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/groundedness.prompty +0 -0
  66. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/groundedness.prompty.execution.json +0 -0
  67. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/prompty.json +0 -0
  68. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/streaming.prompty.execution.json +0 -0
  69. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/sub/__init__.py +0 -0
  70. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/sub/basic.prompty +0 -0
  71. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/sub/sub/__init__.py +0 -0
  72. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/sub/sub/basic.prompty +0 -0
  73. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/sub/sub/prompty.json +0 -0
  74. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/sub/sub/test.py +0 -0
  75. {prompty-0.1.10 → prompty-0.1.13}/tests/prompts/test.py +0 -0
  76. {prompty-0.1.10 → prompty-0.1.13}/tests/prompty.json +0 -0
  77. {prompty-0.1.10 → prompty-0.1.13}/tests/test_common.py +0 -0
@@ -0,0 +1,216 @@
+ Metadata-Version: 2.1
+ Name: prompty
+ Version: 0.1.13
+ Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
+ Author-Email: Seth Juarez <seth.juarez@microsoft.com>
+ Requires-Dist: pyyaml>=6.0.1
+ Requires-Dist: pydantic>=2.8.2
+ Requires-Dist: jinja2>=3.1.4
+ Requires-Dist: python-dotenv>=1.0.1
+ Requires-Dist: click>=8.1.7
+ Requires-Dist: azure-identity>=1.17.1; extra == "azure"
+ Requires-Dist: openai>=1.35.10; extra == "azure"
+ Requires-Dist: openai>=1.35.10; extra == "openai"
+ Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
+ Provides-Extra: azure
+ Provides-Extra: openai
+ Provides-Extra: serverless
+ Description-Content-Type: text/markdown
+
+
+ Prompty is an asset class and format for LLM prompts designed to enhance observability, understandability, and portability for developers. The primary goal is to accelerate the developer inner loop of prompt engineering and prompt source management in a cross-language and cross-platform implementation.
+
+ The file format has a supporting toolchain with a VS Code extension and runtimes in multiple programming languages to simplify and accelerate your AI application development.
+
+ The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming languages.
+
+ ## The Prompty File Format
+ Prompty is a language-agnostic prompt asset for creating prompts and engineering the responses. Learn more about the format [here](https://prompty.ai/docs/prompty-file-spec).
+
+ Example prompty file:
+ ```markdown
+ ---
+ name: Basic Prompt
+ description: A basic prompt that uses the GPT-3 chat API to answer questions
+ authors:
+   - sethjuarez
+   - jietong
+ model:
+   api: chat
+   configuration:
+     api_version: 2023-12-01-preview
+     azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
+     azure_deployment: ${env:AZURE_OPENAI_DEPLOYMENT:gpt-35-turbo}
+ sample:
+   firstName: Jane
+   lastName: Doe
+   question: What is the meaning of life?
+ ---
+ system:
+ You are an AI assistant who helps people find information.
+ As the assistant, you answer questions briefly, succinctly,
+ and in a personable manner using markdown and even add some personal flair with appropriate emojis.
+
+ # Customer
+ You are helping {{firstName}} {{lastName}} to find answers to their questions.
+ Use their name to address them in your responses.
+
+ user:
+ {{question}}
+ ```
+
+
+ ## The Prompty VS Code Extension
+ Run Prompty files directly in VS Code. This Visual Studio Code extension offers an intuitive prompt playground within VS Code to streamline the prompt engineering process. You can find the Prompty extension in the Visual Studio Code Marketplace.
+
+ Download the [VS Code extension here](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.prompty).
+
+
+ ## Using this Prompty Runtime
+ The Python runtime is a simple way to run your prompts in Python. The runtime is available as a Python package and can be installed using pip. Depending on the type of prompt you are running, you may need to install additional dependencies. The runtime is designed to be extensible and can be customized to fit your needs.
+
+ ```bash
+ pip install prompty[azure]
+ ```
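+
+ The `[azure]` extra shown above is one of three optional extras declared in the package metadata; as a rough guide, install the extra matching the API you plan to call:
+
+ ```bash
+ # optional extras (from Provides-Extra above); pick the one matching your model host
+ pip install prompty[openai]      # OpenAI API
+ pip install prompty[serverless]  # serverless models via azure-ai-inference
+ ```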
+
+ Simple usage example:
+
+ ```python
+ import prompty
+ # import invoker
+ import prompty.azure
+
+ # execute the prompt
+ response = prompty.execute("path/to/prompty/file")
+
+ print(response)
+ ```
+
+ ## Available Invokers
+ The Prompty runtime comes with a set of built-in invokers that can be used to execute prompts (see the sketch below). These include:
+
+ - `azure`: Invokes the Azure OpenAI API
+ - `openai`: Invokes the OpenAI API
+ - `serverless`: Invokes serverless models (like the ones on GitHub) using the [Azure AI Inference client library](https://learn.microsoft.com/en-us/python/api/overview/azure/ai-inference-readme?view=azure-python-preview); currently only key-based authentication is supported, with managed identity support coming soon
+
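+ Each invoker registers itself when its module is imported (that is what `import prompty.azure` does in the sample above), so import the module matching the `type` in your model configuration before executing. A minimal sketch, with the file path as a placeholder:
+
+ ```python
+ import prompty
+ import prompty.serverless  # registers the serverless executor and processor
+
+ # executes with the serverless invoker declared in the .prompty frontmatter
+ response = prompty.execute("path/to/serverless.prompty")
+ print(response)
+ ```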
+
+ ## Using Tracing in Prompty
+ Prompty supports tracing to help you understand the execution of your prompts. This functionality is customizable and can be used to trace the execution of your prompts in a way that makes sense to you. Prompty has two default tracers built in: `console_tracer` and `PromptyTracer`. The `console_tracer` writes the trace to the console, and the `PromptyTracer` writes the trace to a JSON file. You can also create your own tracer by creating your own hook.
+
+ ```python
+ import prompty
+ # import invoker
+ import prompty.azure
+ from prompty.tracer import trace, Tracer, console_tracer, PromptyTracer
+
+ # add console tracer
+ Tracer.add("console", console_tracer)
+
+ # add PromptyTracer
+ json_tracer = PromptyTracer(output_dir="path/to/output")
+ Tracer.add("PromptyTracer", json_tracer.tracer)
+
+ # execute the prompt
+ response = prompty.execute("path/to/prompty/file")
+
+ print(response)
+ ```
+
+ You can also bring your own tracer by creating your own tracing hook. The `console_tracer` is the simplest example of a tracer: it writes the trace to the console.
+ This is what it looks like:
+
+ ```python
+ @contextlib.contextmanager
+ def console_tracer(name: str) -> Iterator[Callable[[str, Any], None]]:
+     try:
+         print(f"Starting {name}")
+         yield lambda key, value: print(f"{key}:\n{json.dumps(value, indent=4)}")
+     finally:
+         print(f"Ending {name}")
+
+ ```
+
+ It uses a context manager to define the start and end of the trace so you can do whatever setup and teardown you need. The `yield` statement returns a function that you can use to write the trace. The `console_tracer` writes the trace to the console using the `print` function.
+
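+ A hedged sketch of how a registered hook gets driven (the span name and keys here are made up): the context manager brackets the span, and the yielded function records key/value pairs inside it.
+
+ ```python
+ # hypothetical direct use of the hook; the runtime does the equivalent internally
+ with console_tracer("my_span") as emit:
+     emit("inputs", {"question": "What is the meaning of life?"})
+     emit("result", "42")
+ ```
+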
+ The `PromptyTracer` is a more complex example of a tracer. This tracer manages its internal state using a full class. Here's an example of the class-based approach that writes each function trace to a JSON file:
+
+ ```python
+ class SimplePromptyTracer:
+     def __init__(self, output_dir: str):
+         self.output_dir = output_dir
+         self.tracer = self._tracer
+
+     @contextlib.contextmanager
+     def _tracer(self, name: str) -> Iterator[Callable[[str, Any], None]]:
+         trace = {}
+         try:
+             yield lambda key, value: trace.update({key: value})
+         finally:
+             with open(os.path.join(self.output_dir, f"{name}.json"), "w") as f:
+                 json.dump(trace, f, indent=4)
+ ```
+
+ The tracing mechanism is supported for all of the prompty runtime internals and can be used to trace the execution of the prompt along with all of the parameters. There is also a `@trace` decorator that can be used to trace the execution of any function external to the runtime. This is provided as a facility to trace the execution of the prompt and whatever supporting code you have.
+
+ ```python
+ import prompty
+ # import invoker
+ import prompty.azure
+ from prompty.tracer import trace, Tracer, PromptyTracer
+
+ json_tracer = PromptyTracer(output_dir="path/to/output")
+ Tracer.add("PromptyTracer", json_tracer.tracer)
+
+ @trace
+ def get_customer(customerId):
+     return {"id": customerId, "firstName": "Sally", "lastName": "Davis"}
+
+ @trace
+ def get_response(customerId, question, prompt):
+     customer = get_customer(customerId)
+
+     result = prompty.execute(
+         prompt,
+         inputs={"question": question, "customer": customer},
+     )
+     return {"question": question, "answer": result}
+
+ ```
+
+ In this case, whenever this code is executed, a `.ptrace` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
+
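+ A hypothetical driver for the traced functions above (the customer id, question, and path are placeholders):
+
+ ```python
+ # calling the outer traced function emits one nested trace tree
+ answer = get_response("cust-123", "What is the meaning of life?", "path/to/prompty/file")
+ print(answer["answer"])
+ ```
+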
+ ## OpenTelemetry Tracing
+ You can add OpenTelemetry tracing to your application using the same hook mechanism. In your application, you might create something like `trace_span` to trace the execution of your prompts:
+
+ ```python
+ from opentelemetry import trace as oteltrace
+
+ _tracer = "prompty"
+
+ @contextlib.contextmanager
+ def trace_span(name: str):
+     tracer = oteltrace.get_tracer(_tracer)
+     with tracer.start_as_current_span(name) as span:
+         yield lambda key, value: span.set_attribute(
+             key, json.dumps(value).replace("\n", "")
+         )
+
+ # adding this hook to the prompty runtime
+ Tracer.add("OpenTelemetry", trace_span)
+
+ ```
+
+ This will produce spans during the execution of the prompt that can be sent to an OpenTelemetry collector for further analysis.
+
+ ## CLI
+ The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
+
+ ```bash
+ prompty -s path/to/prompty/file
+ ```
+
+ This will execute the prompt and print the response to the console. It also has default tracing enabled.
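+
+ The CLI also exposes the flags defined in `prompty/cli.py` below; for example (paths are placeholders):
+
+ ```bash
+ # -v/--verbose adds console tracing; -c/--chat starts an interactive chat session
+ prompty -s path/to/prompty/file -v
+ prompty -s path/to/chat.prompty -c
+ ```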
+
+ ## Contributing
+ We welcome contributions to the Prompty project! This community-led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
@@ -0,0 +1,197 @@
+ (new README.md; its content is identical to the package description in PKG-INFO above)
@@ -3,8 +3,8 @@ import traceback
  from pathlib import Path
  from typing import Dict, List, Union

- from .tracer import trace
- from .core import (
+ from prompty.tracer import trace
+ from prompty.core import (
      Frontmatter,
      InvokerFactory,
      ModelSettings,
@@ -16,8 +16,6 @@ from .core import (

  from .renderers import *
  from .parsers import *
- from .executors import *
- from .processors import *


  def load_global_config(
@@ -0,0 +1,3 @@
+ # __init__.py
+ from .executor import AzureOpenAIExecutor
+ from .processor import AzureOpenAIProcessor
@@ -2,7 +2,7 @@ import azure.identity
  import importlib.metadata
  from typing import Iterator
  from openai import AzureOpenAI
- from .core import Invoker, InvokerFactory, Prompty, PromptyStream
+ from ..core import Invoker, InvokerFactory, Prompty, PromptyStream

  VERSION = importlib.metadata.version("prompty")

@@ -10,7 +10,8 @@ VERSION = importlib.metadata.version("prompty")
  @InvokerFactory.register_executor("azure")
  @InvokerFactory.register_executor("azure_openai")
  class AzureOpenAIExecutor(Invoker):
-     """ Azure OpenAI Executor """
+     """Azure OpenAI Executor"""
+
      def __init__(self, prompty: Prompty) -> None:
          super().__init__(prompty)
          kwargs = {
@@ -40,7 +41,7 @@ class AzureOpenAIExecutor(Invoker):

          self.client = AzureOpenAI(
              default_headers={
-                 "User-Agent": f"prompty{VERSION}",
+                 "User-Agent": f"prompty/{VERSION}",
                  "x-ms-useragent": f"prompty/{VERSION}",
              },
              **kwargs,
@@ -51,7 +52,7 @@ class AzureOpenAIExecutor(Invoker):
          self.parameters = self.prompty.model.parameters

      def invoke(self, data: any) -> any:
-         """ Invoke the Azure OpenAI API
+         """Invoke the Azure OpenAI API

          Parameters
          ----------
@@ -1,22 +1,14 @@
  from typing import Iterator
- from pydantic import BaseModel
  from openai.types.completion import Completion
  from openai.types.chat.chat_completion import ChatCompletion
- from .core import Invoker, InvokerFactory, Prompty, PromptyStream
+ from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
  from openai.types.create_embedding_response import CreateEmbeddingResponse


- class ToolCall(BaseModel):
-     id: str
-     name: str
-     arguments: str
-
-
- @InvokerFactory.register_processor("openai")
  @InvokerFactory.register_processor("azure")
  @InvokerFactory.register_processor("azure_openai")
- class OpenAIProcessor(Invoker):
-     """OpenAI/Azure Processor"""
+ class AzureOpenAIProcessor(Invoker):
+     """Azure OpenAI Processor"""

      def __init__(self, prompty: Prompty) -> None:
          super().__init__(prompty)
@@ -62,10 +54,13 @@ class OpenAIProcessor:

          def generator():
              for chunk in data:
-                 if len(chunk.choices) == 1 and chunk.choices[0].delta.content != None:
+                 if (
+                     len(chunk.choices) == 1
+                     and chunk.choices[0].delta.content != None
+                 ):
                      content = chunk.choices[0].delta.content
                      yield content

-         return PromptyStream("OpenAIProcessor", generator())
+         return PromptyStream("AzureOpenAIProcessor", generator())
      else:
          return data
@@ -0,0 +1,117 @@
+ import os
+ import json
+ import click
+ import importlib
+
+ from pathlib import Path
+ from pydantic import BaseModel
+
+ import prompty
+ from prompty.tracer import trace, PromptyTracer, console_tracer, Tracer
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+
+ def normalize_path(p, create_dir=False) -> Path:
+     path = Path(p)
+     if not path.is_absolute():
+         path = Path(os.getcwd()).joinpath(path).absolute().resolve()
+     else:
+         path = path.absolute().resolve()
+
+     if create_dir:
+         if not path.exists():
+             print(f"Creating directory {str(path)}")
+             os.makedirs(str(path))
+
+     return path
+
+
+ def dynamic_import(module: str):
+     t = module if "." in module else f"prompty.{module}"
+     print(f"Loading invokers from {t}")
+     importlib.import_module(t)
+
+
+ @trace
+ def chat_mode(prompt_path: str):
+     W = "\033[0m"   # white (normal)
+     R = "\033[31m"  # red
+     G = "\033[32m"  # green
+     O = "\033[33m"  # orange
+     B = "\033[34m"  # blue
+     P = "\033[35m"  # purple
+     print(f"Executing {str(prompt_path)} in chat mode...")
+     p = prompty.load(str(prompt_path))
+     if "chat_history" not in p.sample:
+         print(
+             f"{R}{str(prompt_path)} needs to have a chat_history input to work in chat mode{W}"
+         )
+         return
+     else:
+         try:
+             # load executor / processor types
+             dynamic_import(p.model.configuration["type"])
+             chat_history = p.sample["chat_history"]
+             while True:
+                 user_input = input(f"\n{B}User:{W} ")
+                 if user_input == "exit":
+                     break
+                 # reloadable prompty file
+                 chat_history.append({"role": "user", "content": user_input})
+                 result = prompty.execute(prompt_path, inputs={"chat_history": chat_history})
+                 print(f"\n{G}Assistant:{W} {result}")
+                 chat_history.append({"role": "assistant", "content": result})
+         except Exception as e:
+             print(f"{type(e).__qualname__}: {e}")
+
+     print(f"\n{R}Goodbye!{W}\n")
+
+
+ @trace
+ def execute(prompt_path: str, raw=False):
+     p = prompty.load(prompt_path)
+
+     try:
+         # load executor / processor types
+         dynamic_import(p.model.configuration["type"])
+
+         result = prompty.execute(p, raw=raw)
+         if issubclass(type(result), BaseModel):
+             print("\n", json.dumps(result.model_dump(), indent=4), "\n")
+         elif isinstance(result, list):
+             print(
+                 "\n", json.dumps([item.model_dump() for item in result], indent=4), "\n"
+             )
+         else:
+             print("\n", result, "\n")
+     except Exception as e:
+         print(f"{type(e).__qualname__}: {e}", "\n")
+
+
+ @click.command()
+ @click.option("--source", "-s", required=True)
+ @click.option("--verbose", "-v", is_flag=True)
+ @click.option("--chat", "-c", is_flag=True)
+ @click.version_option()
+ def run(source, verbose, chat):
+     prompt_path = normalize_path(source)
+     if not prompt_path.exists():
+         print(f"{str(prompt_path)} does not exist")
+         return
+
+     if verbose:
+         Tracer.add("console", console_tracer)
+
+     ptrace = PromptyTracer()
+     Tracer.add("prompty", ptrace.tracer)
+
+     if chat:
+         chat_mode(str(prompt_path))
+     else:
+         execute(str(prompt_path), raw=verbose)
+
+
+ if __name__ == "__main__":
117
+ chat_mode(source="./tests/prompts/basic.prompt")