prompty 0.1.12__py2.py3-none-any.whl → 0.1.14__py2.py3-none-any.whl

prompty/__init__.py CHANGED
@@ -344,7 +344,7 @@ def execute(
  parameters: Dict[str, any] = {},
  inputs: Dict[str, any] = {},
  raw: bool = False,
- connection: str = "default",
+ config_name: str = "default",
  ):
  """Execute a prompty.

@@ -380,7 +380,7 @@ def execute(
  # get caller's path (take into account trace frame)
  caller = Path(traceback.extract_stack()[-3].filename)
  path = Path(caller.parent / path).resolve().absolute()
- prompt = load(path, connection)
+ prompt = load(path, config_name)

  # prepare content
  content = prepare(prompt, inputs)
prompty/serverless/executor.py CHANGED
@@ -46,6 +46,7 @@ class ServerlessExecutor(Invoker):
  response = ChatCompletionsClient(
  endpoint=self.endpoint,
  credential=AzureKeyCredential(self.key),
+ user_agent=f"prompty/{VERSION}"
  ).complete(
  model=self.model,
  messages=data if isinstance(data, list) else [data],
@@ -61,6 +62,7 @@ class ServerlessExecutor(Invoker):
  response = EmbeddingsClient(
  endpoint=self.endpoint,
  credential=AzureKeyCredential(self.key),
+ user_agent=f"prompty/{VERSION}",
  ).complete(
  model=self.model,
  input=data if isinstance(data, list) else [data],
prompty-0.1.14.dist-info/METADATA ADDED
@@ -0,0 +1,216 @@
+ Metadata-Version: 2.1
+ Name: prompty
+ Version: 0.1.14
+ Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
+ Author-Email: Seth Juarez <seth.juarez@microsoft.com>
+ Requires-Dist: pyyaml>=6.0.1
+ Requires-Dist: pydantic>=2.8.2
+ Requires-Dist: jinja2>=3.1.4
+ Requires-Dist: python-dotenv>=1.0.1
+ Requires-Dist: click>=8.1.7
+ Requires-Dist: azure-identity>=1.17.1; extra == "azure"
+ Requires-Dist: openai>=1.35.10; extra == "azure"
+ Requires-Dist: openai>=1.35.10; extra == "openai"
+ Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
+ Provides-Extra: azure
+ Provides-Extra: openai
+ Provides-Extra: serverless
+ Description-Content-Type: text/markdown
+
+
+ Prompty is an asset class and format for LLM prompts designed to enhance observability, understandability, and portability for developers. The primary goal is to accelerate the developer inner loop of prompt engineering and prompt source management in a cross-language and cross-platform implementation.
+
+ The file format has a supporting toolchain with a VS Code extension and runtimes in multiple programming languages to simplify and accelerate your AI application development.
+
+ The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming languages.
+
+ ## The Prompty File Format
+ Prompty is a language-agnostic prompt asset for creating prompts and engineering the responses. Learn more about the format [here](https://prompty.ai/docs/prompty-file-spec).
+
+ Example prompty file:
+ ```markdown
+ ---
+ name: Basic Prompt
+ description: A basic prompt that uses the GPT-3 chat API to answer questions
+ authors:
+   - sethjuarez
+   - jietong
+ model:
+   api: chat
+   configuration:
+     api_version: 2023-12-01-preview
+     azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
+     azure_deployment: ${env:AZURE_OPENAI_DEPLOYMENT:gpt-35-turbo}
+ sample:
+   firstName: Jane
+   lastName: Doe
+   question: What is the meaning of life?
+ ---
+ system:
+ You are an AI assistant who helps people find information.
+ As the assistant, you answer questions briefly, succinctly,
+ and in a personable manner using markdown and even add some personal flair with appropriate emojis.
+
+ # Customer
+ You are helping {{firstName}} {{lastName}} to find answers to their questions.
+ Use their name to address them in your responses.
+
+ user:
+ {{question}}
+ ```
+
+
+ ## The Prompty VS Code Extension
+ Run Prompty files directly in VS Code. This Visual Studio Code extension offers an intuitive prompt playground within VS Code to streamline the prompt engineering process. You can find the Prompty extension in the Visual Studio Code Marketplace.
+
+ Download the [VS Code extension here](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.prompty).
+
+
+ ## Using this Prompty Runtime
+ The Python runtime is a simple way to run your prompts from Python code. The runtime is available as a Python package and can be installed using pip. Depending on the type of prompt you are running, you may need to install additional dependencies; the `azure`, `openai`, and `serverless` extras declared in the package metadata map to the available invokers. The runtime is designed to be extensible and can be customized to fit your needs.
+
+ ```bash
+ pip install prompty[azure]
+ ```
+
+ Simple usage example:
+
+ ```python
+ import prompty
+ # import invoker
+ import prompty.azure
+
+ # execute the prompt
+ response = prompty.execute("path/to/prompty/file")
+
+ print(response)
+ ```
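+
+ The `execute` function also takes a `config_name` argument (renamed from `connection` in this version) to pick one of the model configurations defined in the prompty file, plus an `inputs` dictionary for template values. A minimal sketch, assuming a configuration named `"default"`:
+
+ ```python
+ import prompty
+ import prompty.azure
+
+ # select a named model configuration and pass template inputs
+ response = prompty.execute(
+     "path/to/prompty/file",
+     config_name="default",
+     inputs={"question": "What is the meaning of life?"},
+ )
+ print(response)
+ ```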
+
+ ## Available Invokers
+ The Prompty runtime comes with a set of built-in invokers that can be used to execute prompts; enabling one is just an import, as sketched after this list. These include:
+
+ - `azure`: Invokes the Azure OpenAI API
+ - `openai`: Invokes the OpenAI API
+ - `serverless`: Invokes serverless models (like the ones on GitHub) using the [Azure AI Inference client library](https://learn.microsoft.com/en-us/python/api/overview/azure/ai-inference-readme?view=azure-python-preview); currently only key-based authentication is supported, with managed identity support coming soon
+
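+ A minimal sketch, assuming (as the usage example above suggests) that importing the invoker module is what registers it with the runtime:
+
+ ```python
+ import prompty
+ # importing an invoker module registers it with the runtime
+ import prompty.openai  # or prompty.azure / prompty.serverless
+
+ response = prompty.execute("path/to/prompty/file")
+ ```
+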
+
+ ## Using Tracing in Prompty
+ Prompty supports tracing to help you understand the execution of your prompts. This functionality is customizable and can be used to trace the execution of your prompts in a way that makes sense to you. Prompty has two default tracers built in: `console_tracer` and `PromptyTracer`. The `console_tracer` writes the trace to the console, and the `PromptyTracer` writes the trace to a JSON file. You can also create your own tracer by creating your own hook.
+
+ ```python
+ import prompty
+ # import invoker
+ import prompty.azure
+ from prompty.tracer import trace, Tracer, console_tracer, PromptyTracer
+
+ # add console tracer
+ Tracer.add("console", console_tracer)
+
+ # add PromptyTracer
+ json_tracer = PromptyTracer(output_dir="path/to/output")
+ Tracer.add("PromptyTracer", json_tracer.tracer)
+
+ # execute the prompt
+ response = prompty.execute("path/to/prompty/file")
+
+ print(response)
+ ```
+
+ You can also bring your own tracer by writing your own tracing hook. The `console_tracer` is the simplest example of a tracer: it writes the trace to the console.
+ This is what it looks like:
+
+ ```python
+ import contextlib
+ import json
+ from typing import Any, Callable, Iterator
+
+ @contextlib.contextmanager
+ def console_tracer(name: str) -> Iterator[Callable[[str, Any], None]]:
+     try:
+         print(f"Starting {name}")
+         yield lambda key, value: print(f"{key}:\n{json.dumps(value, indent=4)}")
+     finally:
+         print(f"Ending {name}")
+
+ ```
+
+ It uses a context manager to define the start and end of the trace so you can do whatever setup and teardown you need. The `yield` statement returns a function that you can use to write the trace. The `console_tracer` writes the trace to the console using the `print` function.
+
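+ In other words, the hook is entered around each traced step, and the yielded function is called for every key/value to record. An illustrative sketch of that calling pattern (not the runtime's actual internals):
+
+ ```python
+ with console_tracer("my_step") as t:
+     t("inputs", {"question": "What is the meaning of life?"})
+     # ... work happens here ...
+     t("result", "42")
+ ```
+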
+ The `PromptyTracer` is a more complex example of a tracer. This tracer manages its internal state using a full class. Here's an example of the class-based approach that writes each function trace to a JSON file:
+
+ ```python
+ import contextlib
+ import json
+ import os
+ from typing import Any, Callable, Iterator
+
+ class SimplePromptyTracer:
+     def __init__(self, output_dir: str):
+         self.output_dir = output_dir
+
+     @contextlib.contextmanager
+     def tracer(self, name: str) -> Iterator[Callable[[str, Any], None]]:
+         trace = {}
+         try:
+             yield lambda key, value: trace.update({key: value})
+         finally:
+             with open(os.path.join(self.output_dir, f"{name}.json"), "w") as f:
+                 json.dump(trace, f, indent=4)
+ ```
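+
+ Registering it follows the same `Tracer.add` pattern as the built-in tracers (a sketch; the hook name `"simple"` is arbitrary):
+
+ ```python
+ simple_tracer = SimplePromptyTracer(output_dir="path/to/output")
+ Tracer.add("simple", simple_tracer.tracer)
+ ```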
+
+ The tracing mechanism is supported for all of the prompty runtime internals and can be used to trace the execution of the prompt along with all of the parameters. There is also a `@trace` decorator that can be used to trace the execution of any function external to the runtime. This is provided as a facility to trace the execution of the prompt and whatever supporting code you have.
+
+ ```python
+ import prompty
+ # import invoker
+ import prompty.azure
+ from prompty.tracer import trace, Tracer, PromptyTracer
+
+ json_tracer = PromptyTracer(output_dir="path/to/output")
+ Tracer.add("PromptyTracer", json_tracer.tracer)
+
+ @trace
+ def get_customer(customerId):
+     return {"id": customerId, "firstName": "Sally", "lastName": "Davis"}
+
+ @trace
+ def get_response(customerId, question, prompt):
+     customer = get_customer(customerId)
+
+     result = prompty.execute(
+         prompt,
+         inputs={"question": question, "customer": customer},
+     )
+     return {"question": question, "answer": result}
+
+ ```
+
+ In this case, whenever this code is executed, a `.ptrace` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
+
+ ## OpenTelemetry Tracing
+ You can add OpenTelemetry tracing to your application using the same hook mechanism. In your application, you might create something like `trace_span` to trace the execution of your prompts:
+
+ ```python
+ import contextlib
+ import json
+
+ from opentelemetry import trace as oteltrace
+
+ from prompty.tracer import Tracer
+
+ _tracer = "prompty"
+
+ @contextlib.contextmanager
+ def trace_span(name: str):
+     tracer = oteltrace.get_tracer(_tracer)
+     with tracer.start_as_current_span(name) as span:
+         yield lambda key, value: span.set_attribute(
+             key, json.dumps(value).replace("\n", "")
+         )
+
+ # adding this hook to the prompty runtime
+ Tracer.add("OpenTelemetry", trace_span)
+
+ ```
+
+ This will produce spans during the execution of the prompt that can be sent to an OpenTelemetry collector for further analysis.
+
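+ Exporting those spans is standard OpenTelemetry setup rather than anything prompty-specific; a sketch, assuming the `opentelemetry-sdk` and OTLP exporter packages are installed:
+
+ ```python
+ from opentelemetry import trace as oteltrace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+ from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+
+ # route spans produced by trace_span to an OTLP collector
+ provider = TracerProvider()
+ provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
+ oteltrace.set_tracer_provider(provider)
+ ```
+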
+ ## CLI
+ The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
+
+ ```bash
+ prompty -s path/to/prompty/file
+ ```
+
+ This will execute the prompt and print the response to the console. It also has default tracing enabled.
+
+ ## Contributing
+ We welcome contributions to the Prompty project! This community-led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
prompty-0.1.12.dist-info/RECORD → prompty-0.1.14.dist-info/RECORD RENAMED
@@ -1,7 +1,7 @@
- prompty-0.1.12.dist-info/METADATA,sha256=4uJHCJDPTuM6Sp1ikmtS68WepC329WCDMAdfr9zBanQ,783
- prompty-0.1.12.dist-info/WHEEL,sha256=CuZGaXTwoRLAOVv0AcE3bCTxO5ejVuBEJkUBe9C-kvk,94
- prompty-0.1.12.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
- prompty/__init__.py,sha256=5t_hxoRVxsbc7gfMyH9EVO2j259dM6uac3GmspQ-MjE,11146
+ prompty-0.1.14.dist-info/METADATA,sha256=lbEMlGMerWd1XGwjZwG-DDy2wQQ9rzUowD0JNxaqTZE,8951
+ prompty-0.1.14.dist-info/WHEEL,sha256=CuZGaXTwoRLAOVv0AcE3bCTxO5ejVuBEJkUBe9C-kvk,94
+ prompty-0.1.14.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
+ prompty/__init__.py,sha256=mg_lSGVEe4etHHRhxoVLa6wgQDipFECd1rVFPyXzkUA,11148
  prompty/azure/__init__.py,sha256=6duJ79CDPG3w-cLk3vt8YfELDMOtSsnI2ClNLMFP_Og,100
  prompty/azure/executor.py,sha256=x2ng2EbYUxbingjy8w27TFGWezs4QC0LHh_S0F0-E1U,3082
  prompty/azure/processor.py,sha256=e9CcKG665zvCLPeJfS91FM6c_W_6YY0mVENxinCo19A,2253
@@ -13,7 +13,7 @@ prompty/openai/processor.py,sha256=PacKjMmGO-fd5KhOs98JyjsIf0Kl_J2SX5VroA8lVbI,2
  prompty/parsers.py,sha256=4mmIn4SVNs8B0R1BufanqUJk8v4r0OEEo8yx6UOxQpA,4670
  prompty/renderers.py,sha256=RSHFQFx7AtKLUfsMLCXR0a56Mb7DL1NJNgjUqgg3IqU,776
  prompty/serverless/__init__.py,sha256=KgsiNr-IhPiIuZoChvDf6xbbvFF467MCUKspJHo56yc,98
- prompty/serverless/executor.py,sha256=2XVzFX9SMX33sQTW-AZObiZ5NtVl3xVahb79ejMrlz8,2684
+ prompty/serverless/executor.py,sha256=fYCMV01iLBLpH2SQ9nxmrvY9ijAGpZezwkobZwUjSVQ,2781
  prompty/serverless/processor.py,sha256=pft1XGbPzo0MzQMbAt1VxsLsvRrjQO3B8MXEE2PfSA0,1982
  prompty/tracer.py,sha256=r-HC__xLtFPb2pr-xGUuhdlAaMA_PmsBB_NG79jKRO4,8810
- prompty-0.1.12.dist-info/RECORD,,
+ prompty-0.1.14.dist-info/RECORD,,
prompty-0.1.12.dist-info/METADATA DELETED
@@ -1,17 +0,0 @@
- Metadata-Version: 2.1
- Name: prompty
- Version: 0.1.12
- Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
- Author-Email: Seth Juarez <seth.juarez@microsoft.com>
- Requires-Dist: pyyaml>=6.0.1
- Requires-Dist: pydantic>=2.8.2
- Requires-Dist: jinja2>=3.1.4
- Requires-Dist: python-dotenv>=1.0.1
- Requires-Dist: click>=8.1.7
- Requires-Dist: azure-identity>=1.17.1; extra == "azure"
- Requires-Dist: openai>=1.35.10; extra == "azure"
- Requires-Dist: openai>=1.35.10; extra == "openai"
- Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
- Provides-Extra: azure
- Provides-Extra: openai
- Provides-Extra: serverless