prompty 0.1.9__py3-none-any.whl → 0.1.33__py3-none-any.whl

prompty/tracer.py CHANGED
@@ -1,6 +1,9 @@
  import os
  import json
  import inspect
+ import numbers
+ import traceback
+ import importlib
  import contextlib
  from pathlib import Path
  from numbers import Number
@@ -10,6 +13,18 @@ from functools import wraps, partial
  from typing import Any, Callable, Dict, Iterator, List
 
 
+ # clean up key value pairs for sensitive values
+ def sanitize(key: str, value: Any) -> Any:
+     if isinstance(value, str) and any(
+         [s in key.lower() for s in ["key", "token", "secret", "password", "credential"]]
+     ):
+         return len(str(value)) * "*"
+     elif isinstance(value, dict):
+         return {k: sanitize(k, v) for k, v in value.items()}
+     else:
+         return value
+
+
  class Tracer:
      _tracers: Dict[str, Callable[[str], Iterator[Callable[[str, Any], None]]]] = {}
 
@@ -30,7 +45,11 @@ class Tracer:
              traces = [
                  stack.enter_context(tracer(name)) for tracer in cls._tracers.values()
              ]
-             yield lambda key, value: [trace(key, value) for trace in traces]
+             yield lambda key, value: [
+                 # normalize and sanitize any trace values
+                 trace(key, sanitize(key, to_dict(value)))
+                 for trace in traces
+             ]
 
 
  def to_dict(obj: Any) -> Dict[str, Any]:
@@ -46,6 +65,8 @@ def to_dict(obj: Any) -> Dict[str, Any]:
      # safe PromptyStream obj serialization
      elif type(obj).__name__ == "PromptyStream":
          return "PromptyStream"
+     elif type(obj).__name__ == "AsyncPromptyStream":
+         return "AsyncPromptyStream"
      # pydantic models have their own json serialization
      elif isinstance(obj, BaseModel):
          return obj.model_dump()
@@ -67,11 +88,14 @@ def _name(func: Callable, args):
      else:
          signature = f"{func.__module__}.{func.__name__}"
 
-     # core invoker gets special treatment
-     core_invoker = signature == "prompty.core.Invoker.__call__"
+     # core invoker (prompty.invoker.Invoker) gets special treatment
+     core_invoker = signature.startswith("prompty.invoker.Invoker.run")
      if core_invoker:
          name = type(args[0]).__name__
-         signature = f"{args[0].__module__}.{args[0].__class__.__name__}.invoke"
+         if signature.endswith("async"):
+             signature = f"{args[0].__module__}.{args[0].__class__.__name__}.invoke_async"
+         else:
+             signature = f"{args[0].__module__}.{args[0].__class__.__name__}.invoke"
      else:
          name = func.__name__
 
@@ -91,7 +115,9 @@ def _results(result: Any) -> dict:
      return to_dict(result) if result is not None else "None"
 
 
- def _trace_sync(func: Callable = None, *, description: str = None) -> Callable:
+ def _trace_sync(
+     func: Callable = None, *, description: str = None, itemtype: str = None
+ ) -> Callable:
      description = description or ""
 
      @wraps(func)
@@ -102,18 +128,41 @@ def _trace_sync(func: Callable = None, *, description: str = None) -> Callable:
              if description and description != "":
                  trace("description", description)
 
+             if itemtype and itemtype != "":
+                 trace("type", itemtype)
+
              inputs = _inputs(func, args, kwargs)
              trace("inputs", inputs)
 
-             result = func(*args, **kwargs)
-             trace("result", _results(result))
+             try:
+                 result = func(*args, **kwargs)
+                 trace("result", _results(result))
+             except Exception as e:
+                 trace(
+                     "result",
+                     {
+                         "exception": {
+                             "type": type(e),
+                             "traceback": (
+                                 traceback.format_tb(tb=e.__traceback__)
+                                 if e.__traceback__
+                                 else None
+                             ),
+                             "message": str(e),
+                             "args": to_dict(e.args),
+                         }
+                     },
+                 )
+                 raise e
 
              return result
 
          return wrapper
 
 
- def _trace_async(func: Callable = None, *, description: str = None) -> Callable:
+ def _trace_async(
+     func: Callable = None, *, description: str = None, itemtype: str = None
+ ) -> Callable:
      description = description or ""
 
      @wraps(func)
@@ -124,24 +173,46 @@ def _trace_async(func: Callable = None, *, description: str = None) -> Callable:
              if description and description != "":
                  trace("description", description)
 
+             if itemtype and itemtype != "":
+                 trace("type", itemtype)
+
              inputs = _inputs(func, args, kwargs)
              trace("inputs", inputs)
-
-             result = await func(*args, **kwargs)
-             trace("result", _results(result))
+             try:
+                 result = await func(*args, **kwargs)
+                 trace("result", _results(result))
+             except Exception as e:
+                 trace(
+                     "result",
+                     {
+                         "exception": {
+                             "type": type(e),
+                             "traceback": (
+                                 traceback.format_tb(tb=e.__traceback__)
+                                 if e.__traceback__
+                                 else None
+                             ),
+                             "message": str(e),
+                             "args": to_dict(e.args),
+                         }
+                     },
+                 )
+                 raise e
 
              return result
 
          return wrapper
 
 
- def trace(func: Callable = None, *, description: str = None) -> Callable:
+ def trace(
+     func: Callable = None, *, description: str = None, itemtype: str = None
+ ) -> Callable:
      if func is None:
-         return partial(trace, description=description)
+         return partial(trace, description=description, itemtype=itemtype)
 
      wrapped_method = _trace_async if inspect.iscoroutinefunction(func) else _trace_sync
 
-     return wrapped_method(func, description=description)
+     return wrapped_method(func, description=description, itemtype=itemtype)
 
 
  class PromptyTracer:
@@ -161,6 +232,9 @@ class PromptyTracer:
          try:
              self.stack.append({"name": name})
              frame = self.stack[-1]
+             frame["__time"] = {
+                 "start": datetime.now(),
+             }
 
              def add(key: str, value: Any) -> None:
                  if key not in frame:
@@ -175,26 +249,92 @@ class PromptyTracer:
              yield add
          finally:
              frame = self.stack.pop()
+             start: datetime = frame["__time"]["start"]
+             end: datetime = datetime.now()
+
+             # add duration to frame
+             frame["__time"] = {
+                 "start": start.strftime("%Y-%m-%dT%H:%M:%S.%f"),
+                 "end": end.strftime("%Y-%m-%dT%H:%M:%S.%f"),
+                 "duration": int((end - start).total_seconds() * 1000),
+             }
+
+             # hoist usage to parent frame
+             if "result" in frame and isinstance(frame["result"], dict):
+                 if "usage" in frame["result"]:
+                     frame["__usage"] = self.hoist_item(
+                         frame["result"]["usage"],
+                         frame["__usage"] if "__usage" in frame else {},
+                     )
+
+             # streamed results may have usage as well
+             if "result" in frame and isinstance(frame["result"], list):
+                 for result in frame["result"]:
+                     if (
+                         isinstance(result, dict)
+                         and "usage" in result
+                         and isinstance(result["usage"], dict)
+                     ):
+                         frame["__usage"] = self.hoist_item(
+                             result["usage"],
+                             frame["__usage"] if "__usage" in frame else {},
+                         )
+
+             # add any usage frames from below
+             if "__frames" in frame:
+                 for child in frame["__frames"]:
+                     if "__usage" in child:
+                         frame["__usage"] = self.hoist_item(
+                             child["__usage"],
+                             frame["__usage"] if "__usage" in frame else {},
+                         )
+
              # if stack is empty, dump the frame
              if len(self.stack) == 0:
-                 trace_file = (
-                     self.output
-                     / f"{frame['name']}.{datetime.now().strftime('%Y%m%d.%H%M%S')}.ptrace"
-                 )
-
-                 with open(trace_file, "w") as f:
-                     json.dump(frame, f, indent=4)
+                 self.write_trace(frame)
              # otherwise, append the frame to the parent
              else:
                  if "__frames" not in self.stack[-1]:
                      self.stack[-1]["__frames"] = []
                  self.stack[-1]["__frames"].append(frame)
 
+     def hoist_item(self, src: Dict[str, Any], cur: Dict[str, Any]) -> Dict[str, Any]:
+         for key, value in src.items():
+             if value is None or isinstance(value, list) or isinstance(value, dict):
+                 continue
+             try:
+                 if key not in cur:
+                     cur[key] = value
+                 else:
+                     cur[key] += value
+             except:
+                 continue
+
+         return cur
+
+     def write_trace(self, frame: Dict[str, Any]) -> None:
+         trace_file = (
+             self.output
+             / f"{frame['name']}.{datetime.now().strftime('%Y%m%d.%H%M%S')}.tracy"
+         )
+
+         v = importlib.metadata.version("prompty")
+         enriched_frame = {
+             "runtime": "python",
+             "version": v,
+             "trace": frame,
+         }
+
+         with open(trace_file, "w") as f:
+             json.dump(enriched_frame, f, indent=4)
+
 
  @contextlib.contextmanager
  def console_tracer(name: str) -> Iterator[Callable[[str, Any], None]]:
      try:
          print(f"Starting {name}")
-         yield lambda key, value: print(f"{key}:\n{json.dumps(value, indent=4)}")
+         yield lambda key, value: print(
+             f"{key}:\n{json.dumps(to_dict(value), indent=4)}"
+         )
      finally:
          print(f"Ending {name}")
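 
The practical effect of the new `sanitize` helper above: any string value whose key looks secret-bearing is masked before it ever reaches a tracer. A minimal standalone sketch of the behavior (the function body is copied from the hunk above; the sample calls and expected output are illustrative):

```python
from typing import Any

# mask values whose key suggests a secret (copied from prompty/tracer.py above)
def sanitize(key: str, value: Any) -> Any:
    if isinstance(value, str) and any(
        [s in key.lower() for s in ["key", "token", "secret", "password", "credential"]]
    ):
        return len(str(value)) * "*"
    elif isinstance(value, dict):
        return {k: sanitize(k, v) for k, v in value.items()}
    else:
        return value

print(sanitize("api_key", "abc123"))           # ******
print(sanitize("config", {"token": "t0k3n"}))  # {'token': '*****'}
```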
prompty/utils.py ADDED
@@ -0,0 +1,105 @@
+ import re
+ import yaml
+ import json
+ import asyncio
+ import aiofiles
+ from typing import Dict
+ from pathlib import Path
+
+ _yaml_regex = re.compile(
+     r"^\s*" + r"(?:---|\+\+\+)" + r"(.*?)" + r"(?:---|\+\+\+)" + r"\s*(.+)$",
+     re.S | re.M,
+ )
+
+ def load_text(file_path, encoding='utf-8'):
+     with open(file_path, 'r', encoding=encoding) as file:
+         return file.read()
+
+ async def load_text_async(file_path, encoding='utf-8'):
+     async with aiofiles.open(file_path, mode='r', encoding=encoding) as f:
+         content = await f.read()
+         return content
+
+ def load_json(file_path, encoding='utf-8'):
+     return json.loads(load_text(file_path, encoding=encoding))
+
+ async def load_json_async(file_path, encoding='utf-8'):
+     # async file open
+     content = await load_text_async(file_path, encoding=encoding)
+     return json.loads(content)
+
+ def _find_global_config(prompty_path: Path = Path.cwd()) -> Path:
+     prompty_config = list(Path.cwd().glob("**/prompty.json"))
+
+     if len(prompty_config) > 0:
+         return sorted(
+             [
+                 c
+                 for c in prompty_config
+                 if len(c.parent.parts) <= len(prompty_path.parts)
+             ],
+             key=lambda p: len(p.parts),
+         )[-1]
+     else:
+         return None
+
+
+ def load_global_config(
+     prompty_path: Path = Path.cwd(), configuration: str = "default"
+ ) -> Dict[str, any]:
+     # prompty.config laying around?
+     config = _find_global_config(prompty_path)
+
+     # if there is one load it
+     if config is not None:
+         c = load_json(config)
+         if configuration in c:
+             return c[configuration]
+         else:
+             raise ValueError(f'Item "{configuration}" not found in "{config}"')
+
+     return {}
+
+
+ async def load_global_config_async(
+     prompty_path: Path = Path.cwd(), configuration: str = "default"
+ ) -> Dict[str, any]:
+     # prompty.config laying around?
+     config = _find_global_config(prompty_path)
+
+     # if there is one load it
+     if config is not None:
+         c = await load_json_async(config)
+         if configuration in c:
+             return c[configuration]
+         else:
+             raise ValueError(f'Item "{configuration}" not found in "{config}"')
+
+     return {}
+
+
+ def load_prompty(file_path, encoding='utf-8'):
+     contents = load_text(file_path, encoding=encoding)
+     return parse(contents)
+
+
+ async def load_prompty_async(file_path, encoding="utf-8"):
+     contents = await load_text_async(file_path, encoding=encoding)
+     return parse(contents)
+
+
+ def parse(contents):
+     global _yaml_regex
+
+     fmatter = ""
+     body = ""
+     result = _yaml_regex.search(contents)
+
+     if result:
+         fmatter = result.group(1)
+         body = result.group(2)
+     return {
+         "attributes": yaml.load(fmatter, Loader=yaml.FullLoader),
+         "body": body,
+         "frontmatter": fmatter,
+     }
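 
The new `parse` helper splits a prompty file into its YAML frontmatter and body. A minimal sketch of what it returns (behavior inferred from the implementation above; the sample contents are illustrative):

```python
# sample prompty-style contents: YAML frontmatter between --- fences, then the body
contents = """---
name: Basic Prompt
---
system:
You are an AI assistant.
"""

parsed = parse(contents)
print(parsed["attributes"])  # {'name': 'Basic Prompt'}
print(parsed["body"])        # 'system:\nYou are an AI assistant.\n'
```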
prompty-0.1.33.dist-info/METADATA ADDED
@@ -0,0 +1,218 @@
+ Metadata-Version: 2.1
+ Name: prompty
+ Version: 0.1.33
+ Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
+ Author-Email: Seth Juarez <seth.juarez@microsoft.com>
+ License: MIT
+ Requires-Python: >=3.9
+ Requires-Dist: pyyaml>=6.0.1
+ Requires-Dist: pydantic>=2.8.2
+ Requires-Dist: jinja2>=3.1.4
+ Requires-Dist: python-dotenv>=1.0.1
+ Requires-Dist: click>=8.1.7
+ Requires-Dist: aiofiles>=24.1.0
+ Provides-Extra: azure
+ Requires-Dist: azure-identity>=1.17.1; extra == "azure"
+ Requires-Dist: openai>=1.35.10; extra == "azure"
+ Provides-Extra: openai
+ Requires-Dist: openai>=1.35.10; extra == "openai"
+ Provides-Extra: serverless
+ Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
+ Description-Content-Type: text/markdown
+
+
+ Prompty is an asset class and format for LLM prompts designed to enhance observability, understandability, and portability for developers. The primary goal is to accelerate the developer inner loop of prompt engineering and prompt source management in a cross-language and cross-platform implementation.
+
+ The file format has a supporting toolchain with a VS Code extension and runtimes in multiple programming languages to simplify and accelerate your AI application development.
+
+ The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming languages.
+
+ ## The Prompty File Format
+ Prompty is a language-agnostic prompt asset for creating prompts and engineering the responses. Learn more about the format [here](https://prompty.ai/docs/prompty-file-spec).
+
+ Example prompty file:
+ ```markdown
+ ---
+ name: Basic Prompt
+ description: A basic prompt that uses the GPT-3 chat API to answer questions
+ authors:
+   - sethjuarez
+   - jietong
+ model:
+   api: chat
+   configuration:
+     api_version: 2023-12-01-preview
+     azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
+     azure_deployment: ${env:AZURE_OPENAI_DEPLOYMENT:gpt-35-turbo}
+ sample:
+   firstName: Jane
+   lastName: Doe
+   question: What is the meaning of life?
+ ---
+ system:
+ You are an AI assistant who helps people find information.
+ As the assistant, you answer questions briefly, succinctly,
+ and in a personable manner using markdown and even add some personal flair with appropriate emojis.
+
+ # Customer
+ You are helping {{firstName}} {{lastName}} to find answers to their questions.
+ Use their name to address them in your responses.
+
+ user:
+ {{question}}
+ ```
+
+
+ ## The Prompty VS Code Extension
+ Run Prompty files directly in VS Code. This Visual Studio Code extension offers an intuitive prompt playground within VS Code to streamline the prompt engineering process. You can find the Prompty extension in the Visual Studio Code Marketplace.
+
+ Download the [VS Code extension here](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.prompty).
+
+
+ ## Using this Prompty Runtime
+ The Python runtime is a simple way to run your prompts in Python. The runtime is available as a Python package and can be installed using pip. Depending on the type of prompt you are running, you may need to install additional dependencies. The runtime is designed to be extensible and can be customized to fit your needs.
+
+ ```bash
+ pip install prompty[azure]
+ ```
+
+ Simple usage example:
+
+ ```python
+ import prompty
+ # import invoker
+ import prompty.azure
+
+ # execute the prompt
+ response = prompty.execute("path/to/prompty/file")
+
+ print(response)
+ ```
+
+ ## Available Invokers
+ The Prompty runtime comes with a set of built-in invokers that can be used to execute prompts. These include:
+
+ - `azure`: Invokes the Azure OpenAI API
+ - `openai`: Invokes the OpenAI API
+ - `serverless`: Invokes serverless models (like the ones on GitHub) using the [Azure AI Inference client library](https://learn.microsoft.com/en-us/python/api/overview/azure/ai-inference-readme?view=azure-python-preview) (currently only key-based authentication is supported, with more managed identity support coming soon)
+
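+ For example, to use the OpenAI invoker instead of the Azure one shown earlier, install the matching `openai` extra and import its package before executing (a minimal sketch; it assumes `prompty.openai` registers itself on import the same way `prompty.azure` does above):
+
+ ```python
+ import prompty
+ # importing the invoker package registers it with the runtime
+ import prompty.openai
+
+ # execute a prompt whose model configuration targets OpenAI
+ response = prompty.execute("path/to/prompty/file")
+
+ print(response)
+ ```
+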
+ ## Using Tracing in Prompty
+ Prompty supports tracing to help you understand the execution of your prompts. This functionality is customizable and can be used to trace the execution of your prompts in a way that makes sense to you. Prompty has two default tracers built in: `console_tracer` and `PromptyTracer`. The `console_tracer` writes the trace to the console, and the `PromptyTracer` writes the trace to a JSON file. You can also create your own tracer by creating your own hook.
+
+ ```python
+ import prompty
+ # import invoker
+ import prompty.azure
+ from prompty.tracer import trace, Tracer, console_tracer, PromptyTracer
+
+ # add console tracer
+ Tracer.add("console", console_tracer)
+
+ # add PromptyTracer
+ json_tracer = PromptyTracer(output_dir="path/to/output")
+ Tracer.add("PromptyTracer", json_tracer.tracer)
+
+ # execute the prompt
+ response = prompty.execute("path/to/prompty/file")
+
+ print(response)
+ ```
+
+ You can also bring your own tracer by creating your own tracing hook. The `console_tracer` is the simplest example of a tracer; it writes the trace to the console. This is what it looks like:
+
+ ```python
+ @contextlib.contextmanager
+ def console_tracer(name: str) -> Iterator[Callable[[str, Any], None]]:
+     try:
+         print(f"Starting {name}")
+         yield lambda key, value: print(f"{key}:\n{json.dumps(value, indent=4)}")
+     finally:
+         print(f"Ending {name}")
+ ```
+
+ It uses a context manager to define the start and end of the trace so you can do whatever setup and teardown you need. The `yield` statement returns a function that you can use to write the trace. The `console_tracer` writes the trace to the console using the `print` function.
+
+ The `PromptyTracer` is a more complex example of a tracer that manages its internal state using a full class. Here's an example of the class-based approach that writes each function trace to a JSON file:
+
+ ```python
+ class SimplePromptyTracer:
+     def __init__(self, output_dir: str):
+         self.output_dir = output_dir
+
+     @contextlib.contextmanager
+     def tracer(self, name: str) -> Iterator[Callable[[str, Any], None]]:
+         trace = {}
+         try:
+             yield lambda key, value: trace.update({key: value})
+         finally:
+             with open(os.path.join(self.output_dir, f"{name}.json"), "w") as f:
+                 json.dump(trace, f, indent=4)
+ ```
+
+ The tracing mechanism is supported for all of the prompty runtime internals and can be used to trace the execution of the prompt along with all of the parameters. There is also a `@trace` decorator that can be used to trace the execution of any function external to the runtime. This is provided as a facility to trace the execution of the prompt and whatever supporting code you have.
+
+ ```python
+ import prompty
+ # import invoker
+ import prompty.azure
+ from prompty.tracer import trace, Tracer, PromptyTracer
+
+ json_tracer = PromptyTracer(output_dir="path/to/output")
+ Tracer.add("PromptyTracer", json_tracer.tracer)
+
+ @trace
+ def get_customer(customerId):
+     return {"id": customerId, "firstName": "Sally", "lastName": "Davis"}
+
+ @trace
+ def get_response(customerId, question, prompt):
+     customer = get_customer(customerId)
+
+     result = prompty.execute(
+         prompt,
+         inputs={"question": question, "customer": customer},
+     )
+     return {"question": question, "answer": result}
+ ```
+
+ In this case, whenever this code is executed, a `.tracy` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
+
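+ The `.tracy` file itself is plain JSON. Here is a minimal sketch of inspecting one (the top-level `runtime`/`version`/`trace` keys follow this release's `write_trace` implementation in `prompty/tracer.py`; the file name below is illustrative):
+
+ ```python
+ import json
+
+ # trace files are named {name}.{timestamp}.tracy (illustrative path)
+ with open("path/to/output/get_response.20240101.120000.tracy") as f:
+     data = json.load(f)
+
+ print(data["runtime"])  # "python"
+ print(data["version"])  # installed prompty version
+ frame = data["trace"]   # root frame: name, inputs, result, __time, nested __frames
+ ```
+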
+ ## OpenTelemetry Tracing
+ You can add OpenTelemetry tracing to your application using the same hook mechanism. In your application, you might create something like `trace_span` to trace the execution of your prompts:
+
+ ```python
+ from opentelemetry import trace as oteltrace
+
+ _tracer = "prompty"
+
+ @contextlib.contextmanager
+ def trace_span(name: str):
+     tracer = oteltrace.get_tracer(_tracer)
+     with tracer.start_as_current_span(name) as span:
+         yield lambda key, value: span.set_attribute(
+             key, json.dumps(value).replace("\n", "")
+         )
+
+ # adding this hook to the prompty runtime
+ Tracer.add("OpenTelemetry", trace_span)
+ ```
+
+ This will produce spans during the execution of the prompt that can be sent to an OpenTelemetry collector for further analysis.
+
+ ## CLI
+ The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
+
+ ```bash
+ prompty -s path/to/prompty/file -e .env
+ ```
+
+ This will execute the prompt and print the response to the console. If there are any environment variables the CLI should take into account, you can pass those in via the `-e` flag. It also has default tracing enabled.
+
+ ## Contributing
+ We welcome contributions to the Prompty project! This community-led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
prompty-0.1.33.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
+ prompty-0.1.33.dist-info/METADATA,sha256=nS7SXTCSlIm-LUsCFMAA9rvh6piDyFJLBkg_G96l85k,9105
+ prompty-0.1.33.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+ prompty-0.1.33.dist-info/entry_points.txt,sha256=a3i7Kvf--3DOkkv9VQpstwaNKgsnXwDGaPL18lPpKeI,60
+ prompty-0.1.33.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
+ prompty/__init__.py,sha256=HCAvInBgNcIDO54rR4-RDIF4KUmGVQ2TRam_dS7xHEk,16561
+ prompty/azure/__init__.py,sha256=WI8qeNWfxqggj21bznL-mxGUS-v67bUrunX0Lf2hsI8,295
+ prompty/azure/executor.py,sha256=RJXMB0W7KcVvQ7l3xJaau7YM8PqOCQwuN4IwIe0sTLg,7930
+ prompty/azure/processor.py,sha256=eWcHTLwxxBw7ZfK-rSf2cdljJgouxGXuRh_7EtV-MGk,4974
+ prompty/cli.py,sha256=k8Rxm41fMFNvmnsX737UiN6v-7756tpoJPN4rPXMNcU,3726
+ prompty/core.py,sha256=EvkXV_mH7Mj1skT21XMZ4VX-Jlwx6AF-WEJ9yPc50AE,13061
+ prompty/invoker.py,sha256=O77E5iQ1552wQXxL8FhZGERbCi_0O3mDTd5Ozqw-O-E,8593
+ prompty/openai/__init__.py,sha256=hbBhgCwB_uSq-1NWL02yiOiNkyi39-G-AyVlTSgKTkU,276
+ prompty/openai/executor.py,sha256=qkFSMA-pWlA1c602Dx5aR1cFEOnYsUUp_E7P3zFhSPs,3644
+ prompty/openai/processor.py,sha256=l9-91_CCgRtYvkwMO-jV6rkgeCA4gV_MFamQcvoNGQ0,2499
+ prompty/parsers.py,sha256=zHqcRpFPUDG6BOI7ipaJf6yGc6ZbKnsLmO7jKEYNct4,5013
+ prompty/renderers.py,sha256=80HNtCp3osgaLfhKxkG4j1kiRhJ727ITzT_yL5JLjEQ,1104
+ prompty/serverless/__init__.py,sha256=xoXOTRXO8C631swNKaa-ek5_R3X-87bJpTm0z_Rsg6A,282
+ prompty/serverless/executor.py,sha256=pwR_9itQUJ65Nk-sRJcfueJVZlvJdQ87R9FSHPCOi5o,5086
+ prompty/serverless/processor.py,sha256=xjVvm4TSbp0PYbGzqLnm0PGxITlg-PIp1Uh4Rhvc1JY,2312
+ prompty/tracer.py,sha256=pBCFs5xtWUOqF_QksHE3iRIIIxPyioxb4xHok-76ppQ,11169
+ prompty/utils.py,sha256=jm7HEzOGk3zz8d5aquXK3zWIQWuDpBpJTzlz5sswtdg,2836
+ prompty-0.1.33.dist-info/RECORD,,
prompty-0.1.33.dist-info/WHEEL CHANGED
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: pdm-backend (2.3.3)
+ Generator: pdm-backend (2.4.3)
  Root-Is-Purelib: true
  Tag: py3-none-any
prompty-0.1.33.dist-info/entry_points.txt ADDED
@@ -0,0 +1,5 @@
+ [console_scripts]
+ prompty = prompty.cli:run
+
+ [gui_scripts]
+