prompty 0.1.23__tar.gz → 0.1.33__tar.gz
- {prompty-0.1.23 → prompty-0.1.33}/PKG-INFO +11 -8
- {prompty-0.1.23 → prompty-0.1.33}/README.md +4 -4
- {prompty-0.1.23 → prompty-0.1.33}/prompty/__init__.py +309 -123
- {prompty-0.1.23 → prompty-0.1.33}/prompty/azure/__init__.py +1 -1
- {prompty-0.1.23 → prompty-0.1.33}/prompty/azure/executor.py +89 -3
- {prompty-0.1.23 → prompty-0.1.33}/prompty/azure/processor.py +66 -2
- {prompty-0.1.23 → prompty-0.1.33}/prompty/core.py +87 -258
- prompty-0.1.33/prompty/invoker.py +297 -0
- {prompty-0.1.23 → prompty-0.1.33}/prompty/openai/__init__.py +1 -1
- {prompty-0.1.23 → prompty-0.1.33}/prompty/openai/executor.py +17 -1
- {prompty-0.1.23 → prompty-0.1.33}/prompty/openai/processor.py +17 -1
- {prompty-0.1.23 → prompty-0.1.33}/prompty/parsers.py +18 -1
- {prompty-0.1.23 → prompty-0.1.33}/prompty/renderers.py +19 -2
- {prompty-0.1.23 → prompty-0.1.33}/prompty/serverless/__init__.py +1 -1
- {prompty-0.1.23 → prompty-0.1.33}/prompty/serverless/executor.py +27 -5
- {prompty-0.1.23 → prompty-0.1.33}/prompty/serverless/processor.py +17 -1
- {prompty-0.1.23 → prompty-0.1.33}/prompty/tracer.py +60 -38
- prompty-0.1.33/prompty/utils.py +105 -0
- {prompty-0.1.23 → prompty-0.1.33}/pyproject.toml +12 -6
- {prompty-0.1.23 → prompty-0.1.33}/tests/fake_azure_executor.py +52 -2
- {prompty-0.1.23 → prompty-0.1.33}/tests/fake_serverless_executor.py +36 -1
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/context.prompty +1 -1
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/funcfile.prompty +1 -1
- prompty-0.1.33/tests/prompts/prompty.json +9 -0
- prompty-0.1.33/tests/prompts/sub/sub/prompty.json +9 -0
- prompty-0.1.33/tests/prompts/sub/sub/test.py +10 -0
- prompty-0.1.33/tests/prompts/test.py +9 -0
- prompty-0.1.33/tests/test_common.py +48 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/test_execute.py +118 -3
- {prompty-0.1.23 → prompty-0.1.33}/tests/test_factory_invoker.py +11 -24
- prompty-0.1.33/tests/test_path_exec.py +74 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/test_tracing.py +109 -3
- prompty-0.1.23/tests/prompts/prompty.json +0 -9
- prompty-0.1.23/tests/prompts/sub/sub/prompty.json +0 -9
- prompty-0.1.23/tests/prompts/sub/sub/test.py +0 -5
- prompty-0.1.23/tests/prompts/test.py +0 -5
- prompty-0.1.23/tests/test_common.py +0 -24
- prompty-0.1.23/tests/test_path_exec.py +0 -37
- {prompty-0.1.23 → prompty-0.1.33}/LICENSE +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/prompty/cli.py +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/1contoso.md +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/2contoso.md +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/3contoso.md +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/4contoso.md +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/basic.prompty.md +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/camping.jpg +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/context.prompty.md +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/contoso_multi.md +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/faithfulness.prompty.md +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/generated/groundedness.prompty.md +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/hello_world.embedding.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/__init__.py +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/basic.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/basic.prompty.execution.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/basic_json_output.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/camping.jpg +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/chat.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/context.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/context.prompty.execution.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/embedding.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/embedding.prompty.execution.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/evaluation.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/faithfulness.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/faithfulness.prompty.execution.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/fake.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/funcfile.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/functions.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/functions.prompty.execution.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/groundedness.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/groundedness.prompty.execution.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/serverless.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/serverless.prompty.execution.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/serverless_stream.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/streaming.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/streaming.prompty.execution.json +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/sub/__init__.py +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/sub/basic.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/sub/sub/__init__.py +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompts/sub/sub/basic.prompty +0 -0
- {prompty-0.1.23 → prompty-0.1.33}/tests/prompty.json +0 -0
````diff
--- prompty-0.1.23/PKG-INFO
+++ prompty-0.1.33/PKG-INFO
@@ -1,20 +1,23 @@
 Metadata-Version: 2.1
 Name: prompty
-Version: 0.1.23
+Version: 0.1.33
 Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
 Author-Email: Seth Juarez <seth.juarez@microsoft.com>
+License: MIT
+Requires-Python: >=3.9
 Requires-Dist: pyyaml>=6.0.1
 Requires-Dist: pydantic>=2.8.2
 Requires-Dist: jinja2>=3.1.4
 Requires-Dist: python-dotenv>=1.0.1
 Requires-Dist: click>=8.1.7
+Requires-Dist: aiofiles>=24.1.0
+Provides-Extra: azure
 Requires-Dist: azure-identity>=1.17.1; extra == "azure"
 Requires-Dist: openai>=1.35.10; extra == "azure"
-Requires-Dist: openai>=1.35.10; extra == "openai"
-Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
-Provides-Extra: azure
 Provides-Extra: openai
+Requires-Dist: openai>=1.35.10; extra == "openai"
 Provides-Extra: serverless
+Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
 Description-Content-Type: text/markdown
 
 
@@ -22,7 +25,7 @@ Prompty is an asset class and format for LLM prompts designed to enhance observa
 
 The file format has a supporting toolchain with a VS Code extension and runtimes in multiple programming languages to simplify and accelerate your AI application development.
 
-The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming
+The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming languages.
 
 ## The Prompty File Format
 Prompty is a language agnostic prompt asset for creating prompts and engineering the responses. Learn more about the format [here](https://prompty.ai/docs/prompty-file-spec).
@@ -95,7 +98,7 @@ The Prompty runtime comes with a set of built-in invokers that can be used to ex
 
 
 ## Using Tracing in Prompty
-Prompty supports tracing to help you understand the execution of your prompts. This functionality is
+Prompty supports tracing to help you understand the execution of your prompts. This functionality is customizable and can be used to trace the execution of your prompts in a way that makes sense to you. Prompty has two default traces built in: `console_tracer` and `PromptyTracer`. The `console_tracer` writes the trace to the console, and the `PromptyTracer` writes the trace to a JSON file. You can also create your own tracer by creating your own hook.
 
 ```python
 import prompty
@@ -117,7 +120,7 @@ print(response)
 ```
 
 You can also bring your own tracer by your own tracing hook. The `console_tracer` is the simplest example of a tracer. It writes the trace to the console.
-This is what it
+This is what it looks like:
 
 ```python
 @contextlib.contextmanager
@@ -212,4 +215,4 @@ prompty -s path/to/prompty/file -e .env
 This will execute the prompt and print the response to the console. If there are any environment variables the CLI should take into account, you can pass those in via the `-e` flag. It also has default tracing enabled.
 
 ## Contributing
-We welcome contributions to the Prompty project! This community led project is open to all contributors. The project
+We welcome contributions to the Prompty project! This community led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
````
````diff
--- prompty-0.1.23/README.md
+++ prompty-0.1.33/README.md
@@ -3,7 +3,7 @@ Prompty is an asset class and format for LLM prompts designed to enhance observa
 
 The file format has a supporting toolchain with a VS Code extension and runtimes in multiple programming languages to simplify and accelerate your AI application development.
 
-The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming
+The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming languages.
 
 ## The Prompty File Format
 Prompty is a language agnostic prompt asset for creating prompts and engineering the responses. Learn more about the format [here](https://prompty.ai/docs/prompty-file-spec).
@@ -76,7 +76,7 @@ The Prompty runtime comes with a set of built-in invokers that can be used to ex
 
 
 ## Using Tracing in Prompty
-Prompty supports tracing to help you understand the execution of your prompts. This functionality is
+Prompty supports tracing to help you understand the execution of your prompts. This functionality is customizable and can be used to trace the execution of your prompts in a way that makes sense to you. Prompty has two default traces built in: `console_tracer` and `PromptyTracer`. The `console_tracer` writes the trace to the console, and the `PromptyTracer` writes the trace to a JSON file. You can also create your own tracer by creating your own hook.
 
 ```python
 import prompty
@@ -98,7 +98,7 @@ print(response)
 ```
 
 You can also bring your own tracer by your own tracing hook. The `console_tracer` is the simplest example of a tracer. It writes the trace to the console.
-This is what it
+This is what it looks like:
 
 ```python
 @contextlib.contextmanager
@@ -193,4 +193,4 @@ prompty -s path/to/prompty/file -e .env
 This will execute the prompt and print the response to the console. If there are any environment variables the CLI should take into account, you can pass those in via the `-e` flag. It also has default tracing enabled.
 
 ## Contributing
-We welcome contributions to the Prompty project! This community led project is open to all contributors. The project
+We welcome contributions to the Prompty project! This community led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
````
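The tracing section added to the README above names two built-in tracers. As a minimal sketch of wiring them up — assuming the `Tracer`, `console_tracer`, and `PromptyTracer` names that `prompty.tracer` exports, per the README text — it could look like this:

```python
# Minimal tracing setup sketch; assumes prompty.tracer exports Tracer,
# console_tracer, and PromptyTracer as described in the README text above.
import prompty
from prompty.tracer import Tracer, console_tracer, PromptyTracer

# console_tracer streams each trace frame to the console
Tracer.add("console", console_tracer)

# PromptyTracer collects frames and writes them out as a JSON file
json_tracer = PromptyTracer()
Tracer.add("PromptyTracer", json_tracer.tracer)

# any subsequent execute() call is traced by both hooks
response = prompty.execute("prompts/basic.prompty")
print(response)
```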
````diff
--- prompty-0.1.23/prompty/__init__.py
+++ prompty-0.1.33/prompty/__init__.py
@@ -1,52 +1,26 @@
-import json
 import traceback
 from pathlib import Path
 from typing import Dict, List, Union
-
-from .tracer import trace
-from .core import (
-    Frontmatter,
-    InvokerException,
-    InvokerFactory,
+from .tracer import trace
+from .invoker import InvokerFactory, NoOp
+from .core import (
     ModelSettings,
     Prompty,
     PropertySettings,
     TemplateSettings,
     param_hoisting,
 )
+from .utils import (
+    load_global_config,
+    load_global_config_async,
+    load_prompty_async,
+    load_prompty,
+)
 
 from .renderers import *
 from .parsers import *
 
 
-def load_global_config(
-    prompty_path: Path = Path.cwd(), configuration: str = "default"
-) -> Dict[str, any]:
-    # prompty.config laying around?
-    prompty_config = list(Path.cwd().glob("**/prompty.json"))
-
-    # if there is one load it
-    if len(prompty_config) > 0:
-        # pick the nearest prompty.json
-        config = sorted(
-            [
-                c
-                for c in prompty_config
-                if len(c.parent.parts) <= len(prompty_path.parts)
-            ],
-            key=lambda p: len(p.parts),
-        )[-1]
-
-        with open(config, "r") as f:
-            c = json.load(f)
-            if configuration in c:
-                return c[configuration]
-            else:
-                raise ValueError(f'Item "{configuration}" not found in "{config}"')
-
-    return {}
-
-
 @trace(description="Create a headless prompty object for programmatic use.")
 def headless(
     api: str,
````
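For orientation: the `load_global_config` helper removed here is re-exported from the new `prompty.utils` module per the import block above, and resolves the nearest `prompty.json` to return one named section of it. A sketch of that contract, assuming the utils version keeps the removed function's signature; the `prompty.json` contents are illustrative:

```python
# Sketch of the global-config contract; the prompty.json contents below are
# illustrative, not taken from the package.
import json
from pathlib import Path

from prompty.utils import load_global_config

# a prompty.json sitting next to your .prompty files, keyed by configuration name
Path("prompty.json").write_text(
    json.dumps({"default": {"type": "azure", "api_version": "2023-12-01-preview"}})
)

# resolves the nearest prompty.json and returns its "default" section
config = load_global_config(Path.cwd(), "default")
print(config["type"])  # -> azure
```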
````diff
@@ -104,47 +78,65 @@ def headless(
     return Prompty(model=modelSettings, template=templateSettings, content=content)
 
 
-@trace(description="Load a prompty file.")
-def load(prompty_file: str, configuration: str = "default") -> Prompty:
-    """Load a prompty file.
+@trace(description="Create a headless prompty object for programmatic use.")
+async def headless_async(
+    api: str,
+    content: str | List[str] | dict,
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    connection: str = "default",
+) -> Prompty:
+    """Create a headless prompty object for programmatic use.
 
     Parameters
     ----------
-    prompty_file : str
-        The path to the prompty file
-    configuration : str, optional
-        The configuration to use, by default "default"
+    api : str
+        The API to use for the model
+    content : str | List[str] | dict
+        The content to process
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    connection : str, optional
+        The connection to use, by default "default"
 
     Returns
     -------
     Prompty
-        The loaded prompty object
+        The headless prompty object
 
     Example
     -------
     >>> import prompty
-    >>> p = prompty.load("prompts/basic.prompty")
-    >>> print(p)
-    """
+    >>> p = await prompty.headless_async(
+            api="embedding",
+            configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
+            content="hello world",
+        )
+    >>> emb = prompty.execute(p)
 
-    p = Path(prompty_file)
-    if not p.is_absolute():
-        # get caller's path (take into account trace frame)
-        caller = Path(traceback.extract_stack()[-3].filename)
-        p = Path(caller.parent / p).resolve().absolute()
+    """
 
-    # load dictionary from prompty file
-    matter = Frontmatter.read_file(p)
-    attributes = matter["attributes"]
-    content = matter["body"]
+    # get caller's path (to get relative path for prompty.json)
+    caller = Path(traceback.extract_stack()[-2].filename)
+    templateSettings = TemplateSettings(type="NOOP", parser="NOOP")
 
-    # normalize attribute dictionary resolve keys and files
-    attributes = Prompty.normalize(attributes, p.parent)
+    global_config = await load_global_config_async(caller.parent, connection)
+    c = await Prompty.normalize_async(
+        param_hoisting(configuration, global_config), caller.parent
+    )
 
-    # load global configuration
-    global_config = Prompty.normalize(
-        load_global_config(p.parent, configuration), p.parent
+    modelSettings = ModelSettings(
+        api=api,
+        configuration=c,
+        parameters=parameters,
     )
+
+    return Prompty(model=modelSettings, template=templateSettings, content=content)
+
+
+def _load_raw_prompty(attributes: dict, content: str, p: Path, global_config: dict):
     if "model" not in attributes:
         attributes["model"] = {}
 
````
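The docstring above already carries the canonical example for the new `headless_async`; here it is as a runnable sketch (the Azure deployment name is the docstring's illustration, not a requirement):

```python
# Runnable version of the headless_async docstring example above.
import asyncio

import prompty

async def main():
    p = await prompty.headless_async(
        api="embedding",
        configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
        content="hello world",
    )
    # the headless object can then be executed like any loaded prompty
    emb = prompty.execute(p)
    print(emb)

asyncio.run(main())
```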
````diff
@@ -196,47 +188,132 @@ def load(prompty_file: str, configuration: str = "default") -> Prompty:
     else:
         outputs = {}
 
+    p = Prompty(
+        **attributes,
+        model=model,
+        inputs=inputs,
+        outputs=outputs,
+        template=template,
+        content=content,
+        file=p,
+    )
+
+    return p
+
+
+@trace(description="Load a prompty file.")
+def load(prompty_file: str, configuration: str = "default") -> Prompty:
+    """Load a prompty file.
+
+    Parameters
+    ----------
+    prompty_file : str
+        The path to the prompty file
+    configuration : str, optional
+        The configuration to use, by default "default"
+
+    Returns
+    -------
+    Prompty
+        The loaded prompty object
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> print(p)
+    """
+
+    p = Path(prompty_file)
+    if not p.is_absolute():
+        # get caller's path (take into account trace frame)
+        caller = Path(traceback.extract_stack()[-3].filename)
+        p = Path(caller.parent / p).resolve().absolute()
+
+    # load dictionary from prompty file
+    matter = load_prompty(p)
+
+    attributes = matter["attributes"]
+    content = matter["body"]
+
+    # normalize attribute dictionary resolve keys and files
+    attributes = Prompty.normalize(attributes, p.parent)
+
+    # load global configuration
+    global_config = Prompty.normalize(
+        load_global_config(p.parent, configuration), p.parent
+    )
+
+    prompty = _load_raw_prompty(attributes, content, p, global_config)
+
     # recursive loading of base prompty
     if "base" in attributes:
         # load the base prompty from the same directory as the current prompty
         base = load(p.parent / attributes["base"])
-        … (30 deleted lines collapsed in this view)
+        prompty = Prompty.hoist_base_prompty(prompty, base)
+
+    return prompty
+
+
+@trace(description="Load a prompty file.")
+async def load_async(prompty_file: str, configuration: str = "default") -> Prompty:
+    """Load a prompty file.
+
+    Parameters
+    ----------
+    prompty_file : str
+        The path to the prompty file
+    configuration : str, optional
+        The configuration to use, by default "default"
+
+    Returns
+    -------
+    Prompty
+        The loaded prompty object
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> print(p)
+    """
+
+    p = Path(prompty_file)
+    if not p.is_absolute():
+        # get caller's path (take into account trace frame)
+        caller = Path(traceback.extract_stack()[-3].filename)
+        p = Path(caller.parent / p).resolve().absolute()
+
+    # load dictionary from prompty file
+    matter = await load_prompty_async(p)
+
+    attributes = matter["attributes"]
+    content = matter["body"]
+
+    # normalize attribute dictionary resolve keys and files
+    attributes = await Prompty.normalize_async(attributes, p.parent)
+
+    # load global configuration
+    config = await load_global_config_async(p.parent, configuration)
+    global_config = await Prompty.normalize_async(config, p.parent)
+
+    prompty = _load_raw_prompty(attributes, content, p, global_config)
+
+    # recursive loading of base prompty
+    if "base" in attributes:
+        # load the base prompty from the same directory as the current prompty
+        base = await load_async(p.parent / attributes["base"])
+        prompty = Prompty.hoist_base_prompty(prompty, base)
+
+    return prompty
+
 
 @trace(description="Prepare the inputs for the prompt.")
 def prepare(
     prompt: Prompty,
     inputs: Dict[str, any] = {},
 ):
-    """
+    """Prepare the inputs for the prompt.
 
     Parameters
     ----------
````
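Both loaders resolve relative paths against the caller's file and recursively fold in any `base` prompty; the async variant reads the file through the new `load_prompty_async` helper (presumably via the `aiofiles` dependency added in PKG-INFO). A side-by-side sketch:

```python
# Side-by-side sketch of the sync and async loaders added above.
import asyncio

import prompty

# synchronous, as in prior releases
p = prompty.load("prompts/basic.prompty")
print(p.model.api)

# asynchronous counterpart added in 0.1.33
async def main():
    p = await prompty.load_async("prompts/basic.prompty")
    print(p.model.api)

asyncio.run(main())
```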
````diff
@@ -259,24 +336,46 @@ def prepare(
     """
     inputs = param_hoisting(inputs, prompt.sample)
 
-    if prompt.template.type == "NOOP":
-        render = prompt.content
-    else:
-        # render
-        renderer = InvokerFactory.create_renderer(prompt.template.type, prompt)
-        render = renderer(inputs)
+    render = InvokerFactory.run_renderer(prompt, inputs, prompt.content)
+    result = InvokerFactory.run_parser(prompt, render)
 
-    if prompt.template.parser == "NOOP":
-        result = render
-    else:
-        # parse [parser].[api]
-        parser = InvokerFactory.create_parser(
-            f"{prompt.template.parser}.{prompt.model.api}", prompt
-        )
-        result = parser(render)
+    return result
+
+
+@trace(description="Prepare the inputs for the prompt.")
+async def prepare_async(
+    prompt: Prompty,
+    inputs: Dict[str, any] = {},
+):
+    """Prepare the inputs for the prompt.
+
+    Parameters
+    ----------
+    prompt : Prompty
+        The prompty object
+    inputs : Dict[str, any], optional
+        The inputs to the prompt, by default {}
+
+    Returns
+    -------
+    dict
+        The prepared and hidrated template shaped to the LLM model
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> inputs = {"name": "John Doe"}
+    >>> content = await prompty.prepare_async(p, inputs)
+    """
+    inputs = param_hoisting(inputs, prompt.sample)
+
+    render = await InvokerFactory.run_renderer_async(prompt, inputs, prompt.content)
+    result = await InvokerFactory.run_parser_async(prompt, render)
 
     return result
 
+
 @trace(description="Run the prepared Prompty content against the model.")
 def run(
     prompt: Prompty,
````
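Note how both variants now delegate to `InvokerFactory.run_renderer`/`run_parser` (and their `_async` twins) instead of constructing renderer and parser invokers inline. Usage is unchanged:

```python
# prepare vs. prepare_async, per the docstrings above.
import asyncio

import prompty

p = prompty.load("prompts/basic.prompty")
inputs = {"name": "John Doe"}

# sync: renders the template and parses it into the model's message shape
content = prompty.prepare(p, inputs)
print(content)

# async: same pipeline through the run_*_async invoker entry points
async def main():
    content = await prompty.prepare_async(p, inputs)
    print(content)

asyncio.run(main())
```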
````diff
@@ -322,32 +421,65 @@ def run(
     if parameters != {}:
         prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)
 
-    invoker_type = prompt.model.configuration["type"]
+    result = InvokerFactory.run_executor(prompt, content)
+    if not raw:
+        result = InvokerFactory.run_processor(prompt, result)
+
+    return result
+
+
+@trace(description="Run the prepared Prompty content against the model.")
+async def run_async(
+    prompt: Prompty,
+    content: dict | list | str,
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    raw: bool = False,
+):
+    """Run the prepared Prompty content.
 
-    # invoker registration check
-    if not InvokerFactory.has_invoker("executor", invoker_type):
-        raise InvokerException(
-            f"{invoker_type} Invoker has not been registered properly.", invoker_type
+    Parameters
+    ----------
+    prompt : Prompty
+        The prompty object
+    content : dict | list | str
+        The content to process
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    raw : bool, optional
+        Whether to skip processing, by default False
+
+    Returns
+    -------
+    any
+        The result of the prompt
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> inputs = {"name": "John Doe"}
+    >>> content = await prompty.prepare_async(p, inputs)
+    >>> result = await prompty.run_async(p, content)
+    """
+
+    if configuration != {}:
+        prompt.model.configuration = param_hoisting(
+            configuration, prompt.model.configuration
         )
 
-    # execute
-    executor = InvokerFactory.create_executor(invoker_type, prompt)
-    result = executor(content)
+    if parameters != {}:
+        prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)
 
-    # skip?
+    result = await InvokerFactory.run_executor_async(prompt, content)
     if not raw:
-        # invoker registration check
-        if not InvokerFactory.has_invoker("processor", invoker_type):
-            raise InvokerException(
-                f"{invoker_type} Invoker has not been registered properly.", invoker_type
-            )
-
-        # process
-        processor = InvokerFactory.create_processor(invoker_type, prompt)
-        result = processor(result)
+        result = await InvokerFactory.run_processor_async(prompt, result)
 
     return result
 
+
 @trace(description="Execute a prompty")
 def execute(
     prompt: Union[str, Prompty],
````
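The same consolidation applies here: the explicit `has_invoker` checks and `create_executor`/`create_processor` calls collapse into `InvokerFactory.run_executor`/`run_processor`. A sketch of the full async pipeline, with an illustrative parameter override:

```python
# load -> prepare -> run with the new async entry points; the temperature
# override below is illustrative.
import asyncio

import prompty

async def main():
    p = await prompty.load_async("prompts/basic.prompty")
    content = await prompty.prepare_async(p, {"name": "John Doe"})
    # parameters passed here are hoisted over the .prompty frontmatter values
    result = await prompty.run_async(p, content, parameters={"temperature": 0.2})
    print(result)

asyncio.run(main())
```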
````diff
@@ -400,3 +532,57 @@ def execute(
     result = run(prompt, content, configuration, parameters, raw)
 
     return result
+
+
+@trace(description="Execute a prompty")
+async def execute_async(
+    prompt: Union[str, Prompty],
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    inputs: Dict[str, any] = {},
+    raw: bool = False,
+    config_name: str = "default",
+):
+    """Execute a prompty.
+
+    Parameters
+    ----------
+    prompt : Union[str, Prompty]
+        The prompty object or path to the prompty file
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    inputs : Dict[str, any], optional
+        The inputs to the prompt, by default {}
+    raw : bool, optional
+        Whether to skip processing, by default False
+    connection : str, optional
+        The connection to use, by default "default"
+
+    Returns
+    -------
+    any
+        The result of the prompt
+
+    Example
+    -------
+    >>> import prompty
+    >>> inputs = {"name": "John Doe"}
+    >>> result = await prompty.execute_async("prompts/basic.prompty", inputs=inputs)
+    """
+    if isinstance(prompt, str):
+        path = Path(prompt)
+        if not path.is_absolute():
+            # get caller's path (take into account trace frame)
+            caller = Path(traceback.extract_stack()[-3].filename)
+            path = Path(caller.parent / path).resolve().absolute()
+        prompt = await load_async(path, config_name)
+
+    # prepare content
+    content = await prepare_async(prompt, inputs)
+
+    # run LLM model
+    result = await run_async(prompt, content, configuration, parameters, raw)
+
+    return result
````
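And the one-shot form, which strings load, prepare, and run together exactly as the sync `execute` does:

```python
# One-shot sketch of the new execute_async, per its docstring above.
import asyncio

import prompty

async def main():
    result = await prompty.execute_async(
        "prompts/basic.prompty",
        inputs={"name": "John Doe"},
        config_name="default",  # which prompty.json section to use
    )
    print(result)

asyncio.run(main())
```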