prompty 0.1.47__py3-none-any.whl → 0.1.49__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prompty/__init__.py +2 -1
- prompty/cli.py +4 -4
- prompty/core.py +50 -80
- prompty/mustache.py +666 -0
- prompty/renderers.py +33 -0
- prompty/tracer.py +7 -6
- prompty/utils.py +3 -2
- {prompty-0.1.47.dist-info → prompty-0.1.49.dist-info}/METADATA +5 -5
- {prompty-0.1.47.dist-info → prompty-0.1.49.dist-info}/RECORD +12 -11
- {prompty-0.1.47.dist-info → prompty-0.1.49.dist-info}/WHEEL +0 -0
- {prompty-0.1.47.dist-info → prompty-0.1.49.dist-info}/entry_points.txt +0 -0
- {prompty-0.1.47.dist-info → prompty-0.1.49.dist-info}/licenses/LICENSE +0 -0
prompty/__init__.py
CHANGED
```diff
@@ -12,7 +12,7 @@ from .core import (
 )
 from .invoker import InvokerFactory
 from .parsers import PromptyChatParser
-from .renderers import Jinja2Renderer
+from .renderers import Jinja2Renderer, MustacheRenderer
 from .tracer import trace
 from .utils import (
     load_global_config,
@@ -22,6 +22,7 @@ from .utils import (
 )
 
 InvokerFactory.add_renderer("jinja2", Jinja2Renderer)
+InvokerFactory.add_renderer("mustache", MustacheRenderer)
 InvokerFactory.add_parser("prompty.chat", PromptyChatParser)
 
 
```
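The two added lines are the whole integration point: `InvokerFactory.add_renderer` maps the `template: mustache` front-matter value to the new `MustacheRenderer`. The same hook can register additional renderers. Below is a minimal sketch of that pattern, assuming the `Invoker` interface behaves as shown in the `renderers.py` diff further down; the `UpperCaseRenderer` class and the `uppercase` template type are hypothetical.

```python
from prompty.core import Prompty
from prompty.invoker import Invoker, InvokerFactory


class UpperCaseRenderer(Invoker):
    """Hypothetical renderer that upper-cases the raw prompty content."""

    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)

    def invoke(self, data: str) -> str:
        # A real renderer would substitute `data` into the template;
        # this toy version just shouts the content back.
        return str(self.prompty.content).upper()

    async def invoke_async(self, data: str) -> str:
        return self.invoke(data)


# Mirrors the registration lines added above.
InvokerFactory.add_renderer("uppercase", UpperCaseRenderer)
```

A prompty file declaring `template: uppercase` in its front matter would then be rendered by this class instead of the Jinja2 or mustache renderers.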
prompty/cli.py
CHANGED
```diff
@@ -6,7 +6,7 @@ from typing import Any, Optional
 
 import click
 from dotenv import load_dotenv
-from
+from dataclasses import asdict, is_dataclass
 
 import prompty
 from prompty.tracer import PromptyTracer, Tracer, console_tracer, trace
@@ -91,11 +91,11 @@ def execute(prompt_path: str, inputs: Optional[dict[str, Any]] = None, raw=False
     dynamic_import(p.model.configuration["type"])
 
     result = prompty.execute(p, inputs=inputs, raw=raw)
-    if
-        print("\n", json.dumps(result
+    if is_dataclass(result) and not isinstance(result, type):
+        print("\n", json.dumps(asdict(result), indent=4), "\n")
     elif isinstance(result, list):
         print(
-            "\n", json.dumps([item
+            "\n", json.dumps([asdict(item) for item in result], indent=4), "\n"
         )
     else:
         print("\n", result, "\n")
```
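The CLI now pretty-prints results with `dataclasses.asdict` instead of pydantic serialization. The extra `not isinstance(result, type)` guard matters because `is_dataclass` also returns True for dataclass classes themselves, while `asdict` only accepts instances. A small self-contained illustration of the same pattern (the `Message` class here is a stand-in, not part of prompty):

```python
import json
from dataclasses import asdict, dataclass, is_dataclass


@dataclass
class Message:
    role: str
    content: str


def dump(result) -> str:
    # Mirrors the CLI logic: dataclass instances go through asdict(),
    # lists are converted element by element, anything else is stringified.
    if is_dataclass(result) and not isinstance(result, type):
        return json.dumps(asdict(result), indent=4)
    elif isinstance(result, list):
        return json.dumps([asdict(item) for item in result], indent=4)
    return str(result)


print(is_dataclass(Message))                # True for the class itself, hence the guard
print(dump(Message("assistant", "hello")))  # single instance
print(dump([Message("user", "hi"), Message("assistant", "hello")]))
```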
prompty/core.py
CHANGED
```diff
@@ -1,23 +1,23 @@
+import copy
 import os
 import typing
 from collections.abc import AsyncIterator, Iterator
+from dataclasses import dataclass, field, fields, asdict
 from pathlib import Path
-from typing import Literal, Union
-
-from pydantic import BaseModel, Field, FilePath
-from pydantic.main import IncEx
-
-from .tracer import Tracer, sanitize, to_dict
+from typing import Any, Dict, List, Literal, Union
+from .tracer import Tracer, to_dict
 from .utils import load_json, load_json_async
 
 
-class ToolCall(BaseModel):
+@dataclass
+class ToolCall:
     id: str
     name: str
     arguments: str
 
 
-class PropertySettings(BaseModel):
+@dataclass
+class PropertySettings:
     """PropertySettings class to define the properties of the model
 
     Attributes
@@ -31,11 +31,12 @@ class PropertySettings(BaseModel):
     """
 
     type: Literal["string", "number", "array", "object", "boolean"]
-    default: Union[str, int, float, list, dict, bool, None] =
-    description: str =
+    default: Union[str, int, float, list, dict, bool, None] = field(default=None)
+    description: str = field(default="")
 
 
-class ModelSettings(BaseModel):
+@dataclass
+class ModelSettings:
     """ModelSettings class to define the model of the prompty
 
     Attributes
@@ -50,48 +51,14 @@ class ModelSettings(BaseModel):
         The response of the model
     """
 
-    api: str =
-    configuration: dict =
-    parameters: dict =
-    response: dict =
-
-    def model_dump(
-        self,
-        *,
-        mode: str = "python",
-        include: Union[IncEx, None] = None,
-        exclude: Union[IncEx, None] = None,
-        context: Union[typing.Any, None] = None,
-        by_alias: bool = False,
-        exclude_unset: bool = False,
-        exclude_defaults: bool = False,
-        exclude_none: bool = False,
-        round_trip: bool = False,
-        warnings: Union[
-            bool, Literal["none"], Literal["warn"], Literal["error"]
-        ] = True,
-        serialize_as_any: bool = False,
-    ) -> dict[str, typing.Any]:
-        """Method to dump the model in a safe way"""
-        d = super().model_dump(
-            mode=mode,
-            include=include,
-            exclude=exclude,
-            context=context,
-            by_alias=by_alias,
-            exclude_unset=exclude_unset,
-            exclude_defaults=exclude_defaults,
-            exclude_none=exclude_none,
-            round_trip=round_trip,
-            warnings=warnings,
-            serialize_as_any=serialize_as_any,
-        )
-
-        d["configuration"] = {k: sanitize(k, v) for k, v in d["configuration"].items()}
-        return d
+    api: str = field(default="")
+    configuration: dict = field(default_factory=dict)
+    parameters: dict = field(default_factory=dict)
+    response: dict = field(default_factory=dict)
 
 
-class TemplateSettings(BaseModel):
+@dataclass
+class TemplateSettings:
     """TemplateSettings class to define the template of the prompty
 
     Attributes
@@ -102,11 +69,12 @@ class TemplateSettings(BaseModel):
         The parser of the template
     """
 
-    type: str =
-    parser: str =
+    type: str = field(default="mustache")
+    parser: str = field(default="")
 
 
-class Prompty(BaseModel):
+@dataclass
+class Prompty:
     """Prompty class to define the prompty
 
     Attributes
@@ -115,9 +83,9 @@ class Prompty(BaseModel):
         The name of the prompty
     description : str
         The description of the prompty
-    authors :
+    authors : list[str]
         The authors of the prompty
-    tags :
+    tags : list[str]
         The tags of the prompty
     version : str
         The version of the prompty
@@ -129,52 +97,54 @@ class Prompty(BaseModel):
         The model of the prompty
     sample : dict
         The sample of the prompty
-    inputs :
+    inputs : dict[str, PropertySettings]
         The inputs of the prompty
-    outputs :
+    outputs : dict[str, PropertySettings]
         The outputs of the prompty
     template : TemplateSettings
         The template of the prompty
    file : FilePath
        The file of the prompty
-    content : str
+    content : Union[str, list[str], dict]
        The content of the prompty
    """
 
     # metadata
-    name: str =
-    description: str =
-    authors:
-    tags:
-    version: str =
-    base: str =
-    basePrompty: Union["Prompty", None] =
+    name: str = field(default="")
+    description: str = field(default="")
+    authors: List[str] = field(default_factory=list)
+    tags: List[str] = field(default_factory=list)
+    version: str = field(default="")
+    base: str = field(default="")
+    basePrompty: Union["Prompty", None] = field(default=None)
     # model
-    model: ModelSettings =
+    model: ModelSettings = field(default_factory=ModelSettings)
 
     # sample
-    sample: dict =
+    sample: dict = field(default_factory=dict)
 
     # input / output
-    inputs: dict[str, PropertySettings] =
-    outputs: dict[str, PropertySettings] =
+    inputs: dict[str, PropertySettings] = field(default_factory=dict)
+    outputs: dict[str, PropertySettings] = field(default_factory=dict)
 
     # template
-    template: TemplateSettings
+    template: TemplateSettings = field(default_factory=TemplateSettings)
 
-    file: Union[str,
-    content: Union[str, list[str], dict] =
+    file: Union[str, Path] = field(default="")
+    content: Union[str, list[str], dict] = field(default="")
 
     def to_safe_dict(self) -> dict[str, typing.Any]:
-        d = {}
-        for
+        d: dict[str, typing.Any] = {}
+        for field in fields(self):
+            k = field.name
+            v = getattr(self, field.name)
            if v != "" and v != {} and v != [] and v is not None:
                if k == "model":
-                    d[k] =
+                    d[k] = asdict(self.model)
                elif k == "template":
-                    d[k] =
+                    d[k] = asdict(self.template)
                elif k == "inputs" or k == "outputs":
-                    d[k] =
+                    d[k] = copy.deepcopy(v)
                elif k == "file":
                    d[k] = (
                        str(self.file.as_posix())
@@ -217,7 +187,7 @@ class Prompty(BaseModel):
             f = Path(parent / Path(file)).resolve().absolute()
             if f.exists():
                 items = load_json(f)
-                if isinstance(items,
+                if isinstance(items, List):
                     return [Prompty.normalize(value, parent) for value in items]
                 elif isinstance(items, dict):
                     return {
@@ -325,7 +295,7 @@ def param_hoisting(
     top: dict[str, typing.Any],
     bottom: dict[str, typing.Any],
     top_key: Union[str, None] = None,
-) ->
+) -> Dict[str, typing.Any]:
     if top_key:
         new_dict = {**top[top_key]} if top_key in top else {}
     else:
```
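core.py swaps pydantic's `BaseModel` for plain `dataclasses`, so every mutable default now goes through `field(default_factory=...)` and serialization becomes `dataclasses.asdict`. A short sketch of how the converted settings classes behave, assuming `ModelSettings` and `TemplateSettings` are importable from `prompty.core` exactly as declared above:

```python
from dataclasses import asdict

from prompty.core import ModelSettings, TemplateSettings

# default_factory builds a fresh dict per instance, so two instances
# never share the same configuration object.
a = ModelSettings(api="chat")
b = ModelSettings(api="chat")
a.configuration["azure_deployment"] = "gpt-35-turbo"
print(b.configuration)     # {}

# The template type now defaults to the new mustache renderer.
print(TemplateSettings())  # TemplateSettings(type='mustache', parser='')

# asdict() takes over the role pydantic's model_dump() used to play.
print(asdict(a))
```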
prompty/mustache.py
ADDED
@@ -0,0 +1,666 @@
```python
from __future__ import annotations
import logging
from collections.abc import Iterator, Sequence
from types import MappingProxyType
from typing import (
    Any,
    Dict,
    List,
    Literal,
    Mapping,
    Optional,
    Union,
    cast,
)
from typing_extensions import TypeAlias

logger = logging.getLogger(__name__)


Scopes: TypeAlias = List[Union[Literal[False, 0], Mapping[str, Any]]]


# Globals
_CURRENT_LINE = 1
_LAST_TAG_LINE = None


class ChevronError(SyntaxError):
    """Custom exception for Chevron errors."""


#
# Helper functions
#


def grab_literal(template: str, l_del: str) -> tuple[str, str]:
    """Parse a literal from the template.

    Args:
        template: The template to parse.
        l_del: The left delimiter.

    Returns:
        Tuple[str, str]: The literal and the template.
    """

    global _CURRENT_LINE

    try:
        # Look for the next tag and move the template to it
        literal, template = template.split(l_del, 1)
        _CURRENT_LINE += literal.count("\n")
        return (literal, template)

    # There are no more tags in the template?
    except ValueError:
        # Then the rest of the template is a literal
        return (template, "")


def l_sa_check(template: str, literal: str, is_standalone: bool) -> bool:
    """Do a preliminary check to see if a tag could be a standalone.

    Args:
        template: The template. (Not used.)
        literal: The literal.
        is_standalone: Whether the tag is standalone.

    Returns:
        bool: Whether the tag could be a standalone.
    """

    # If there is a newline, or the previous tag was a standalone
    if literal.find("\n") != -1 or is_standalone:
        padding = literal.split("\n")[-1]

        # If all the characters since the last newline are spaces
        # Then the next tag could be a standalone
        # Otherwise it can't be
        return padding.isspace() or padding == ""
    else:
        return False


def r_sa_check(template: str, tag_type: str, is_standalone: bool) -> bool:
    """Do a final check to see if a tag could be a standalone.

    Args:
        template: The template.
        tag_type: The type of the tag.
        is_standalone: Whether the tag is standalone.

    Returns:
        bool: Whether the tag could be a standalone.
    """

    # Check right side if we might be a standalone
    if is_standalone and tag_type not in ["variable", "no escape"]:
        on_newline = template.split("\n", 1)

        # If the stuff to the right of us are spaces we're a standalone
        return on_newline[0].isspace() or not on_newline[0]

    # If we're a tag can't be a standalone
    else:
        return False


def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], str]:
    """Parse a tag from a template.

    Args:
        template: The template.
        l_del: The left delimiter.
        r_del: The right delimiter.

    Returns:
        Tuple[Tuple[str, str], str]: The tag and the template.

    Raises:
        ChevronError: If the tag is unclosed.
        ChevronError: If the set delimiter tag is unclosed.
    """
    global _CURRENT_LINE
    global _LAST_TAG_LINE

    tag_types = {
        "!": "comment",
        "#": "section",
        "^": "inverted section",
        "/": "end",
        ">": "partial",
        "=": "set delimiter?",
        "{": "no escape?",
        "&": "no escape",
    }

    # Get the tag
    try:
        tag, template = template.split(r_del, 1)
    except ValueError as e:
        msg = "unclosed tag " f"at line {_CURRENT_LINE}"
        raise ChevronError(msg) from e

    # Find the type meaning of the first character
    tag_type = tag_types.get(tag[0], "variable")

    # If the type is not a variable
    if tag_type != "variable":
        # Then that first character is not needed
        tag = tag[1:]

    # If we might be a set delimiter tag
    if tag_type == "set delimiter?":
        # Double check to make sure we are
        if tag.endswith("="):
            tag_type = "set delimiter"
            # Remove the equal sign
            tag = tag[:-1]

        # Otherwise we should complain
        else:
            msg = "unclosed set delimiter tag\n" f"at line {_CURRENT_LINE}"
            raise ChevronError(msg)

    elif (
        # If we might be a no html escape tag
        tag_type == "no escape?"
        # And we have a third curly brace
        # (And are using curly braces as delimiters)
        and l_del == "{{"
        and r_del == "}}"
        and template.startswith("}")
    ):
        # Then we are a no html escape tag
        template = template[1:]
        tag_type = "no escape"

    # Strip the whitespace off the key and return
    return ((tag_type, tag.strip()), template)


#
# The main tokenizing function
#


def tokenize(template: str, def_ldel: str = "{{", def_rdel: str = "}}") -> Iterator[tuple[str, str]]:
    """Tokenize a mustache template.

    Tokenizes a mustache template in a generator fashion,
    using file-like objects. It also accepts a string containing
    the template.


    Arguments:

    template -- a file-like object, or a string of a mustache template

    def_ldel -- The default left delimiter
                ("{{" by default, as in spec compliant mustache)

    def_rdel -- The default right delimiter
                ("}}" by default, as in spec compliant mustache)


    Returns:

    A generator of mustache tags in the form of a tuple

    -- (tag_type, tag_key)

    Where tag_type is one of:
     * literal
     * section
     * inverted section
     * end
     * partial
     * no escape

    And tag_key is either the key or in the case of a literal tag,
    the literal itself.
    """

    global _CURRENT_LINE, _LAST_TAG_LINE
    _CURRENT_LINE = 1
    _LAST_TAG_LINE = None

    is_standalone = True
    open_sections = []
    l_del = def_ldel
    r_del = def_rdel

    while template:
        literal, template = grab_literal(template, l_del)

        # If the template is completed
        if not template:
            # Then yield the literal and leave
            yield ("literal", literal)
            break

        # Do the first check to see if we could be a standalone
        is_standalone = l_sa_check(template, literal, is_standalone)

        # Parse the tag
        tag, template = parse_tag(template, l_del, r_del)
        tag_type, tag_key = tag

        # Special tag logic

        # If we are a set delimiter tag
        if tag_type == "set delimiter":
            # Then get and set the delimiters
            dels = tag_key.strip().split(" ")
            l_del, r_del = dels[0], dels[-1]

        # If we are a section tag
        elif tag_type in ["section", "inverted section"]:
            # Then open a new section
            open_sections.append(tag_key)
            _LAST_TAG_LINE = _CURRENT_LINE

        # If we are an end tag
        elif tag_type == "end":
            # Then check to see if the last opened section
            # is the same as us
            try:
                last_section = open_sections.pop()
            except IndexError as e:
                msg = f'Trying to close tag "{tag_key}"\n' "Looks like it was not opened.\n" f"line {_CURRENT_LINE + 1}"
                raise ChevronError(msg) from e
            if tag_key != last_section:
                # Otherwise we need to complain
                msg = (
                    f'Trying to close tag "{tag_key}"\n'
                    f'last open tag is "{last_section}"\n'
                    f"line {_CURRENT_LINE + 1}"
                )
                raise ChevronError(msg)

        # Do the second check to see if we're a standalone
        is_standalone = r_sa_check(template, tag_type, is_standalone)

        # Which if we are
        if is_standalone:
            # Remove the stuff before the newline
            template = template.split("\n", 1)[-1]

            # Partials need to keep the spaces on their left
            if tag_type != "partial":
                # But other tags don't
                literal = literal.rstrip(" ")

        # Start yielding
        # Ignore literals that are empty
        if literal != "":
            yield ("literal", literal)

        # Ignore comments and set delimiters
        if tag_type not in ["comment", "set delimiter?"]:
            yield (tag_type, tag_key)

    # If there are any open sections when we're done
    if open_sections:
        # Then we need to complain
        msg = (
            "Unexpected EOF\n"
            f'the tag "{open_sections[-1]}" was never closed\n'
            f"was opened at line {_LAST_TAG_LINE}"
        )
        raise ChevronError(msg)


#
# Helper functions
#


def _html_escape(string: str) -> str:
    """HTML escape all of these " & < >"""

    html_codes = {
        '"': "&quot;",
        "<": "&lt;",
        ">": "&gt;",
    }

    # & must be handled first
    string = string.replace("&", "&amp;")
    for char in html_codes:
        string = string.replace(char, html_codes[char])
    return string


def _get_key(
    key: str,
    scopes: Scopes,
    warn: bool,
    keep: bool,
    def_ldel: str,
    def_rdel: str,
) -> Any:
    """Get a key from the current scope"""

    # If the key is a dot
    if key == ".":
        # Then just return the current scope
        return scopes[0]

    # Loop through the scopes
    for scope in scopes:
        try:
            # Return an empty string if falsy, with two exceptions
            # 0 should return 0, and False should return False
            if scope in (0, False):
                return scope

            # For every dot separated key
            for child in key.split("."):
                # Return an empty string if falsy, with two exceptions
                # 0 should return 0, and False should return False
                if scope in (0, False):
                    return scope
                # Move into the scope
                try:
                    # Try subscripting (Normal dictionaries)
                    scope = cast(Dict[str, Any], scope)[child]
                except (TypeError, AttributeError):
                    try:
                        scope = getattr(scope, child)
                    except (TypeError, AttributeError):
                        # Try as a list
                        scope = scope[int(child)]  # type: ignore

            try:
                # This allows for custom falsy data types
                # https://github.com/noahmorrison/chevron/issues/35
                if scope._CHEVRON_return_scope_when_falsy:  # type: ignore
                    return scope
            except AttributeError:
                if scope in (0, False):
                    return scope
                return scope or ""
        except (AttributeError, KeyError, IndexError, ValueError):
            # We couldn't find the key in the current scope
            # We'll try again on the next pass
            pass

    # We couldn't find the key in any of the scopes

    if warn:
        logger.warn(f"Could not find key '{key}'")

    if keep:
        return f"{def_ldel} {key} {def_rdel}"

    return ""


def _get_partial(name: str, partials_dict: Mapping[str, str]) -> str:
    """Load a partial"""
    try:
        # Maybe the partial is in the dictionary
        return partials_dict[name]
    except KeyError:
        return ""


#
# The main rendering function
#
g_token_cache: Dict[str, List[tuple[str, str]]] = {}

EMPTY_DICT: MappingProxyType[str, str] = MappingProxyType({})


def render(
    template: Union[str, List[tuple[str, str]]] = "",
    data: Mapping[str, Any] = EMPTY_DICT,
    partials_dict: Mapping[str, str] = EMPTY_DICT,
    padding: str = "",
    def_ldel: str = "{{",
    def_rdel: str = "}}",
    scopes: Optional[Scopes] = None,
    warn: bool = False,
    keep: bool = False,
) -> str:
    """Render a mustache template.

    Renders a mustache template with a data scope and inline partial capability.

    Arguments:

    template      -- A file-like object or a string containing the template.

    data          -- A python dictionary with your data scope.

    partials_path -- The path to where your partials are stored.
                     If set to None, then partials won't be loaded from the file system
                     (defaults to '.').

    partials_ext  -- The extension that you want the parser to look for
                     (defaults to 'mustache').

    partials_dict -- A python dictionary which will be search for partials
                     before the filesystem is. {'include': 'foo'} is the same
                     as a file called include.mustache
                     (defaults to {}).

    padding       -- This is for padding partials, and shouldn't be used
                     (but can be if you really want to).

    def_ldel      -- The default left delimiter
                     ("{{" by default, as in spec compliant mustache).

    def_rdel      -- The default right delimiter
                     ("}}" by default, as in spec compliant mustache).

    scopes        -- The list of scopes that get_key will look through.

    warn -- Log a warning when a template substitution isn't found in the data

    keep -- Keep unreplaced tags when a substitution isn't found in the data.


    Returns:

    A string containing the rendered template.
    """

    # If the template is a sequence but not derived from a string
    if isinstance(template, Sequence) and not isinstance(template, str):
        # Then we don't need to tokenize it
        # But it does need to be a generator
        tokens: Iterator[tuple[str, str]] = (token for token in template)
    else:
        if template in g_token_cache:
            tokens = (token for token in g_token_cache[template])
        else:
            # Otherwise make a generator
            tokens = tokenize(template, def_ldel, def_rdel)

    output = ""

    if scopes is None:
        scopes = [data]

    # Run through the tokens
    for tag, key in tokens:
        # Set the current scope
        current_scope = scopes[0]

        # If we're an end tag
        if tag == "end":
            # Pop out of the latest scope
            del scopes[0]

        # If the current scope is falsy and not the only scope
        elif not current_scope and len(scopes) != 1:
            if tag in ["section", "inverted section"]:
                # Set the most recent scope to a falsy value
                scopes.insert(0, False)

        # If we're a literal tag
        elif tag == "literal":
            # Add padding to the key and add it to the output
            output += key.replace("\n", "\n" + padding)

        # If we're a variable tag
        elif tag == "variable":
            # Add the html escaped key to the output
            thing = _get_key(key, scopes, warn=warn, keep=keep, def_ldel=def_ldel, def_rdel=def_rdel)
            if thing is True and key == ".":
                # if we've coerced into a boolean by accident
                # (inverted tags do this)
                # then get the un-coerced object (next in the stack)
                thing = scopes[1]
            if not isinstance(thing, str):
                thing = str(thing)
            output += _html_escape(thing)

        # If we're a no html escape tag
        elif tag == "no escape":
            # Just lookup the key and add it
            thing = _get_key(key, scopes, warn=warn, keep=keep, def_ldel=def_ldel, def_rdel=def_rdel)
            if not isinstance(thing, str):
                thing = str(thing)
            output += thing

        # If we're a section tag
        elif tag == "section":
            # Get the sections scope
            scope = _get_key(key, scopes, warn=warn, keep=keep, def_ldel=def_ldel, def_rdel=def_rdel)

            # If the scope is a callable (as described in
            # https://mustache.github.io/mustache.5.html)
            if callable(scope):
                # Generate template text from tags
                text = ""
                tags: List[tuple[str, str]] = []
                for token in tokens:
                    if token == ("end", key):
                        break

                    tags.append(token)
                    tag_type, tag_key = token
                    if tag_type == "literal":
                        text += tag_key
                    elif tag_type == "no escape":
                        text += f"{def_ldel}& {tag_key} {def_rdel}"
                    else:
                        text += "{}{} {}{}".format(
                            def_ldel,
                            {
                                "comment": "!",
                                "section": "#",
                                "inverted section": "^",
                                "end": "/",
                                "partial": ">",
                                "set delimiter": "=",
                                "no escape": "&",
                                "variable": "",
                            }[tag_type],
                            tag_key,
                            def_rdel,
                        )

                g_token_cache[text] = tags

                rend = scope(
                    text,
                    lambda template, data=None: render(
                        template,
                        data={},
                        partials_dict=partials_dict,
                        padding=padding,
                        def_ldel=def_ldel,
                        def_rdel=def_rdel,
                        scopes=data and [data] + scopes or scopes,
                        warn=warn,
                        keep=keep,
                    ),
                )

                output += rend  # type: ignore[reportOperatorIssue]

            # If the scope is a sequence, an iterator or generator but not
            # derived from a string
            elif isinstance(scope, (Sequence, Iterator)) and not isinstance(scope, str):
                # Then we need to do some looping

                # Gather up all the tags inside the section
                # (And don't be tricked by nested end tags with the same key)
                # TODO: This feels like it still has edge cases, no?
                tags = []
                tags_with_same_key = 0
                for token in tokens:
                    if token == ("section", key):
                        tags_with_same_key += 1
                    if token == ("end", key):
                        tags_with_same_key -= 1
                        if tags_with_same_key < 0:
                            break
                    tags.append(token)

                # For every item in the scope
                for thing in scope:
                    # Append it as the most recent scope and render
                    new_scope = [thing] + scopes
                    rend = render(
                        template=tags,
                        scopes=new_scope,
                        padding=padding,
                        partials_dict=partials_dict,
                        def_ldel=def_ldel,
                        def_rdel=def_rdel,
                        warn=warn,
                        keep=keep,
                    )

                    output += rend

            else:
                # Otherwise we're just a scope section
                scopes.insert(0, scope)  # type: ignore[reportArgumentType]

        # If we're an inverted section
        elif tag == "inverted section":
            # Add the flipped scope to the scopes
            scope = _get_key(key, scopes, warn=warn, keep=keep, def_ldel=def_ldel, def_rdel=def_rdel)
            scopes.insert(0, cast(Literal[False], not scope))

        # If we're a partial
        elif tag == "partial":
            # Load the partial
            partial = _get_partial(key, partials_dict)

            # Find what to pad the partial with
            left = output.rpartition("\n")[2]
            part_padding = padding
            if left.isspace():
                part_padding += left

            # Render the partial
            part_out = render(
                template=partial,
                partials_dict=partials_dict,
                def_ldel=def_ldel,
                def_rdel=def_rdel,
                padding=part_padding,
                scopes=scopes,
                warn=warn,
                keep=keep,
            )

            # If the partial was indented
            if left.isspace():
                # then remove the spaces from the end
                part_out = part_out.rstrip(" \t")

            # Add the partials output to the output
            output += part_out

    return output
```
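The new module is a vendored, dependency-free mustache engine (it closely follows the chevron renderer), so `render()` can be exercised on its own. A quick sketch covering variables, a section over a list, and an inline partial, assuming the module imports as `prompty.mustache`:

```python
from prompty.mustache import render

template = (
    "Hello {{name}}!\n"
    "{{#items}}- {{.}}\n{{/items}}"
    "{{> footer}}"
)

print(
    render(
        template,
        data={"name": "Jane", "items": ["one", "two"]},
        partials_dict={"footer": "-- sent from prompty\n"},
    )
)
# Hello Jane!
# - one
# - two
# -- sent from prompty
```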
prompty/renderers.py
CHANGED
```diff
@@ -2,6 +2,7 @@ import typing
 from pathlib import Path
 
 from jinja2 import DictLoader, Environment
+from .mustache import render
 
 from .core import Prompty
 from .invoker import Invoker
@@ -49,3 +50,35 @@ class Jinja2Renderer(Invoker):
             The parsed data
         """
         return self.invoke(data)
+
+
+class MustacheRenderer(Invoker):
+    """Render a mustache template."""
+
+    def __init__(self, prompty: Prompty) -> None:
+        super().__init__(prompty)
+        self.templates = {}
+        cur_prompt: typing.Union[Prompty, None] = self.prompty
+        while cur_prompt:
+            self.templates[Path(cur_prompt.file).name] = cur_prompt.content
+            cur_prompt = cur_prompt.basePrompty
+        self.name = Path(self.prompty.file).name
+
+    def invoke(self, data: str) -> str:
+        generated = render(self.prompty.content, data)  # type: ignore
+        return generated
+
+    async def invoke_async(self, data: str) -> str:
+        """Invoke the Prompty Chat Parser (Async)
+
+        Parameters
+        ----------
+        data : str
+            The data to parse
+
+        Returns
+        -------
+        str
+            The parsed data
+        """
+        return self.invoke(data)
```
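`MustacheRenderer.invoke` hands `self.prompty.content` plus the merged inputs straight to the vendored `render()`, which HTML-escapes plain `{{variable}}` substitutions; triple mustaches or `{{& ...}}` skip the escaping. A quick check of that behaviour, based on the `_html_escape` helper in the module above:

```python
from prompty.mustache import render

data = {"question": "Is 3 < 5 & 7 > 2?"}

print(render("escaped: {{question}}", data))   # escaped: Is 3 &lt; 5 &amp; 7 &gt; 2?
print(render("raw: {{{question}}}", data))     # raw: Is 3 < 5 & 7 > 2?
print(render("raw: {{& question}}", data))     # raw: Is 3 < 5 & 7 > 2?
```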
prompty/tracer.py
CHANGED
```diff
@@ -5,14 +5,13 @@ import json
 import os
 import traceback
 from collections.abc import Iterator
+from dataclasses import asdict, is_dataclass
 from datetime import datetime
 from functools import partial, wraps
 from numbers import Number
 from pathlib import Path
 from typing import Any, Callable, Union
 
-from pydantic import BaseModel
-
 
 # clean up key value pairs for sensitive values
 def sanitize(key: str, value: Any) -> Any:
@@ -83,15 +82,17 @@ def to_dict(obj: Any) -> Any:
         return obj.isoformat()
     # safe Prompty obj serialization
     elif type(obj).__name__ == "Prompty":
-        return obj.to_safe_dict()
+        obj_dict = asdict(obj)
+        if "model" in obj_dict and "configuration" in obj_dict["model"]:
+            obj_dict["model"]["configuration"] = sanitize("configuration", obj_dict["model"]["configuration"])
+        return obj_dict
     # safe PromptyStream obj serialization
     elif type(obj).__name__ == "PromptyStream":
         return "PromptyStream"
+    elif is_dataclass(obj) and not isinstance(obj, type):
+        return asdict(obj)
     elif type(obj).__name__ == "AsyncPromptyStream":
         return "AsyncPromptyStream"
-    # pydantic models have their own json serialization
-    elif isinstance(obj, BaseModel):
-        return obj.model_dump()
     # recursive list and dict
     elif isinstance(obj, list):
         return [to_dict(item) for item in obj]
```
prompty/utils.py
CHANGED
```diff
@@ -37,14 +37,15 @@ def _find_global_config(prompty_path: Path = Path.cwd()) -> typing.Union[Path, None]:
     prompty_config = list(Path.cwd().glob("**/prompty.json"))
 
     if len(prompty_config) > 0:
-        return sorted(
+        sorted_list = sorted(
             [
                 c
                 for c in prompty_config
                 if len(c.parent.parts) <= len(prompty_path.parts)
             ],
             key=lambda p: len(p.parts),
-        )[-1]
+        )
+        return sorted_list[-1] if len(sorted_list) > 0 else None
     else:
         return None
 
```
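The utils.py fix guards against an IndexError: the previous code took the deepest `prompty.json` with `[-1]` directly off the sorted list, which fails when the filter removes every candidate (for example, when the only `prompty.json` found sits deeper in the tree than the prompty path). The new code keeps the same deepest-config-wins rule but returns `None` when nothing survives. The selection logic in isolation, with hypothetical paths:

```python
from pathlib import Path
from typing import List, Optional


def pick_global_config(candidates: List[Path], prompty_path: Path) -> Optional[Path]:
    # Keep configs that are not nested deeper than the prompty path,
    # then take the deepest remaining one.
    eligible = sorted(
        [c for c in candidates if len(c.parent.parts) <= len(prompty_path.parts)],
        key=lambda p: len(p.parts),
    )
    return eligible[-1] if eligible else None


configs = [Path("/repo/prompty.json"), Path("/repo/prompts/prompty.json")]
print(pick_global_config(configs, Path("/repo/prompts/basic.prompty")))
# /repo/prompts/prompty.json

print(pick_global_config([Path("/repo/a/b/c/prompty.json")], Path("/repo/basic.prompty")))
# None  (the old [-1] indexing would have raised IndexError here)
```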
{prompty-0.1.47.dist-info → prompty-0.1.49.dist-info}/METADATA
CHANGED
````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prompty
-Version: 0.1.47
+Version: 0.1.49
 Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
 Author-Email: Seth Juarez <seth.juarez@microsoft.com>
 License: MIT
@@ -9,7 +9,6 @@ Project-URL: Documentation, https://prompty.ai/docs
 Project-URL: Issues, https://github.com/microsoft/prompty/issues
 Requires-Python: >=3.9
 Requires-Dist: pyyaml>=6.0.1
-Requires-Dist: pydantic>=2.8.2
 Requires-Dist: jinja2>=3.1.4
 Requires-Dist: python-dotenv>=1.0.1
 Requires-Dist: click>=8.1.7
@@ -38,16 +37,17 @@ Examples prompty file:
 ```markdown
 ---
 name: Basic Prompt
-description: A basic prompt that uses the
+description: A basic prompt that uses the gpt-3.5-turbo chat API to answer questions
 authors:
   - sethjuarez
   - jietong
 model:
   api: chat
   configuration:
-    api_version:
+    api_version: 2024-10-21
     azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
     azure_deployment: ${env:AZURE_OPENAI_DEPLOYMENT:gpt-35-turbo}
+    type: azure_openai
 sample:
   firstName: Jane
   lastName: Doe
@@ -77,7 +77,7 @@ Download the [VS Code extension here](https://marketplace.visualstudio.com/items
 The Python runtime is a simple way to run your prompts in Python. The runtime is available as a Python package and can be installed using pip. Depending on the type of prompt you are running, you may need to install additional dependencies. The runtime is designed to be extensible and can be customized to fit your needs.
 
 ```bash
-pip install prompty[azure]
+pip install "prompty[azure]"
 ```
 
 Simple usage example:
````
{prompty-0.1.47.dist-info → prompty-0.1.49.dist-info}/RECORD
CHANGED
```diff
@@ -1,25 +1,26 @@
-prompty-0.1.
-prompty-0.1.
-prompty-0.1.
-prompty-0.1.
-prompty/__init__.py,sha256=
+prompty-0.1.49.dist-info/METADATA,sha256=me6Z8gphAgWMs0lfuSOjf7E2afCeNCI_y_-5mkxO_9k,9337
+prompty-0.1.49.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+prompty-0.1.49.dist-info/entry_points.txt,sha256=a3i7Kvf--3DOkkv9VQpstwaNKgsnXwDGaPL18lPpKeI,60
+prompty-0.1.49.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
+prompty/__init__.py,sha256=Nq49LsDwe8smD51MLsjGDlk6soc3knKoXkAmdUJNibA,16937
 prompty/azure/__init__.py,sha256=zBxzOMQCwtiz2CsI8gNWzr7T2ZJHF3TZNIUtvfuaEQI,309
 prompty/azure/executor.py,sha256=MWeBwprLY2NmPySoOq-K3qMui5lPlMYMh5ktb3S7mgo,9367
 prompty/azure/processor.py,sha256=gNmUkPBoSLPE0t7IVJUxGdwZ2otiycDE4Fu6L1vurZI,5330
 prompty/azure_beta/__init__.py,sha256=-LPrqoAv2UGI29SUjiZCkPlAGARvixlYe2IW1edswWo,375
 prompty/azure_beta/executor.py,sha256=bARyO2k42ZQfd5NrNKyMUd7JZ2Rcm8urUP0sJ1P1Qxk,10157
-prompty/cli.py,sha256=
-prompty/core.py,sha256=
+prompty/cli.py,sha256=x4t5pkjLmSppxbwHmv0WZ5OMJ8hz6z48RuahMF9WDQc,4858
+prompty/core.py,sha256=cy2KoQgruk01Aaz34pUxJz4qLJOAt_1CLhU7-PhxhhU,12118
 prompty/invoker.py,sha256=ARmyVkinm_Nk91k8mRlsDffox7MtGSVLpwNycavMSSI,9028
+prompty/mustache.py,sha256=VSo9zlMlbOPIa5TSH7MKRQi7i67SgeHz0-ymFrHubaA,20751
 prompty/openai/__init__.py,sha256=aRdXZ5pL4tURFqRwVX7gNdVy3PPWE6BvUbK1o73AqQc,303
 prompty/openai/executor.py,sha256=v5K8kDpGUsbtTpCb1hOVxPt7twX2p-a5yL2iC3Xm8TQ,3752
 prompty/openai/processor.py,sha256=X6yfSlHoTgXxCfGvUwADACjNtzLMNumcn0SX3U4T9as,2542
 prompty/parsers.py,sha256=ptET4j8Rf55Ob0uOx6A41nlQ6cwo1CHRytSjeMIp6JE,5083
 prompty/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-prompty/renderers.py,sha256=
+prompty/renderers.py,sha256=jidLNr85HRPyevCi7ObTZvsIcLmZ_CB0NTGDNqf1SdU,2376
 prompty/serverless/__init__.py,sha256=ILtbqhy6E-wWodbnq-_aftruTGk1Z0EI9zURFH4FECM,297
 prompty/serverless/executor.py,sha256=QZXeFTXv_aT22rbBBgBFLtdAJqGp4W8GS3H3Rtoy8bE,8936
 prompty/serverless/processor.py,sha256=d42MalWRf8RUpHRiueqAPAj_nGmJSMUE2yL-Tgxrss4,3754
-prompty/tracer.py,sha256=
-prompty/utils.py,sha256=
-prompty-0.1.
+prompty/tracer.py,sha256=GYJsFfsF5uexv1RANENX_Xm5sOnIcQHT_LLAtr6JocU,12158
+prompty/utils.py,sha256=0EmzWgRWPe6IgHOzoIy0k4_BMDUN4FHo0iITD-g1Ny8,2904
+prompty-0.1.49.dist-info/RECORD,,
```
File without changes
|
File without changes
|
File without changes
|