prompty-0.1.1.tar.gz
- prompty-0.1.1/PKG-INFO +15 -0
- prompty-0.1.1/README.md +1 -0
- prompty-0.1.1/prompty/__init__.py +261 -0
- prompty-0.1.1/prompty/core.py +305 -0
- prompty-0.1.1/prompty/executors.py +70 -0
- prompty-0.1.1/prompty/parsers.py +103 -0
- prompty-0.1.1/prompty/processors.py +55 -0
- prompty-0.1.1/prompty/renderers.py +22 -0
- prompty-0.1.1/pyproject.toml +40 -0
- prompty-0.1.1/tests/__init__.py +59 -0
- prompty-0.1.1/tests/generated/1contoso.md +65 -0
- prompty-0.1.1/tests/generated/2contoso.md +66 -0
- prompty-0.1.1/tests/generated/3contoso.md +65 -0
- prompty-0.1.1/tests/generated/4contoso.md +64 -0
- prompty-0.1.1/tests/generated/basic.prompty.md +12 -0
- prompty-0.1.1/tests/generated/camping.jpg +0 -0
- prompty-0.1.1/tests/generated/context.prompty.md +42 -0
- prompty-0.1.1/tests/generated/contoso_multi.md +70 -0
- prompty-0.1.1/tests/generated/faithfulness.prompty.md +86 -0
- prompty-0.1.1/tests/generated/groundedness.prompty.md +35 -0
- prompty-0.1.1/tests/hello_world-goodbye_world-hello_again.embedding.json +4636 -0
- prompty-0.1.1/tests/hello_world.embedding.json +1552 -0
- prompty-0.1.1/tests/prompts/__init__.py +0 -0
- prompty-0.1.1/tests/prompts/basic.prompty +26 -0
- prompty-0.1.1/tests/prompts/basic.prompty.execution.json +67 -0
- prompty-0.1.1/tests/prompts/basic_json_output.prompty +26 -0
- prompty-0.1.1/tests/prompts/camping.jpg +0 -0
- prompty-0.1.1/tests/prompts/chat.prompty +32 -0
- prompty-0.1.1/tests/prompts/context.json +34 -0
- prompty-0.1.1/tests/prompts/context.prompty +46 -0
- prompty-0.1.1/tests/prompts/context.prompty.execution.json +67 -0
- prompty-0.1.1/tests/prompts/embedding.prompty +14 -0
- prompty-0.1.1/tests/prompts/embedding.prompty.execution.json +1552 -0
- prompty-0.1.1/tests/prompts/evaluation.prompty +54 -0
- prompty-0.1.1/tests/prompts/faithfulness.prompty +70 -0
- prompty-0.1.1/tests/prompts/faithfulness.prompty.execution.json +67 -0
- prompty-0.1.1/tests/prompts/fake.prompty +30 -0
- prompty-0.1.1/tests/prompts/funcfile.json +28 -0
- prompty-0.1.1/tests/prompts/funcfile.prompty +30 -0
- prompty-0.1.1/tests/prompts/functions.prompty +61 -0
- prompty-0.1.1/tests/prompts/functions.prompty.execution.json +59 -0
- prompty-0.1.1/tests/prompts/groundedness.prompty +51 -0
- prompty-0.1.1/tests/prompts/groundedness.prompty.execution.json +67 -0
- prompty-0.1.1/tests/prompts/prompty.json +9 -0
- prompty-0.1.1/tests/prompts/sub/__init__.py +0 -0
- prompty-0.1.1/tests/prompts/sub/basic.prompty +26 -0
- prompty-0.1.1/tests/prompts/sub/sub/__init__.py +0 -0
- prompty-0.1.1/tests/prompts/sub/sub/basic.prompty +26 -0
- prompty-0.1.1/tests/prompts/sub/sub/prompty.json +9 -0
- prompty-0.1.1/tests/prompts/sub/sub/test.py +5 -0
- prompty-0.1.1/tests/prompts/test.py +5 -0
- prompty-0.1.1/tests/prompty.json +8 -0
- prompty-0.1.1/tests/test_common.py +27 -0
- prompty-0.1.1/tests/test_execute.py +121 -0
- prompty-0.1.1/tests/test_factory_invoker.py +94 -0
- prompty-0.1.1/tests/test_path_exec.py +32 -0
prompty-0.1.1/PKG-INFO
ADDED
@@ -0,0 +1,15 @@
```text
Metadata-Version: 2.1
Name: prompty
Version: 0.1.1
Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
Author-Email: Seth Juarez <seth.juarez@microsoft.com>
License: MIT
Requires-Python: >=3.9
Requires-Dist: pyyaml>=6.0.1
Requires-Dist: pydantic>=2.8.2
Requires-Dist: jinja2>=3.1.4
Requires-Dist: openai>=1.35.10
Requires-Dist: azure-identity>=1.17.1
Description-Content-Type: text/markdown

# prompty
```
prompty-0.1.1/README.md
ADDED
@@ -0,0 +1 @@
```markdown
# prompty
```
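For orientation before the runtime code below: a `.prompty` file is YAML frontmatter between `---` delimiters followed by the prompt body, which `load()` in `prompty/__init__.py` splits apart via the `Frontmatter` class in `prompty/core.py`. A minimal sketch — the field values here are hypothetical, not taken from this release's test assets:

```python
# Minimal sketch of the document format this runtime consumes. The
# attribute names mirror what load() pops out of the frontmatter
# (model, template, inputs, sample); the concrete values are invented.
from prompty.core import Frontmatter

doc = """---
name: Basic Prompt
model:
  api: chat
  configuration:
    type: azure
    azure_deployment: gpt-35-turbo
sample:
  question: What can you tell me about camping?
---
system:
You are a helpful assistant.

user:
{{question}}
"""

parts = Frontmatter.read(doc)
print(parts["attributes"]["model"]["api"])  # chat
print(parts["body"].splitlines()[0])        # system:
```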
prompty-0.1.1/prompty/__init__.py
ADDED
@@ -0,0 +1,261 @@
```python
import json
import traceback
from pathlib import Path
from typing import Dict, List, Union
from .core import (
    Frontmatter,
    InvokerFactory,
    ModelSettings,
    Prompty,
    PropertySettings,
    TemplateSettings,
    param_hoisting,
)

from .renderers import *
from .parsers import *
from .executors import *
from .processors import *


def load_global_config(
    prompty_path: Path = Path.cwd(), configuration: str = "default"
) -> Dict[str, any]:
    # prompty.config laying around?
    prompty_config = list(Path.cwd().glob("**/prompty.json"))

    # if there is one load it
    if len(prompty_config) > 0:
        # pick the nearest prompty.json
        config = sorted(
            [
                c
                for c in prompty_config
                if len(c.parent.parts) <= len(prompty_path.parts)
            ],
            key=lambda p: len(p.parts),
        )[-1]

        with open(config, "r") as f:
            c = json.load(f)
            if configuration in c:
                return c[configuration]
            else:
                raise ValueError(f'Item "{configuration}" not found in "{config}"')

    return {}


def headless(
    api: str,
    content: str | List[str] | dict,
    configuration: Dict[str, any] = {},
    parameters: Dict[str, any] = {},
    connection: str = "default",
) -> Prompty:
    # get caller's path (to get relative path for prompty.json)
    caller = Path(traceback.extract_stack()[-2].filename)
    templateSettings = TemplateSettings(type="NOOP", parser="NOOP")
    modelSettings = ModelSettings(
        api=api,
        configuration=Prompty.normalize(
            param_hoisting(
                configuration, load_global_config(caller.parent, connection)
            ),
            caller.parent,
        ),
        parameters=parameters,
    )

    return Prompty(model=modelSettings, template=templateSettings, content=content)


def load(prompty_file: str, configuration: str = "default") -> Prompty:
    p = Path(prompty_file)
    if not p.is_absolute():
        # get caller's path (take into account trace frame)
        caller = Path(traceback.extract_stack()[-2].filename)
        p = Path(caller.parent / p).resolve().absolute()

    # load dictionary from prompty file
    matter = Frontmatter.read_file(p)
    attributes = matter["attributes"]
    content = matter["body"]

    # normalize attribute dictionary resolve keys and files
    attributes = Prompty.normalize(attributes, p.parent)

    # load global configuration
    global_config = Prompty.normalize(
        load_global_config(p.parent, configuration), p.parent
    )
    if "model" not in attributes:
        attributes["model"] = {}

    if "configuration" not in attributes["model"]:
        attributes["model"]["configuration"] = global_config
    else:
        attributes["model"]["configuration"] = param_hoisting(
            attributes["model"]["configuration"],
            global_config,
        )

    # pull model settings out of attributes
    try:
        model = ModelSettings(**attributes.pop("model"))
    except Exception as e:
        raise ValueError(f"Error in model settings: {e}")

    # pull template settings
    try:
        if "template" in attributes:
            t = attributes.pop("template")
            if isinstance(t, dict):
                template = TemplateSettings(**t)
            # has to be a string denoting the type
            else:
                template = TemplateSettings(type=t, parser="prompty")
        else:
            template = TemplateSettings(type="jinja2", parser="prompty")
    except Exception as e:
        raise ValueError(f"Error in template loader: {e}")

    # formalize inputs and outputs
    if "inputs" in attributes:
        try:
            inputs = {
                k: PropertySettings(**v) for (k, v) in attributes.pop("inputs").items()
            }
        except Exception as e:
            raise ValueError(f"Error in inputs: {e}")
    else:
        inputs = {}
    if "outputs" in attributes:
        try:
            outputs = {
                k: PropertySettings(**v) for (k, v) in attributes.pop("outputs").items()
            }
        except Exception as e:
            raise ValueError(f"Error in outputs: {e}")
    else:
        outputs = {}

    # recursive loading of base prompty
    if "base" in attributes:
        # load the base prompty from the same directory as the current prompty
        base = load(p.parent / attributes["base"])
        # hoist the base prompty's attributes to the current prompty
        model.api = base.model.api if model.api == "" else model.api
        model.configuration = param_hoisting(
            model.configuration, base.model.configuration
        )
        model.parameters = param_hoisting(model.parameters, base.model.parameters)
        model.response = param_hoisting(model.response, base.model.response)
        attributes["sample"] = param_hoisting(attributes, base.sample, "sample")

        p = Prompty(
            **attributes,
            model=model,
            inputs=inputs,
            outputs=outputs,
            template=template,
            content=content,
            file=p,
            basePrompty=base,
        )
    else:
        p = Prompty(
            **attributes,
            model=model,
            inputs=inputs,
            outputs=outputs,
            template=template,
            content=content,
            file=p,
        )
    return p


def prepare(
    prompt: Prompty,
    inputs: Dict[str, any] = {},
):
    inputs = param_hoisting(inputs, prompt.sample)

    if prompt.template.type == "NOOP":
        render = prompt.content
    else:
        # render
        renderer = InvokerFactory.create_renderer(prompt.template.type, prompt)
        render = renderer(inputs)

    if prompt.template.parser == "NOOP":
        result = render
    else:
        # parse [parser].[api]
        parser = InvokerFactory.create_parser(
            f"{prompt.template.parser}.{prompt.model.api}", prompt
        )
        result = parser(render)

    return result


def run(
    prompt: Prompty,
    content: dict | list | str,
    configuration: Dict[str, any] = {},
    parameters: Dict[str, any] = {},
    raw: bool = False,
):
    # invoker = InvokerFactory()

    if configuration != {}:
        prompt.model.configuration = param_hoisting(
            configuration, prompt.model.configuration
        )

    if parameters != {}:
        prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)

    # execute
    executor = InvokerFactory.create_executor(
        prompt.model.configuration["type"], prompt
    )
    result = executor(content)

    # skip?
    if not raw:
        # process
        processor = InvokerFactory.create_processor(
            prompt.model.configuration["type"], prompt
        )
        result = processor(result)

    return result


def execute(
    prompt: Union[str, Prompty],
    configuration: Dict[str, any] = {},
    parameters: Dict[str, any] = {},
    inputs: Dict[str, any] = {},
    raw: bool = False,
    connection: str = "default",
):

    if isinstance(prompt, str):
        path = Path(prompt)
        if not path.is_absolute():
            # get caller's path (take into account trace frame)
            caller = Path(traceback.extract_stack()[-2].filename)
            path = Path(caller.parent / path).resolve().absolute()
        prompt = load(path, connection)

    # prepare content
    content = prepare(prompt, inputs)

    # run LLM model
    result = run(prompt, content, configuration, parameters, raw)

    return result
```
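Taken together, the module above exposes a small pipeline: `load` reads and normalizes the file, `prepare` renders and parses, `run` executes and post-processes, and `execute` chains all three. A hedged usage sketch — the file path and input are hypothetical, and `execute` needs a resolvable model configuration (e.g. a nearby `prompty.json` or frontmatter credentials):

```python
# Usage sketch against the API defined above; paths and inputs are invented.
import prompty

# load + inspect without calling a model
p = prompty.load("prompts/basic.prompty")
print(p.model.api)       # e.g. "chat"
print(p.to_safe_json())  # keys/secrets masked via model_dump_safe()

# prepare only renders the jinja2 template and parses it into messages
messages = prompty.prepare(p, inputs={"question": "What is Prompty?"})

# end-to-end: load -> prepare -> run (requires a valid model configuration)
result = prompty.execute(
    "prompts/basic.prompty",
    inputs={"question": "What is Prompty?"},
)
```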
prompty-0.1.1/prompty/core.py
ADDED
@@ -0,0 +1,305 @@
```python
from __future__ import annotations

import os
import re
import yaml
import json
import abc
from pathlib import Path
from pydantic import BaseModel, Field, FilePath
from typing import List, Literal, Dict, Callable, TypeVar


T = TypeVar("T")


class PropertySettings(BaseModel):
    type: Literal["string", "number", "array", "object", "boolean"]
    default: str | int | float | List | dict | bool = Field(default=None)
    description: str = Field(default="")


class ModelSettings(BaseModel):
    api: str = Field(default="")
    configuration: dict = Field(default={})
    parameters: dict = Field(default={})
    response: dict = Field(default={})

    def model_dump_safe(self) -> dict:
        d = self.model_dump()
        d["configuration"] = {
            k: "*" * len(v) if "key" in k.lower() or "secret" in k.lower() else v
            for k, v in d["configuration"].items()
        }
        return d


class TemplateSettings(BaseModel):
    type: str = Field(default="jinja2")
    parser: str = Field(default="")


class Prompty(BaseModel):
    # metadata
    name: str = Field(default="")
    description: str = Field(default="")
    authors: List[str] = Field(default=[])
    tags: List[str] = Field(default=[])
    version: str = Field(default="")
    base: str = Field(default="")
    basePrompty: Prompty | None = Field(default=None)
    # model
    model: ModelSettings = Field(default_factory=ModelSettings)

    # sample
    sample: dict = Field(default={})

    # input / output
    inputs: Dict[str, PropertySettings] = Field(default={})
    outputs: Dict[str, PropertySettings] = Field(default={})

    # template
    template: TemplateSettings

    file: FilePath = Field(default="")
    content: str | List[str] | dict = Field(default="")

    def to_safe_dict(self) -> Dict[str, any]:
        d = {}
        for k, v in self:
            if v != "" and v != {} and v != [] and v != None:
                if k == "model":
                    d[k] = v.model_dump_safe()
                elif k == "template":
                    d[k] = v.model_dump()
                elif k == "inputs" or k == "outputs":
                    d[k] = {k: v.model_dump() for k, v in v.items()}
                elif k == "file":
                    d[k] = (
                        str(self.file.as_posix())
                        if isinstance(self.file, Path)
                        else self.file
                    )
                elif k == "basePrompty":
                    # no need to serialize basePrompty
                    continue

                else:
                    d[k] = v
        return d

    # generate json representation of the prompty
    def to_safe_json(self) -> str:
        d = self.to_safe_dict()
        return json.dumps(d)

    @staticmethod
    def _process_file(file: str, parent: Path) -> any:
        file = Path(parent / Path(file)).resolve().absolute()
        if file.exists():
            with open(str(file), "r") as f:
                items = json.load(f)
                if isinstance(items, list):
                    return [Prompty.normalize(value, parent) for value in items]
                elif isinstance(items, dict):
                    return {
                        key: Prompty.normalize(value, parent)
                        for key, value in items.items()
                    }
                else:
                    return items
        else:
            raise FileNotFoundError(f"File {file} not found")

    @staticmethod
    def _process_env(variable: str, env_error=True) -> any:
        if variable in os.environ.keys():
            return os.environ[variable]
        else:
            if env_error:
                raise ValueError(f"Variable {variable} not found in environment")
            else:
                return ""

    @staticmethod
    def normalize(attribute: any, parent: Path, env_error=True) -> any:
        if isinstance(attribute, str):
            attribute = attribute.strip()
            if attribute.startswith("${") and attribute.endswith("}"):
                # check if env or file
                variable = attribute[2:-1].split(":")
                if variable[0] == "env" and len(variable) > 1:
                    return Prompty._process_env(variable[1], env_error)
                elif variable[0] == "file" and len(variable) > 1:
                    return Prompty._process_file(variable[1], parent)
                else:
                    # old way of doing things for back compatibility
                    v = Prompty._process_env(variable[0], False)
                    if len(v) == 0:
                        if len(variable) > 1:
                            return variable[1]
                        else:
                            if env_error:
                                raise ValueError(
                                    f"Variable {variable[0]} not found in environment"
                                )
                            else:
                                return v
                    else:
                        return v
            elif (
                attribute.startswith("file:")
                and Path(parent / attribute.split(":")[1]).exists()
            ):
                # old way of doing things for back compatibility
                return Prompty._process_file(attribute.split(":")[1], parent)
            else:
                return attribute
        elif isinstance(attribute, list):
            return [Prompty.normalize(value, parent) for value in attribute]
        elif isinstance(attribute, dict):
            return {
                key: Prompty.normalize(value, parent)
                for key, value in attribute.items()
            }
        else:
            return attribute


def param_hoisting(
    top: Dict[str, any], bottom: Dict[str, any], top_key: str = None
) -> Dict[str, any]:
    if top_key:
        new_dict = {**top[top_key]} if top_key in top else {}
    else:
        new_dict = {**top}
    for key, value in bottom.items():
        if not key in new_dict:
            new_dict[key] = value
    return new_dict


class Invoker(abc.ABC):
    def __init__(self, prompty: Prompty) -> None:
        self.prompty = prompty

    @abc.abstractmethod
    def invoke(self, data: any) -> any:
        pass

    def __call__(self, data: any) -> any:
        return self.invoke(data)


class InvokerFactory:
    _renderers: Dict[str, Invoker] = {}
    _parsers: Dict[str, Invoker] = {}
    _executors: Dict[str, Invoker] = {}
    _processors: Dict[str, Invoker] = {}

    @classmethod
    def register_renderer(cls, name: str) -> Callable:
        def inner_wrapper(wrapped_class: Invoker) -> Callable:
            cls._renderers[name] = wrapped_class
            return wrapped_class

        return inner_wrapper

    @classmethod
    def register_parser(cls, name: str) -> Callable:
        def inner_wrapper(wrapped_class: Invoker) -> Callable:
            cls._parsers[name] = wrapped_class
            return wrapped_class

        return inner_wrapper

    @classmethod
    def register_executor(cls, name: str) -> Callable:
        def inner_wrapper(wrapped_class: Invoker) -> Callable:
            cls._executors[name] = wrapped_class
            return wrapped_class

        return inner_wrapper

    @classmethod
    def register_processor(cls, name: str) -> Callable:
        def inner_wrapper(wrapped_class: Invoker) -> Callable:
            cls._processors[name] = wrapped_class
            return wrapped_class

        return inner_wrapper

    @classmethod
    def create_renderer(cls, name: str, prompty: Prompty) -> Invoker:
        if name not in cls._renderers:
            raise ValueError(f"Renderer {name} not found")
        return cls._renderers[name](prompty)

    @classmethod
    def create_parser(cls, name: str, prompty: Prompty) -> Invoker:
        if name not in cls._parsers:
            raise ValueError(f"Parser {name} not found")
        return cls._parsers[name](prompty)

    @classmethod
    def create_executor(cls, name: str, prompty: Prompty) -> Invoker:
        if name not in cls._executors:
            raise ValueError(f"Executor {name} not found")
        return cls._executors[name](prompty)

    @classmethod
    def create_processor(cls, name: str, prompty: Prompty) -> Invoker:
        if name not in cls._processors:
            raise ValueError(f"Processor {name} not found")
        return cls._processors[name](prompty)


@InvokerFactory.register_renderer("NOOP")
@InvokerFactory.register_parser("NOOP")
@InvokerFactory.register_executor("NOOP")
@InvokerFactory.register_processor("NOOP")
@InvokerFactory.register_parser("prompty.embedding")
@InvokerFactory.register_parser("prompty.image")
@InvokerFactory.register_parser("prompty.completion")
class NoOp(Invoker):
    def invoke(self, data: any) -> any:
        return data


class Frontmatter:
    _yaml_delim = r"(?:---|\+\+\+)"
    _yaml = r"(.*?)"
    _content = r"\s*(.+)$"
    _re_pattern = r"^\s*" + _yaml_delim + _yaml + _yaml_delim + _content
    _regex = re.compile(_re_pattern, re.S | re.M)

    @classmethod
    def read_file(cls, path):
        """Reads file at path and returns dict with separated frontmatter.
        See read() for more info on dict return value.
        """
        with open(path, encoding="utf-8") as file:
            file_contents = file.read()
            return cls.read(file_contents)

    @classmethod
    def read(cls, string):
        """Returns dict with separated frontmatter from string.

        Returned dict keys:
        attributes -- extracted YAML attributes in dict form.
        body -- string contents below the YAML separators
        frontmatter -- string representation of YAML
        """
        fmatter = ""
        body = ""
        result = cls._regex.search(string)

        if result:
            fmatter = result.group(1)
            body = result.group(2)
        return {
            "attributes": yaml.load(fmatter, Loader=yaml.FullLoader),
            "body": body,
            "frontmatter": fmatter,
        }
```
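The `InvokerFactory` registry above is the extension point: renderers, parsers, executors, and processors are all `Invoker` subclasses registered by name and instantiated per `Prompty`. A sketch of a custom renderer — the name `uppercase` is hypothetical; the real registrations live in `renderers.py`, `parsers.py`, `executors.py`, and `processors.py`:

```python
# Sketch of extending the registry defined above; "uppercase" is invented.
from prompty.core import Invoker, InvokerFactory

@InvokerFactory.register_renderer("uppercase")
class UppercaseRenderer(Invoker):
    # receives the template inputs, returns the rendered prompt text
    def invoke(self, data: any) -> any:
        return self.prompty.content.upper()

# Setting template.type to "uppercase" in a .prompty file would then make
# prepare() route through this class via InvokerFactory.create_renderer.
```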
prompty-0.1.1/prompty/executors.py
ADDED
@@ -0,0 +1,70 @@
```python
import azure.identity
from openai import AzureOpenAI
from .core import Invoker, InvokerFactory, Prompty
from pathlib import Path


@InvokerFactory.register_executor("azure")
@InvokerFactory.register_executor("azure_openai")
class AzureOpenAIExecutor(Invoker):
    def __init__(self, prompty: Prompty) -> None:
        self.prompty = prompty
        kwargs = {
            key: value
            for key, value in self.prompty.model.configuration.items()
            if key != "type"
        }

        # no key, use default credentials
        if "api_key" not in kwargs:
            # managed identity if client id
            if "client_id" in kwargs:
                default_credential = azure.identity.ManagedIdentityCredential(
                    client_id=kwargs.pop("client_id"),
                )
            # default credential
            else:
                default_credential = azure.identity.DefaultAzureCredential(
                    exclude_shared_token_cache_credential=True
                )

            kwargs["azure_ad_token_provider"] = (
                azure.identity.get_bearer_token_provider(
                    default_credential, "https://cognitiveservices.azure.com/.default"
                )
            )

        self.client = AzureOpenAI(
            default_headers={"User-Agent": "prompty/0.1.0"},
            **kwargs,
        )

        self.api = self.prompty.model.api
        self.deployment = self.prompty.model.configuration["azure_deployment"]
        self.parameters = self.prompty.model.parameters

    def invoke(self, data: any) -> any:
        if self.api == "chat":
            response = self.client.chat.completions.create(
                model=self.deployment,
                messages=data if isinstance(data, list) else [data],
                **self.parameters,
            )
        elif self.api == "completion":
            response = self.client.completions.create(
                prompt=data.item,
                model=self.deployment,
                **self.parameters,
            )

        elif self.api == "embedding":
            response = self.client.embeddings.create(
                input=data if isinstance(data, list) else [data],
                model=self.deployment,
                **self.parameters,
            )

        elif self.api == "image":
            raise NotImplementedError("Azure OpenAI Image API is not implemented yet")

        return response
```