prompty 0.1.9__py3-none-any.whl → 0.1.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prompty/__init__.py +312 -117
- prompty/azure/__init__.py +10 -0
- prompty/azure/executor.py +218 -0
- prompty/azure/processor.py +142 -0
- prompty/cli.py +74 -28
- prompty/core.py +138 -221
- prompty/invoker.py +297 -0
- prompty/openai/__init__.py +10 -0
- prompty/openai/executor.py +114 -0
- prompty/{processors.py → openai/processor.py} +25 -15
- prompty/parsers.py +18 -1
- prompty/renderers.py +19 -2
- prompty/serverless/__init__.py +8 -0
- prompty/serverless/executor.py +153 -0
- prompty/serverless/processor.py +78 -0
- prompty/tracer.py +162 -22
- prompty/utils.py +105 -0
- prompty-0.1.33.dist-info/METADATA +218 -0
- prompty-0.1.33.dist-info/RECORD +22 -0
- {prompty-0.1.9.dist-info → prompty-0.1.33.dist-info}/WHEEL +1 -1
- prompty-0.1.33.dist-info/entry_points.txt +5 -0
- prompty/executors.py +0 -94
- prompty-0.1.9.dist-info/METADATA +0 -136
- prompty-0.1.9.dist-info/RECORD +0 -12
- {prompty-0.1.9.dist-info → prompty-0.1.33.dist-info}/licenses/LICENSE +0 -0
prompty/__init__.py
CHANGED
@@ -1,51 +1,24 @@
-import json
 import traceback
 from pathlib import Path
 from typing import Dict, List, Union
-
 from .tracer import trace
+from .invoker import InvokerFactory, NoOp
 from .core import (
-    Frontmatter,
-    InvokerFactory,
     ModelSettings,
     Prompty,
     PropertySettings,
     TemplateSettings,
     param_hoisting,
 )
+from .utils import (
+    load_global_config,
+    load_global_config_async,
+    load_prompty_async,
+    load_prompty,
+)
 
 from .renderers import *
 from .parsers import *
-from .executors import *
-from .processors import *
-
-
-def load_global_config(
-    prompty_path: Path = Path.cwd(), configuration: str = "default"
-) -> Dict[str, any]:
-    # prompty.config laying around?
-    prompty_config = list(Path.cwd().glob("**/prompty.json"))
-
-    # if there is one load it
-    if len(prompty_config) > 0:
-        # pick the nearest prompty.json
-        config = sorted(
-            [
-                c
-                for c in prompty_config
-                if len(c.parent.parts) <= len(prompty_path.parts)
-            ],
-            key=lambda p: len(p.parts),
-        )[-1]
-
-        with open(config, "r") as f:
-            c = json.load(f)
-            if configuration in c:
-                return c[configuration]
-            else:
-                raise ValueError(f'Item "{configuration}" not found in "{config}"')
-
-    return {}
 
 
 @trace(description="Create a headless prompty object for programmatic use.")
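The 0.1.9 module-level `load_global_config` helper moves into `prompty.utils`, and `InvokerFactory` moves from `prompty.core` into the new `prompty.invoker` module. A minimal migration sketch based on the import changes above (old paths taken from the removed lines):

    # prompty 0.1.9
    # from prompty.core import InvokerFactory
    # from prompty import load_global_config

    # prompty 0.1.33
    from prompty.invoker import InvokerFactory
    from prompty.utils import load_global_config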
@@ -105,47 +78,65 @@ def headless(
     return Prompty(model=modelSettings, template=templateSettings, content=content)
 
 
-@trace(description="
-def
-
+@trace(description="Create a headless prompty object for programmatic use.")
+async def headless_async(
+    api: str,
+    content: str | List[str] | dict,
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    connection: str = "default",
+) -> Prompty:
+    """Create a headless prompty object for programmatic use.
 
     Parameters
     ----------
-
-    The
-
-    The
+    api : str
+        The API to use for the model
+    content : str | List[str] | dict
+        The content to process
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    connection : str, optional
+        The connection to use, by default "default"
 
     Returns
     -------
     Prompty
-        The
+        The headless prompty object
 
     Example
     -------
     >>> import prompty
-    >>> p = prompty.
-
-
+    >>> p = await prompty.headless_async(
+            api="embedding",
+            configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
+            content="hello world",
+        )
+    >>> emb = prompty.execute(p)
 
-
-    if not p.is_absolute():
-        # get caller's path (take into account trace frame)
-        caller = Path(traceback.extract_stack()[-3].filename)
-        p = Path(caller.parent / p).resolve().absolute()
+    """
 
-    #
-
-
-    content = matter["body"]
+    # get caller's path (to get relative path for prompty.json)
+    caller = Path(traceback.extract_stack()[-2].filename)
+    templateSettings = TemplateSettings(type="NOOP", parser="NOOP")
 
-
-
+    global_config = await load_global_config_async(caller.parent, connection)
+    c = await Prompty.normalize_async(
+        param_hoisting(configuration, global_config), caller.parent
+    )
 
-
-
-
+    modelSettings = ModelSettings(
+        api=api,
+        configuration=c,
+        parameters=parameters,
     )
+
+    return Prompty(model=modelSettings, template=templateSettings, content=content)
+
+
+def _load_raw_prompty(attributes: dict, content: str, p: Path, global_config: dict):
     if "model" not in attributes:
         attributes["model"] = {}
 
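`headless_async` mirrors the existing `headless` factory for async callers. A runnable sketch based on the docstring example above (the Azure embedding deployment name is illustrative):

    import asyncio
    import prompty

    async def main():
        # build a prompty object without a .prompty file
        p = await prompty.headless_async(
            api="embedding",
            configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
            content="hello world",
        )
        emb = prompty.execute(p)

    asyncio.run(main())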
@@ -197,47 +188,132 @@ def load(prompty_file: str, configuration: str = "default") -> Prompty:
     else:
         outputs = {}
 
+    p = Prompty(
+        **attributes,
+        model=model,
+        inputs=inputs,
+        outputs=outputs,
+        template=template,
+        content=content,
+        file=p,
+    )
+
+    return p
+
+
+@trace(description="Load a prompty file.")
+def load(prompty_file: str, configuration: str = "default") -> Prompty:
+    """Load a prompty file.
+
+    Parameters
+    ----------
+    prompty_file : str
+        The path to the prompty file
+    configuration : str, optional
+        The configuration to use, by default "default"
+
+    Returns
+    -------
+    Prompty
+        The loaded prompty object
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> print(p)
+    """
+
+    p = Path(prompty_file)
+    if not p.is_absolute():
+        # get caller's path (take into account trace frame)
+        caller = Path(traceback.extract_stack()[-3].filename)
+        p = Path(caller.parent / p).resolve().absolute()
+
+    # load dictionary from prompty file
+    matter = load_prompty(p)
+
+    attributes = matter["attributes"]
+    content = matter["body"]
+
+    # normalize attribute dictionary resolve keys and files
+    attributes = Prompty.normalize(attributes, p.parent)
+
+    # load global configuration
+    global_config = Prompty.normalize(
+        load_global_config(p.parent, configuration), p.parent
+    )
+
+    prompty = _load_raw_prompty(attributes, content, p, global_config)
+
     # recursive loading of base prompty
     if "base" in attributes:
         # load the base prompty from the same directory as the current prompty
         base = load(p.parent / attributes["base"])
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        prompty = Prompty.hoist_base_prompty(prompty, base)
+
+    return prompty
+
+
+@trace(description="Load a prompty file.")
+async def load_async(prompty_file: str, configuration: str = "default") -> Prompty:
+    """Load a prompty file.
+
+    Parameters
+    ----------
+    prompty_file : str
+        The path to the prompty file
+    configuration : str, optional
+        The configuration to use, by default "default"
+
+    Returns
+    -------
+    Prompty
+        The loaded prompty object
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> print(p)
+    """
+
+    p = Path(prompty_file)
+    if not p.is_absolute():
+        # get caller's path (take into account trace frame)
+        caller = Path(traceback.extract_stack()[-3].filename)
+        p = Path(caller.parent / p).resolve().absolute()
+
+    # load dictionary from prompty file
+    matter = await load_prompty_async(p)
+
+    attributes = matter["attributes"]
+    content = matter["body"]
+
+    # normalize attribute dictionary resolve keys and files
+    attributes = await Prompty.normalize_async(attributes, p.parent)
+
+    # load global configuration
+    config = await load_global_config_async(p.parent, configuration)
+    global_config = await Prompty.normalize_async(config, p.parent)
+
+    prompty = _load_raw_prompty(attributes, content, p, global_config)
+
+    # recursive loading of base prompty
+    if "base" in attributes:
+        # load the base prompty from the same directory as the current prompty
+        base = await load_async(p.parent / attributes["base"])
+        prompty = Prompty.hoist_base_prompty(prompty, base)
+
+    return prompty
+
 
 @trace(description="Prepare the inputs for the prompt.")
 def prepare(
     prompt: Prompty,
     inputs: Dict[str, any] = {},
 ):
-    """
+    """Prepare the inputs for the prompt.
 
     Parameters
     ----------
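`load_async` is the awaitable counterpart of `load`: both resolve relative paths against the caller's file, normalize attributes and global config, and recursively hoist any `base` prompty. A hedged usage sketch (the file path is illustrative):

    import asyncio
    import prompty

    async def main():
        # same semantics as prompty.load, but file I/O is awaited
        p = await prompty.load_async("prompts/basic.prompty")
        print(p)

    asyncio.run(main())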
@@ -260,24 +336,46 @@ def prepare(
     """
     inputs = param_hoisting(inputs, prompt.sample)
 
-
-
-    else:
-        # render
-        renderer = InvokerFactory.create_renderer(prompt.template.type, prompt)
-        render = renderer(inputs)
+    render = InvokerFactory.run_renderer(prompt, inputs, prompt.content)
+    result = InvokerFactory.run_parser(prompt, render)
 
-
-
-
-
-
-
-
-
+    return result
+
+
+@trace(description="Prepare the inputs for the prompt.")
+async def prepare_async(
+    prompt: Prompty,
+    inputs: Dict[str, any] = {},
+):
+    """Prepare the inputs for the prompt.
+
+    Parameters
+    ----------
+    prompt : Prompty
+        The prompty object
+    inputs : Dict[str, any], optional
+        The inputs to the prompt, by default {}
+
+    Returns
+    -------
+    dict
+        The prepared and hydrated template shaped to the LLM model
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> inputs = {"name": "John Doe"}
+    >>> content = await prompty.prepare_async(p, inputs)
+    """
+    inputs = param_hoisting(inputs, prompt.sample)
+
+    render = await InvokerFactory.run_renderer_async(prompt, inputs, prompt.content)
+    result = await InvokerFactory.run_parser_async(prompt, render)
 
     return result
 
+
 @trace(description="Run the prepared Prompty content against the model.")
 def run(
     prompt: Prompty,
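`prepare` no longer instantiates a renderer directly; both variants now delegate to the `InvokerFactory.run_renderer`/`run_parser` two-stage pipeline. A sketch of the flow, following the docstring example:

    import prompty

    p = prompty.load("prompts/basic.prompty")
    # render the template with the inputs, then parse into the model's message shape
    content = prompty.prepare(p, {"name": "John Doe"})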
@@ -323,22 +421,65 @@ def run(
     if parameters != {}:
         prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)
 
-
-    executor = InvokerFactory.create_executor(
-        prompt.model.configuration["type"], prompt
-    )
-    result = executor(content)
-
-    # skip?
+    result = InvokerFactory.run_executor(prompt, content)
     if not raw:
-
-
-
+        result = InvokerFactory.run_processor(prompt, result)
+
+    return result
+
+
+@trace(description="Run the prepared Prompty content against the model.")
+async def run_async(
+    prompt: Prompty,
+    content: dict | list | str,
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    raw: bool = False,
+):
+    """Run the prepared Prompty content.
+
+    Parameters
+    ----------
+    prompt : Prompty
+        The prompty object
+    content : dict | list | str
+        The content to process
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    raw : bool, optional
+        Whether to skip processing, by default False
+
+    Returns
+    -------
+    any
+        The result of the prompt
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> inputs = {"name": "John Doe"}
+    >>> content = await prompty.prepare_async(p, inputs)
+    >>> result = await prompty.run_async(p, content)
+    """
+
+    if configuration != {}:
+        prompt.model.configuration = param_hoisting(
+            configuration, prompt.model.configuration
         )
-
+
+    if parameters != {}:
+        prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)
+
+    result = await InvokerFactory.run_executor_async(prompt, content)
+    if not raw:
+        result = await InvokerFactory.run_processor_async(prompt, result)
 
     return result
 
+
 @trace(description="Execute a prompty")
 def execute(
     prompt: Union[str, Prompty],
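`run` and `run_async` share one pattern: the executor invokes the model, and the processor shapes the response unless `raw=True`. A short sketch (assuming `p` and `content` from the `prepare` example above):

    result = prompty.run(p, content)                  # executor, then processor
    raw_response = prompty.run(p, content, raw=True)  # executor only, unprocessed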
@@ -346,7 +487,7 @@ def execute(
     parameters: Dict[str, any] = {},
     inputs: Dict[str, any] = {},
     raw: bool = False,
-
+    config_name: str = "default",
 ):
     """Execute a prompty.
 
@@ -382,7 +523,7 @@ def execute(
         # get caller's path (take into account trace frame)
         caller = Path(traceback.extract_stack()[-3].filename)
         path = Path(caller.parent / path).resolve().absolute()
-        prompt = load(path,
+        prompt = load(path, config_name)
 
     # prepare content
     content = prepare(prompt, inputs)
@@ -391,3 +532,57 @@ def execute(
     result = run(prompt, content, configuration, parameters, raw)
 
     return result
+
+
+@trace(description="Execute a prompty")
+async def execute_async(
+    prompt: Union[str, Prompty],
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    inputs: Dict[str, any] = {},
+    raw: bool = False,
+    config_name: str = "default",
+):
+    """Execute a prompty.
+
+    Parameters
+    ----------
+    prompt : Union[str, Prompty]
+        The prompty object or path to the prompty file
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    inputs : Dict[str, any], optional
+        The inputs to the prompt, by default {}
+    raw : bool, optional
+        Whether to skip processing, by default False
+    config_name : str, optional
+        The config name to use, by default "default"
+
+    Returns
+    -------
+    any
+        The result of the prompt
+
+    Example
+    -------
+    >>> import prompty
+    >>> inputs = {"name": "John Doe"}
+    >>> result = await prompty.execute_async("prompts/basic.prompty", inputs=inputs)
+    """
+    if isinstance(prompt, str):
+        path = Path(prompt)
+        if not path.is_absolute():
+            # get caller's path (take into account trace frame)
+            caller = Path(traceback.extract_stack()[-3].filename)
+            path = Path(caller.parent / path).resolve().absolute()
+        prompt = await load_async(path, config_name)
+
+    # prepare content
+    content = await prepare_async(prompt, inputs)
+
+    # run LLM model
+    result = await run_async(prompt, content, configuration, parameters, raw)
+
+    return result
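`execute_async` chains `load_async`, `prepare_async`, and `run_async`; note the configuration selector is now named `config_name`. An end-to-end sketch based on the docstring example:

    import asyncio
    import prompty

    async def main():
        result = await prompty.execute_async(
            "prompts/basic.prompty",
            inputs={"name": "John Doe"},
        )
        print(result)

    asyncio.run(main())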
prompty/azure/__init__.py
ADDED
@@ -0,0 +1,10 @@
+# __init__.py
+from prompty.invoker import InvokerException
+
+try:
+    from .executor import AzureOpenAIExecutor
+    from .processor import AzureOpenAIProcessor
+except ImportError:
+    raise InvokerException(
+        "Error registering AzureOpenAIExecutor and AzureOpenAIProcessor", "azure"
+    )
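The new azure subpackage registers its invokers on import and converts a missing optional dependency into an `InvokerException`. A hedged sketch of handling that at the call site (the extra name `prompty[azure]` is an assumption based on the package layout):

    try:
        import prompty.azure  # registers AzureOpenAIExecutor / AzureOpenAIProcessor
    except Exception as e:
        # raised as InvokerException when the azure dependencies are missing;
        # installing the assumed extra `pip install prompty[azure]` should fix it
        print(f"Azure invokers unavailable: {e}")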