prompty 0.1.10__py3-none-any.whl → 0.1.34__py3-none-any.whl

prompty/__init__.py CHANGED
@@ -1,51 +1,24 @@
-import json
 import traceback
 from pathlib import Path
 from typing import Dict, List, Union
-
 from .tracer import trace
+from .invoker import InvokerFactory, NoOp
 from .core import (
-    Frontmatter,
-    InvokerFactory,
     ModelSettings,
     Prompty,
     PropertySettings,
     TemplateSettings,
     param_hoisting,
 )
+from .utils import (
+    load_global_config,
+    load_global_config_async,
+    load_prompty_async,
+    load_prompty,
+)

 from .renderers import *
 from .parsers import *
-from .executors import *
-from .processors import *
-
-
-def load_global_config(
-    prompty_path: Path = Path.cwd(), configuration: str = "default"
-) -> Dict[str, any]:
-    # prompty.config laying around?
-    prompty_config = list(Path.cwd().glob("**/prompty.json"))
-
-    # if there is one load it
-    if len(prompty_config) > 0:
-        # pick the nearest prompty.json
-        config = sorted(
-            [
-                c
-                for c in prompty_config
-                if len(c.parent.parts) <= len(prompty_path.parts)
-            ],
-            key=lambda p: len(p.parts),
-        )[-1]
-
-        with open(config, "r") as f:
-            c = json.load(f)
-            if configuration in c:
-                return c[configuration]
-            else:
-                raise ValueError(f'Item "{configuration}" not found in "{config}"')
-
-    return {}


 @trace(description="Create a headless prompty object for programmatic use.")
@@ -105,47 +78,65 @@ def headless(
     return Prompty(model=modelSettings, template=templateSettings, content=content)


-@trace(description="Load a prompty file.")
-def load(prompty_file: str, configuration: str = "default") -> Prompty:
-    """Load a prompty file.
+@trace(description="Create a headless prompty object for programmatic use.")
+async def headless_async(
+    api: str,
+    content: str | List[str] | dict,
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    connection: str = "default",
+) -> Prompty:
+    """Create a headless prompty object for programmatic use.

     Parameters
     ----------
-    prompty_file : str
-        The path to the prompty file
-    configuration : str, optional
-        The configuration to use, by default "default"
+    api : str
+        The API to use for the model
+    content : str | List[str] | dict
+        The content to process
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    connection : str, optional
+        The connection to use, by default "default"

     Returns
     -------
     Prompty
-        The loaded prompty object
+        The headless prompty object

     Example
     -------
     >>> import prompty
-    >>> p = prompty.load("prompts/basic.prompty")
-    >>> print(p)
-    """
+    >>> p = await prompty.headless_async(
+            api="embedding",
+            configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
+            content="hello world",
+        )
+    >>> emb = prompty.execute(p)

-    p = Path(prompty_file)
-    if not p.is_absolute():
-        # get caller's path (take into account trace frame)
-        caller = Path(traceback.extract_stack()[-3].filename)
-        p = Path(caller.parent / p).resolve().absolute()
+    """

-    # load dictionary from prompty file
-    matter = Frontmatter.read_file(p)
-    attributes = matter["attributes"]
-    content = matter["body"]
+    # get caller's path (to get relative path for prompty.json)
+    caller = Path(traceback.extract_stack()[-2].filename)
+    templateSettings = TemplateSettings(type="NOOP", parser="NOOP")

-    # normalize attribute dictionary resolve keys and files
-    attributes = Prompty.normalize(attributes, p.parent)
+    global_config = await load_global_config_async(caller.parent, connection)
+    c = await Prompty.normalize_async(
+        param_hoisting(configuration, global_config), caller.parent
+    )

-    # load global configuration
-    global_config = Prompty.normalize(
-        load_global_config(p.parent, configuration), p.parent
+    modelSettings = ModelSettings(
+        api=api,
+        configuration=c,
+        parameters=parameters,
     )
+
+    return Prompty(model=modelSettings, template=templateSettings, content=content)
+
+
+def _load_raw_prompty(attributes: dict, content: str, p: Path, global_config: dict):
     if "model" not in attributes:
         attributes["model"] = {}

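The new `headless_async` mirrors `headless`, awaiting the global-config load and normalization (and inspecting a different caller frame, `[-2]` rather than `[-3]`). A hedged usage sketch; the deployment name is a placeholder:

```python
# Usage sketch for the async headless constructor; assumes an Azure OpenAI
# embedding deployment named "text-embedding-ada-002" (placeholder).
import asyncio
import prompty

async def main():
    p = await prompty.headless_async(
        api="embedding",
        configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
        content="hello world",
    )
    emb = prompty.execute(p)  # execute also accepts an in-memory Prompty object
    print(emb)

asyncio.run(main())
```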
@@ -197,47 +188,132 @@ def load(prompty_file: str, configuration: str = "default") -> Prompty:
     else:
         outputs = {}

+    p = Prompty(
+        **attributes,
+        model=model,
+        inputs=inputs,
+        outputs=outputs,
+        template=template,
+        content=content,
+        file=p,
+    )
+
+    return p
+
+
+@trace(description="Load a prompty file.")
+def load(prompty_file: str, configuration: str = "default") -> Prompty:
+    """Load a prompty file.
+
+    Parameters
+    ----------
+    prompty_file : str
+        The path to the prompty file
+    configuration : str, optional
+        The configuration to use, by default "default"
+
+    Returns
+    -------
+    Prompty
+        The loaded prompty object
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> print(p)
+    """
+
+    p = Path(prompty_file)
+    if not p.is_absolute():
+        # get caller's path (take into account trace frame)
+        caller = Path(traceback.extract_stack()[-3].filename)
+        p = Path(caller.parent / p).resolve().absolute()
+
+    # load dictionary from prompty file
+    matter = load_prompty(p)
+
+    attributes = matter["attributes"]
+    content = matter["body"]
+
+    # normalize attribute dictionary resolve keys and files
+    attributes = Prompty.normalize(attributes, p.parent)
+
+    # load global configuration
+    global_config = Prompty.normalize(
+        load_global_config(p.parent, configuration), p.parent
+    )
+
+    prompty = _load_raw_prompty(attributes, content, p, global_config)
+
     # recursive loading of base prompty
     if "base" in attributes:
         # load the base prompty from the same directory as the current prompty
         base = load(p.parent / attributes["base"])
-        # hoist the base prompty's attributes to the current prompty
-        model.api = base.model.api if model.api == "" else model.api
-        model.configuration = param_hoisting(
-            model.configuration, base.model.configuration
-        )
-        model.parameters = param_hoisting(model.parameters, base.model.parameters)
-        model.response = param_hoisting(model.response, base.model.response)
-        attributes["sample"] = param_hoisting(attributes, base.sample, "sample")
-
-        p = Prompty(
-            **attributes,
-            model=model,
-            inputs=inputs,
-            outputs=outputs,
-            template=template,
-            content=content,
-            file=p,
-            basePrompty=base,
-        )
-    else:
-        p = Prompty(
-            **attributes,
-            model=model,
-            inputs=inputs,
-            outputs=outputs,
-            template=template,
-            content=content,
-            file=p,
-        )
-    return p
+        prompty = Prompty.hoist_base_prompty(prompty, base)
+
+    return prompty
+
+
+@trace(description="Load a prompty file.")
+async def load_async(prompty_file: str, configuration: str = "default") -> Prompty:
+    """Load a prompty file.
+
+    Parameters
+    ----------
+    prompty_file : str
+        The path to the prompty file
+    configuration : str, optional
+        The configuration to use, by default "default"
+
+    Returns
+    -------
+    Prompty
+        The loaded prompty object
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = await prompty.load_async("prompts/basic.prompty")
+    >>> print(p)
+    """
+
+    p = Path(prompty_file)
+    if not p.is_absolute():
+        # get caller's path (take into account trace frame)
+        caller = Path(traceback.extract_stack()[-3].filename)
+        p = Path(caller.parent / p).resolve().absolute()
+
+    # load dictionary from prompty file
+    matter = await load_prompty_async(p)
+
+    attributes = matter["attributes"]
+    content = matter["body"]
+
+    # normalize attribute dictionary resolve keys and files
+    attributes = await Prompty.normalize_async(attributes, p.parent)
+
+    # load global configuration
+    config = await load_global_config_async(p.parent, configuration)
+    global_config = await Prompty.normalize_async(config, p.parent)
+
+    prompty = _load_raw_prompty(attributes, content, p, global_config)
+
+    # recursive loading of base prompty
+    if "base" in attributes:
+        # load the base prompty from the same directory as the current prompty
+        base = await load_async(p.parent / attributes["base"])
+        prompty = Prompty.hoist_base_prompty(prompty, base)
+
+    return prompty
+

 @trace(description="Prepare the inputs for the prompt.")
 def prepare(
     prompt: Prompty,
     inputs: Dict[str, any] = {},
 ):
-    """ Prepare the inputs for the prompt.
+    """Prepare the inputs for the prompt.

     Parameters
     ----------
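Loading is now split into a shared `_load_raw_prompty` plus thin sync/async wrappers, and base-prompty hoisting moves behind `Prompty.hoist_base_prompty`. Side-by-side usage, sketched with a placeholder path:

```python
import asyncio
import prompty

# synchronous, as before
p1 = prompty.load("prompts/basic.prompty", configuration="default")

# asynchronous twin added in this release
async def main():
    return await prompty.load_async("prompts/basic.prompty", configuration="default")

p2 = asyncio.run(main())
```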
@@ -260,24 +336,46 @@ def prepare(
     """
     inputs = param_hoisting(inputs, prompt.sample)

-    if prompt.template.type == "NOOP":
-        render = prompt.content
-    else:
-        # render
-        renderer = InvokerFactory.create_renderer(prompt.template.type, prompt)
-        render = renderer(inputs)
+    render = InvokerFactory.run_renderer(prompt, inputs, prompt.content)
+    result = InvokerFactory.run_parser(prompt, render)

-    if prompt.template.parser == "NOOP":
-        result = render
-    else:
-        # parse [parser].[api]
-        parser = InvokerFactory.create_parser(
-            f"{prompt.template.parser}.{prompt.model.api}", prompt
-        )
-        result = parser(render)
+    return result
+
+
+@trace(description="Prepare the inputs for the prompt.")
+async def prepare_async(
+    prompt: Prompty,
+    inputs: Dict[str, any] = {},
+):
+    """Prepare the inputs for the prompt.
+
+    Parameters
+    ----------
+    prompt : Prompty
+        The prompty object
+    inputs : Dict[str, any], optional
+        The inputs to the prompt, by default {}
+
+    Returns
+    -------
+    dict
+        The prepared and hydrated template shaped to the LLM model
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> inputs = {"name": "John Doe"}
+    >>> content = await prompty.prepare_async(p, inputs)
+    """
+    inputs = param_hoisting(inputs, prompt.sample)
+
+    render = await InvokerFactory.run_renderer_async(prompt, inputs, prompt.content)
+    result = await InvokerFactory.run_parser_async(prompt, render)

     return result

+
 @trace(description="Run the prepared Prompty content against the model.")
 def run(
     prompt: Prompty,
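`prepare` no longer special-cases `NOOP` templates and parsers itself; `InvokerFactory.run_renderer`/`run_parser` (and their `_async` twins) handle that dispatch, with the raw `prompt.content` passed in as the fallback. A typical call, per the docstrings above (path and inputs are placeholders):

```python
import prompty

p = prompty.load("prompts/basic.prompty")
content = prompty.prepare(p, {"name": "John Doe"})  # rendered + parsed messages
```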
@@ -323,22 +421,65 @@ def run(
     if parameters != {}:
         prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)

-    # execute
-    executor = InvokerFactory.create_executor(
-        prompt.model.configuration["type"], prompt
-    )
-    result = executor(content)
-
-    # skip?
+    result = InvokerFactory.run_executor(prompt, content)
     if not raw:
-        # process
-        processor = InvokerFactory.create_processor(
-            prompt.model.configuration["type"], prompt
+        result = InvokerFactory.run_processor(prompt, result)
+
+    return result
+
+
+@trace(description="Run the prepared Prompty content against the model.")
+async def run_async(
+    prompt: Prompty,
+    content: dict | list | str,
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    raw: bool = False,
+):
+    """Run the prepared Prompty content.
+
+    Parameters
+    ----------
+    prompt : Prompty
+        The prompty object
+    content : dict | list | str
+        The content to process
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    raw : bool, optional
+        Whether to skip processing, by default False
+
+    Returns
+    -------
+    any
+        The result of the prompt
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> inputs = {"name": "John Doe"}
+    >>> content = await prompty.prepare_async(p, inputs)
+    >>> result = await prompty.run_async(p, content)
+    """
+
+    if configuration != {}:
+        prompt.model.configuration = param_hoisting(
+            configuration, prompt.model.configuration
         )
-        result = processor(result)
+
+    if parameters != {}:
+        prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)
+
+    result = await InvokerFactory.run_executor_async(prompt, content)
+    if not raw:
+        result = await InvokerFactory.run_processor_async(prompt, result)

     return result

+
 @trace(description="Execute a prompty")
 def execute(
     prompt: Union[str, Prompty],
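`run` collapses the executor/processor plumbing into `InvokerFactory.run_executor`/`run_processor`, with `raw=True` still skipping the processor stage; `run_async` is the awaitable twin. A hedged sketch of per-call overrides (the `max_tokens` value is a placeholder, not a documented option):

```python
import prompty

p = prompty.load("prompts/basic.prompty")
content = prompty.prepare(p, {"name": "John Doe"})

result = prompty.run(p, content, parameters={"max_tokens": 512})
raw_response = prompty.run(p, content, raw=True)  # unprocessed model response
```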
@@ -346,7 +487,7 @@ def execute(
     parameters: Dict[str, any] = {},
     inputs: Dict[str, any] = {},
     raw: bool = False,
-    connection: str = "default",
+    config_name: str = "default",
 ):
     """Execute a prompty.

@@ -382,7 +523,7 @@ def execute(
             # get caller's path (take into account trace frame)
             caller = Path(traceback.extract_stack()[-3].filename)
             path = Path(caller.parent / path).resolve().absolute()
-        prompt = load(path, connection)
+        prompt = load(path, config_name)

     # prepare content
     content = prepare(prompt, inputs)
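The keyword that selects the `prompty.json` section is now `config_name` (previously `connection`), both in `execute`'s signature and at the `load` call above. Call sites migrate like this (path and inputs are placeholders):

```python
import prompty

# 0.1.10
# result = prompty.execute("prompts/basic.prompty", connection="default")

# 0.1.34
result = prompty.execute(
    "prompts/basic.prompty",
    inputs={"name": "John Doe"},
    config_name="default",
)
```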
@@ -391,3 +532,57 @@
     result = run(prompt, content, configuration, parameters, raw)

     return result
+
+
+@trace(description="Execute a prompty")
+async def execute_async(
+    prompt: Union[str, Prompty],
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    inputs: Dict[str, any] = {},
+    raw: bool = False,
+    config_name: str = "default",
+):
+    """Execute a prompty.
+
+    Parameters
+    ----------
+    prompt : Union[str, Prompty]
+        The prompty object or path to the prompty file
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    inputs : Dict[str, any], optional
+        The inputs to the prompt, by default {}
+    raw : bool, optional
+        Whether to skip processing, by default False
+    config_name : str, optional
+        The config name to use, by default "default"
+
+    Returns
+    -------
+    any
+        The result of the prompt
+
+    Example
+    -------
+    >>> import prompty
+    >>> inputs = {"name": "John Doe"}
+    >>> result = await prompty.execute_async("prompts/basic.prompty", inputs=inputs)
+    """
+    if isinstance(prompt, str):
+        path = Path(prompt)
+        if not path.is_absolute():
+            # get caller's path (take into account trace frame)
+            caller = Path(traceback.extract_stack()[-3].filename)
+            path = Path(caller.parent / path).resolve().absolute()
+        prompt = await load_async(path, config_name)
+
+    # prepare content
+    content = await prepare_async(prompt, inputs)
+
+    # run LLM model
+    result = await run_async(prompt, content, configuration, parameters, raw)
+
+    return result
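With `execute_async` in place, several prompties can run concurrently on one event loop. A hedged sketch; the file names and inputs are placeholders:

```python
import asyncio
import prompty

async def main():
    summary, label = await asyncio.gather(
        prompty.execute_async("prompts/summarize.prompty", inputs={"text": "..."}),
        prompty.execute_async("prompts/classify.prompty", inputs={"text": "..."}),
    )
    print(summary, label)

asyncio.run(main())
```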
prompty/azure/__init__.py ADDED
@@ -0,0 +1,10 @@
+# __init__.py
+from prompty.invoker import InvokerException
+
+try:
+    from .executor import AzureOpenAIExecutor
+    from .processor import AzureOpenAIProcessor
+except ImportError:
+    raise InvokerException(
+        "Error registering AzureOpenAIExecutor and AzureOpenAIProcessor", "azure"
+    )
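This new module turns a missing optional Azure dependency into an `InvokerException` at import time (the Azure extra, presumably installed via `pip install "prompty[azure]"`, supplies the executor and processor modules). One nit: the bare `raise` inside `except ImportError` drops the original traceback; chaining preserves it. A sketch of the same guard with chaining, not the shipped code:

```python
from prompty.invoker import InvokerException

try:
    from .executor import AzureOpenAIExecutor
    from .processor import AzureOpenAIProcessor
except ImportError as e:
    # `from e` keeps the underlying ImportError visible in the traceback
    raise InvokerException(
        "Error registering AzureOpenAIExecutor and AzureOpenAIProcessor", "azure"
    ) from e
```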