prompty 0.1.24__tar.gz → 0.1.33__tar.gz

Files changed (82)
  1. {prompty-0.1.24 → prompty-0.1.33}/PKG-INFO +7 -4
  2. {prompty-0.1.24 → prompty-0.1.33}/prompty/__init__.py +309 -123
  3. {prompty-0.1.24 → prompty-0.1.33}/prompty/azure/__init__.py +1 -1
  4. {prompty-0.1.24 → prompty-0.1.33}/prompty/azure/executor.py +89 -3
  5. {prompty-0.1.24 → prompty-0.1.33}/prompty/azure/processor.py +66 -2
  6. {prompty-0.1.24 → prompty-0.1.33}/prompty/core.py +87 -258
  7. prompty-0.1.33/prompty/invoker.py +297 -0
  8. {prompty-0.1.24 → prompty-0.1.33}/prompty/openai/__init__.py +1 -1
  9. {prompty-0.1.24 → prompty-0.1.33}/prompty/openai/executor.py +17 -1
  10. {prompty-0.1.24 → prompty-0.1.33}/prompty/openai/processor.py +17 -1
  11. {prompty-0.1.24 → prompty-0.1.33}/prompty/parsers.py +18 -1
  12. {prompty-0.1.24 → prompty-0.1.33}/prompty/renderers.py +19 -2
  13. {prompty-0.1.24 → prompty-0.1.33}/prompty/serverless/__init__.py +1 -1
  14. {prompty-0.1.24 → prompty-0.1.33}/prompty/serverless/executor.py +27 -5
  15. {prompty-0.1.24 → prompty-0.1.33}/prompty/serverless/processor.py +17 -1
  16. {prompty-0.1.24 → prompty-0.1.33}/prompty/tracer.py +6 -3
  17. prompty-0.1.33/prompty/utils.py +105 -0
  18. {prompty-0.1.24 → prompty-0.1.33}/pyproject.toml +12 -6
  19. {prompty-0.1.24 → prompty-0.1.33}/tests/fake_azure_executor.py +52 -2
  20. {prompty-0.1.24 → prompty-0.1.33}/tests/fake_serverless_executor.py +36 -1
  21. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/context.prompty +1 -1
  22. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/funcfile.prompty +1 -1
  23. prompty-0.1.33/tests/prompts/prompty.json +9 -0
  24. prompty-0.1.33/tests/prompts/sub/sub/prompty.json +9 -0
  25. prompty-0.1.33/tests/prompts/sub/sub/test.py +10 -0
  26. prompty-0.1.33/tests/prompts/test.py +9 -0
  27. prompty-0.1.33/tests/test_common.py +48 -0
  28. {prompty-0.1.24 → prompty-0.1.33}/tests/test_execute.py +118 -3
  29. {prompty-0.1.24 → prompty-0.1.33}/tests/test_factory_invoker.py +11 -24
  30. prompty-0.1.33/tests/test_path_exec.py +74 -0
  31. {prompty-0.1.24 → prompty-0.1.33}/tests/test_tracing.py +109 -3
  32. prompty-0.1.24/tests/prompts/prompty.json +0 -9
  33. prompty-0.1.24/tests/prompts/sub/sub/prompty.json +0 -9
  34. prompty-0.1.24/tests/prompts/sub/sub/test.py +0 -5
  35. prompty-0.1.24/tests/prompts/test.py +0 -5
  36. prompty-0.1.24/tests/test_common.py +0 -24
  37. prompty-0.1.24/tests/test_path_exec.py +0 -37
  38. {prompty-0.1.24 → prompty-0.1.33}/LICENSE +0 -0
  39. {prompty-0.1.24 → prompty-0.1.33}/README.md +0 -0
  40. {prompty-0.1.24 → prompty-0.1.33}/prompty/cli.py +0 -0
  41. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/1contoso.md +0 -0
  42. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/2contoso.md +0 -0
  43. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/3contoso.md +0 -0
  44. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/4contoso.md +0 -0
  45. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/basic.prompty.md +0 -0
  46. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/camping.jpg +0 -0
  47. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/context.prompty.md +0 -0
  48. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/contoso_multi.md +0 -0
  49. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/faithfulness.prompty.md +0 -0
  50. {prompty-0.1.24 → prompty-0.1.33}/tests/generated/groundedness.prompty.md +0 -0
  51. {prompty-0.1.24 → prompty-0.1.33}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  52. {prompty-0.1.24 → prompty-0.1.33}/tests/hello_world.embedding.json +0 -0
  53. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/__init__.py +0 -0
  54. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/basic.prompty +0 -0
  55. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/basic.prompty.execution.json +0 -0
  56. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/basic_json_output.prompty +0 -0
  57. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/camping.jpg +0 -0
  58. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/chat.prompty +0 -0
  59. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/context.json +0 -0
  60. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/context.prompty.execution.json +0 -0
  61. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/embedding.prompty +0 -0
  62. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/embedding.prompty.execution.json +0 -0
  63. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/evaluation.prompty +0 -0
  64. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/faithfulness.prompty +0 -0
  65. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  66. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/fake.prompty +0 -0
  67. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/funcfile.json +0 -0
  68. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/functions.prompty +0 -0
  69. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/functions.prompty.execution.json +0 -0
  70. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/groundedness.prompty +0 -0
  71. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/groundedness.prompty.execution.json +0 -0
  72. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/serverless.prompty +0 -0
  73. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/serverless.prompty.execution.json +0 -0
  74. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/serverless_stream.prompty +0 -0
  75. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
  76. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/streaming.prompty +0 -0
  77. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/streaming.prompty.execution.json +0 -0
  78. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/sub/__init__.py +0 -0
  79. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/sub/basic.prompty +0 -0
  80. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/sub/sub/__init__.py +0 -0
  81. {prompty-0.1.24 → prompty-0.1.33}/tests/prompts/sub/sub/basic.prompty +0 -0
  82. {prompty-0.1.24 → prompty-0.1.33}/tests/prompty.json +0 -0
{prompty-0.1.24 → prompty-0.1.33}/PKG-INFO
@@ -1,20 +1,23 @@
 Metadata-Version: 2.1
 Name: prompty
-Version: 0.1.24
+Version: 0.1.33
 Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
 Author-Email: Seth Juarez <seth.juarez@microsoft.com>
+License: MIT
+Requires-Python: >=3.9
 Requires-Dist: pyyaml>=6.0.1
 Requires-Dist: pydantic>=2.8.2
 Requires-Dist: jinja2>=3.1.4
 Requires-Dist: python-dotenv>=1.0.1
 Requires-Dist: click>=8.1.7
+Requires-Dist: aiofiles>=24.1.0
+Provides-Extra: azure
 Requires-Dist: azure-identity>=1.17.1; extra == "azure"
 Requires-Dist: openai>=1.35.10; extra == "azure"
-Requires-Dist: openai>=1.35.10; extra == "openai"
-Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
-Provides-Extra: azure
 Provides-Extra: openai
+Requires-Dist: openai>=1.35.10; extra == "openai"
 Provides-Extra: serverless
+Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
 Description-Content-Type: text/markdown
 
 
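Two metadata changes stand out: the package now declares License and Requires-Python, and it picks up `aiofiles` to back the new async file loading in `prompty/utils.py` (a file new in this release whose diff is not shown in this section). As a rough sketch of how an async frontmatter loader can be built on aiofiles — names and details below, beyond the aiofiles API itself, are assumptions, not the actual `utils.py` code:

```python
# Hypothetical sketch only: prompty/utils.py is new in 0.1.33 and not shown
# in this diff, so everything here beyond the aiofiles API is an assumption.
import re
import yaml       # pyyaml, already a dependency
import aiofiles   # the new dependency

_FRONTMATTER = re.compile(r"^\s*---(.*?)---\s*(.*)$", re.DOTALL)

async def load_prompty_async_sketch(path: str) -> dict:
    # aiofiles.open returns an async context manager; read() is awaitable
    async with aiofiles.open(path, "r", encoding="utf-8") as f:
        text = await f.read()
    match = _FRONTMATTER.match(text)
    if match is None:
        return {"attributes": {}, "body": text}
    return {"attributes": yaml.safe_load(match.group(1)) or {}, "body": match.group(2)}
```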
{prompty-0.1.24 → prompty-0.1.33}/prompty/__init__.py
@@ -1,52 +1,26 @@
-import json
 import traceback
 from pathlib import Path
 from typing import Dict, List, Union
-
-from prompty.tracer import trace
-from prompty.core import (
-    Frontmatter,
-    InvokerException,
-    InvokerFactory,
+from .tracer import trace
+from .invoker import InvokerFactory, NoOp
+from .core import (
     ModelSettings,
     Prompty,
     PropertySettings,
     TemplateSettings,
     param_hoisting,
 )
+from .utils import (
+    load_global_config,
+    load_global_config_async,
+    load_prompty_async,
+    load_prompty,
+)
 
 from .renderers import *
 from .parsers import *
 
 
-def load_global_config(
-    prompty_path: Path = Path.cwd(), configuration: str = "default"
-) -> Dict[str, any]:
-    # prompty.config laying around?
-    prompty_config = list(Path.cwd().glob("**/prompty.json"))
-
-    # if there is one load it
-    if len(prompty_config) > 0:
-        # pick the nearest prompty.json
-        config = sorted(
-            [
-                c
-                for c in prompty_config
-                if len(c.parent.parts) <= len(prompty_path.parts)
-            ],
-            key=lambda p: len(p.parts),
-        )[-1]
-
-        with open(config, "r") as f:
-            c = json.load(f)
-            if configuration in c:
-                return c[configuration]
-            else:
-                raise ValueError(f'Item "{configuration}" not found in "{config}"')
-
-    return {}
-
-
 @trace(description="Create a headless prompty object for programmatic use.")
 def headless(
     api: str,
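The deleted `load_global_config` (reappearing in `prompty.utils`, per the new import block) globs for `prompty.json` files, picks the one nearest the prompty file, and returns the requested section. Assuming the relocated helper keeps the deleted signature, usage looks like this (the file contents are hypothetical):

```python
# prompty.json placed next to your .prompty files (hypothetical contents):
# {
#     "default":    {"type": "azure", "api_version": "2024-02-01"},
#     "production": {"type": "azure", "azure_deployment": "gpt-4o"}
# }
from pathlib import Path
from prompty.utils import load_global_config

# Returns the "production" section of the nearest prompty.json, {} when no
# prompty.json exists, and raises ValueError if the file lacks that key
# (behavior read off the deleted function body above).
config = load_global_config(Path("prompts"), "production")
```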
@@ -104,47 +78,65 @@ def headless(
     return Prompty(model=modelSettings, template=templateSettings, content=content)
 
 
-@trace(description="Load a prompty file.")
-def load(prompty_file: str, configuration: str = "default") -> Prompty:
-    """Load a prompty file.
+@trace(description="Create a headless prompty object for programmatic use.")
+async def headless_async(
+    api: str,
+    content: str | List[str] | dict,
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    connection: str = "default",
+) -> Prompty:
+    """Create a headless prompty object for programmatic use.
 
     Parameters
     ----------
-    prompty_file : str
-        The path to the prompty file
-    configuration : str, optional
-        The configuration to use, by default "default"
+    api : str
+        The API to use for the model
+    content : str | List[str] | dict
+        The content to process
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    connection : str, optional
+        The connection to use, by default "default"
 
     Returns
     -------
     Prompty
-        The loaded prompty object
+        The headless prompty object
 
     Example
     -------
     >>> import prompty
-    >>> p = prompty.load("prompts/basic.prompty")
-    >>> print(p)
-    """
+    >>> p = await prompty.headless_async(
+            api="embedding",
+            configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
+            content="hello world",
+        )
+    >>> emb = prompty.execute(p)
 
-    p = Path(prompty_file)
-    if not p.is_absolute():
-        # get caller's path (take into account trace frame)
-        caller = Path(traceback.extract_stack()[-3].filename)
-        p = Path(caller.parent / p).resolve().absolute()
+    """
 
-    # load dictionary from prompty file
-    matter = Frontmatter.read_file(p)
-    attributes = matter["attributes"]
-    content = matter["body"]
+    # get caller's path (to get relative path for prompty.json)
+    caller = Path(traceback.extract_stack()[-2].filename)
+    templateSettings = TemplateSettings(type="NOOP", parser="NOOP")
 
-    # normalize attribute dictionary resolve keys and files
-    attributes = Prompty.normalize(attributes, p.parent)
+    global_config = await load_global_config_async(caller.parent, connection)
+    c = await Prompty.normalize_async(
+        param_hoisting(configuration, global_config), caller.parent
+    )
 
-    # load global configuration
-    global_config = Prompty.normalize(
-        load_global_config(p.parent, configuration), p.parent
+    modelSettings = ModelSettings(
+        api=api,
+        configuration=c,
+        parameters=parameters,
     )
+
+    return Prompty(model=modelSettings, template=templateSettings, content=content)
+
+
+def _load_raw_prompty(attributes: dict, content: str, p: Path, global_config: dict):
     if "model" not in attributes:
         attributes["model"] = {}
 
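Note how `headless_async` resolves `prompty.json` relative to its *caller* by inspecting the stack (`extract_stack()[-2]`), while `load` below reaches one frame further (`[-3]`) to skip the frame added by the `@trace` wrapper. A standalone illustration of the technique (not prompty code):

```python
import traceback
from pathlib import Path

def caller_dir() -> Path:
    """Directory of the file that called this function."""
    # extract_stack()[-1] is this frame, so [-2] is the direct caller;
    # each decorator wrapping the call site pushes the target one index back
    return Path(traceback.extract_stack()[-2].filename).parent
```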
@@ -196,47 +188,132 @@ def load(prompty_file: str, configuration: str = "default") -> Prompty:
     else:
         outputs = {}
 
+    p = Prompty(
+        **attributes,
+        model=model,
+        inputs=inputs,
+        outputs=outputs,
+        template=template,
+        content=content,
+        file=p,
+    )
+
+    return p
+
+
+@trace(description="Load a prompty file.")
+def load(prompty_file: str, configuration: str = "default") -> Prompty:
+    """Load a prompty file.
+
+    Parameters
+    ----------
+    prompty_file : str
+        The path to the prompty file
+    configuration : str, optional
+        The configuration to use, by default "default"
+
+    Returns
+    -------
+    Prompty
+        The loaded prompty object
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> print(p)
+    """
+
+    p = Path(prompty_file)
+    if not p.is_absolute():
+        # get caller's path (take into account trace frame)
+        caller = Path(traceback.extract_stack()[-3].filename)
+        p = Path(caller.parent / p).resolve().absolute()
+
+    # load dictionary from prompty file
+    matter = load_prompty(p)
+
+    attributes = matter["attributes"]
+    content = matter["body"]
+
+    # normalize attribute dictionary resolve keys and files
+    attributes = Prompty.normalize(attributes, p.parent)
+
+    # load global configuration
+    global_config = Prompty.normalize(
+        load_global_config(p.parent, configuration), p.parent
+    )
+
+    prompty = _load_raw_prompty(attributes, content, p, global_config)
+
     # recursive loading of base prompty
     if "base" in attributes:
         # load the base prompty from the same directory as the current prompty
         base = load(p.parent / attributes["base"])
-        # hoist the base prompty's attributes to the current prompty
-        model.api = base.model.api if model.api == "" else model.api
-        model.configuration = param_hoisting(
-            model.configuration, base.model.configuration
-        )
-        model.parameters = param_hoisting(model.parameters, base.model.parameters)
-        model.response = param_hoisting(model.response, base.model.response)
-        attributes["sample"] = param_hoisting(attributes, base.sample, "sample")
-
-        p = Prompty(
-            **attributes,
-            model=model,
-            inputs=inputs,
-            outputs=outputs,
-            template=template,
-            content=content,
-            file=p,
-            basePrompty=base,
-        )
-    else:
-        p = Prompty(
-            **attributes,
-            model=model,
-            inputs=inputs,
-            outputs=outputs,
-            template=template,
-            content=content,
-            file=p,
-        )
-    return p
+        prompty = Prompty.hoist_base_prompty(prompty, base)
+
+    return prompty
+
+
+@trace(description="Load a prompty file.")
+async def load_async(prompty_file: str, configuration: str = "default") -> Prompty:
+    """Load a prompty file.
+
+    Parameters
+    ----------
+    prompty_file : str
+        The path to the prompty file
+    configuration : str, optional
+        The configuration to use, by default "default"
+
+    Returns
+    -------
+    Prompty
+        The loaded prompty object
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> print(p)
+    """
+
+    p = Path(prompty_file)
+    if not p.is_absolute():
+        # get caller's path (take into account trace frame)
+        caller = Path(traceback.extract_stack()[-3].filename)
+        p = Path(caller.parent / p).resolve().absolute()
+
+    # load dictionary from prompty file
+    matter = await load_prompty_async(p)
+
+    attributes = matter["attributes"]
+    content = matter["body"]
+
+    # normalize attribute dictionary resolve keys and files
+    attributes = await Prompty.normalize_async(attributes, p.parent)
+
+    # load global configuration
+    config = await load_global_config_async(p.parent, configuration)
+    global_config = await Prompty.normalize_async(config, p.parent)
+
+    prompty = _load_raw_prompty(attributes, content, p, global_config)
+
+    # recursive loading of base prompty
+    if "base" in attributes:
+        # load the base prompty from the same directory as the current prompty
+        base = await load_async(p.parent / attributes["base"])
+        prompty = Prompty.hoist_base_prompty(prompty, base)
+
+    return prompty
+
 
 @trace(description="Prepare the inputs for the prompt.")
 def prepare(
     prompt: Prompty,
     inputs: Dict[str, any] = {},
 ):
-    """ Prepare the inputs for the prompt.
+    """Prepare the inputs for the prompt.
 
     Parameters
     ----------
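The base-prompty merging that used to live inline (the deleted block above) is now a single call to `Prompty.hoist_base_prompty`; `core.py` is not shown in this section, but the removed lines suggest the merge still amounts to roughly this sketch:

```python
# Sketch reconstructed from the deleted inline logic; the actual core.py
# implementation in 0.1.33 is not shown in this diff and may differ.
from prompty.core import Prompty, param_hoisting

def hoist_base_prompty_sketch(top: Prompty, base: Prompty) -> Prompty:
    # the child prompty wins wherever it sets a value; the base fills gaps
    top.model.api = base.model.api if top.model.api == "" else top.model.api
    top.model.configuration = param_hoisting(
        top.model.configuration, base.model.configuration
    )
    top.model.parameters = param_hoisting(top.model.parameters, base.model.parameters)
    top.model.response = param_hoisting(top.model.response, base.model.response)
    top.basePrompty = base
    return top
```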
@@ -259,24 +336,46 @@ def prepare(
     """
    inputs = param_hoisting(inputs, prompt.sample)
 
-    if prompt.template.type == "NOOP":
-        render = prompt.content
-    else:
-        # render
-        renderer = InvokerFactory.create_renderer(prompt.template.type, prompt)
-        render = renderer(inputs)
+    render = InvokerFactory.run_renderer(prompt, inputs, prompt.content)
+    result = InvokerFactory.run_parser(prompt, render)
 
-    if prompt.template.parser == "NOOP":
-        result = render
-    else:
-        # parse [parser].[api]
-        parser = InvokerFactory.create_parser(
-            f"{prompt.template.parser}.{prompt.model.api}", prompt
-        )
-        result = parser(render)
+    return result
+
+
+@trace(description="Prepare the inputs for the prompt.")
+async def prepare_async(
+    prompt: Prompty,
+    inputs: Dict[str, any] = {},
+):
+    """Prepare the inputs for the prompt.
+
+    Parameters
+    ----------
+    prompt : Prompty
+        The prompty object
+    inputs : Dict[str, any], optional
+        The inputs to the prompt, by default {}
+
+    Returns
+    -------
+    dict
+        The prepared and hidrated template shaped to the LLM model
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> inputs = {"name": "John Doe"}
+    >>> content = await prompty.prepare_async(p, inputs)
+    """
+    inputs = param_hoisting(inputs, prompt.sample)
+
+    render = await InvokerFactory.run_renderer_async(prompt, inputs, prompt.content)
+    result = await InvokerFactory.run_parser_async(prompt, render)
 
     return result
 
+
 @trace(description="Run the prepared Prompty content against the model.")
 def run(
     prompt: Prompty,
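The NOOP special cases that `prepare` used to handle inline now live behind `InvokerFactory.run_renderer`/`run_parser` (defined in the new `prompty/invoker.py`, alongside the `NoOp` invoker imported at the top of the file). Reading the deleted branches, the factory calls presumably reduce to something like this sketch — invoker.py is not shown in this section, so treat the factory method names as assumptions carried over from the old API:

```python
# A reading of the deleted branches, not the actual invoker.py API.
from prompty.invoker import InvokerFactory

def run_renderer_sketch(prompt, inputs, default):
    if prompt.template.type == "NOOP":
        return default  # formerly: render = prompt.content
    renderer = InvokerFactory.create_renderer(prompt.template.type, prompt)
    return renderer(inputs)

def run_parser_sketch(prompt, render):
    if prompt.template.parser == "NOOP":
        return render
    parser = InvokerFactory.create_parser(
        f"{prompt.template.parser}.{prompt.model.api}", prompt
    )
    return parser(render)
```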
@@ -322,32 +421,65 @@
     if parameters != {}:
         prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)
 
-    invoker_type = prompt.model.configuration["type"]
+    result = InvokerFactory.run_executor(prompt, content)
+    if not raw:
+        result = InvokerFactory.run_processor(prompt, result)
+
+    return result
+
+
+@trace(description="Run the prepared Prompty content against the model.")
+async def run_async(
+    prompt: Prompty,
+    content: dict | list | str,
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    raw: bool = False,
+):
+    """Run the prepared Prompty content.
 
-    # invoker registration check
-    if not InvokerFactory.has_invoker("executor", invoker_type):
-        raise InvokerException(
-            f"{invoker_type} Invoker has not been registered properly.", invoker_type
+    Parameters
+    ----------
+    prompt : Prompty
+        The prompty object
+    content : dict | list | str
+        The content to process
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    raw : bool, optional
+        Whether to skip processing, by default False
+
+    Returns
+    -------
+    any
+        The result of the prompt
+
+    Example
+    -------
+    >>> import prompty
+    >>> p = prompty.load("prompts/basic.prompty")
+    >>> inputs = {"name": "John Doe"}
+    >>> content = await prompty.prepare_async(p, inputs)
+    >>> result = await prompty.run_async(p, content)
+    """
+
+    if configuration != {}:
+        prompt.model.configuration = param_hoisting(
+            configuration, prompt.model.configuration
         )
 
-    # execute
-    executor = InvokerFactory.create_executor(invoker_type, prompt)
-    result = executor(content)
+    if parameters != {}:
+        prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)
 
-    # skip?
+    result = await InvokerFactory.run_executor_async(prompt, content)
     if not raw:
-        # invoker registration check
-        if not InvokerFactory.has_invoker("processor", invoker_type):
-            raise InvokerException(
-                f"{invoker_type} Invoker has not been registered properly.", invoker_type
-            )
-
-        # process
-        processor = InvokerFactory.create_processor(invoker_type, prompt)
-        result = processor(result)
+        result = await InvokerFactory.run_processor_async(prompt, result)
 
     return result
 
+
 @trace(description="Execute a prompty")
 def execute(
     prompt: Union[str, Prompty],
@@ -400,3 +532,57 @@ def execute(
     result = run(prompt, content, configuration, parameters, raw)
 
     return result
+
+
+@trace(description="Execute a prompty")
+async def execute_async(
+    prompt: Union[str, Prompty],
+    configuration: Dict[str, any] = {},
+    parameters: Dict[str, any] = {},
+    inputs: Dict[str, any] = {},
+    raw: bool = False,
+    config_name: str = "default",
+):
+    """Execute a prompty.
+
+    Parameters
+    ----------
+    prompt : Union[str, Prompty]
+        The prompty object or path to the prompty file
+    configuration : Dict[str, any], optional
+        The configuration to use, by default {}
+    parameters : Dict[str, any], optional
+        The parameters to use, by default {}
+    inputs : Dict[str, any], optional
+        The inputs to the prompt, by default {}
+    raw : bool, optional
+        Whether to skip processing, by default False
+    connection : str, optional
+        The connection to use, by default "default"
+
+    Returns
+    -------
+    any
+        The result of the prompt
+
+    Example
+    -------
+    >>> import prompty
+    >>> inputs = {"name": "John Doe"}
+    >>> result = await prompty.execute_async("prompts/basic.prompty", inputs=inputs)
+    """
+    if isinstance(prompt, str):
+        path = Path(prompt)
+        if not path.is_absolute():
+            # get caller's path (take into account trace frame)
+            caller = Path(traceback.extract_stack()[-3].filename)
+            path = Path(caller.parent / path).resolve().absolute()
+        prompt = await load_async(path, config_name)
+
+    # prepare content
+    content = await prepare_async(prompt, inputs)
+
+    # run LLM model
+    result = await run_async(prompt, content, configuration, parameters, raw)
+
+    return result
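The upshot of this file's changes: every step of the sync pipeline now has an async twin (`load`/`load_async`, `prepare`/`prepare_async`, `run`/`run_async`, `execute`/`execute_async`). An end-to-end usage sketch assembled from the docstring examples above (the prompty path and inputs are placeholders):

```python
import asyncio
import prompty

async def main():
    # one-shot, the async mirror of prompty.execute()
    result = await prompty.execute_async(
        "prompts/basic.prompty", inputs={"name": "John Doe"}
    )
    print(result)

    # or step by step, mirroring load() / prepare() / run()
    p = await prompty.load_async("prompts/basic.prompty")
    content = await prompty.prepare_async(p, {"name": "John Doe"})
    print(await prompty.run_async(p, content))

asyncio.run(main())
```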
{prompty-0.1.24 → prompty-0.1.33}/prompty/azure/__init__.py
@@ -1,5 +1,5 @@
 # __init__.py
-from prompty.core import InvokerException
+from prompty.invoker import InvokerException
 
 try:
     from .executor import AzureOpenAIExecutor
{prompty-0.1.24 → prompty-0.1.33}/prompty/azure/executor.py
@@ -1,10 +1,11 @@
 import azure.identity
 import importlib.metadata
-from typing import Iterator
-from openai import AzureOpenAI
+from typing import AsyncIterator, Iterator
+from openai import AzureOpenAI, AsyncAzureOpenAI
 
 from prompty.tracer import Tracer
-from ..core import Invoker, InvokerFactory, Prompty, PromptyStream
+from ..core import AsyncPromptyStream, Prompty, PromptyStream
+from ..invoker import Invoker, InvokerFactory
 
 VERSION = importlib.metadata.version("prompty")
 
@@ -130,3 +131,88 @@ class AzureOpenAIExecutor(Invoker):
             return PromptyStream("AzureOpenAIExecutor", response)
         else:
             return response
+
+    async def invoke_async(self, data: str) -> str:
+        """Invoke the Prompty Chat Parser (Async)
+
+        Parameters
+        ----------
+        data : str
+            The data to parse
+
+        Returns
+        -------
+        str
+            The parsed data
+        """
+        with Tracer.start("AzureOpenAIAsync") as trace:
+            trace("type", "LLM")
+            trace("signature", "AzureOpenAIAsync.ctor")
+            trace("description", "Async Azure OpenAI Constructor")
+            trace("inputs", self.kwargs)
+            client = AsyncAzureOpenAI(
+                default_headers={
+                    "User-Agent": f"prompty/{VERSION}",
+                    "x-ms-useragent": f"prompty/{VERSION}",
+                },
+                **self.kwargs,
+            )
+            trace("result", client)
+
+        with Tracer.start("create") as trace:
+            trace("type", "LLM")
+            trace("description", "Azure OpenAI Client")
+
+            if self.api == "chat":
+                trace("signature", "AzureOpenAIAsync.chat.completions.create")
+                args = {
+                    "model": self.deployment,
+                    "messages": data if isinstance(data, list) else [data],
+                    **self.parameters,
+                }
+                trace("inputs", args)
+                response = await client.chat.completions.create(**args)
+                trace("result", response)
+
+            elif self.api == "completion":
+                trace("signature", "AzureOpenAIAsync.completions.create")
+                args = {
+                    "prompt": data,
+                    "model": self.deployment,
+                    **self.parameters,
+                }
+                trace("inputs", args)
+                response = await client.completions.create(**args)
+                trace("result", response)
+
+            elif self.api == "embedding":
+                trace("signature", "AzureOpenAIAsync.embeddings.create")
+                args = {
+                    "input": data if isinstance(data, list) else [data],
+                    "model": self.deployment,
+                    **self.parameters,
+                }
+                trace("inputs", args)
+                response = await client.embeddings.create(**args)
+                trace("result", response)
+
+            elif self.api == "image":
+                trace("signature", "AzureOpenAIAsync.images.generate")
+                args = {
+                    "prompt": data,
+                    "model": self.deployment,
+                    **self.parameters,
+                }
+                trace("inputs", args)
+                response = await client.images.generate.create(**args)
+                trace("result", response)
+
+        # stream response
+        if isinstance(response, AsyncIterator):
+            if self.api == "chat":
+                # TODO: handle the case where there might be no usage in the stream
+                return AsyncPromptyStream("AzureOpenAIExecutorAsync", response)
+            else:
+                return AsyncPromptyStream("AzureOpenAIExecutorAsync", response)
+        else:
+            return response
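For streaming responses, `invoke_async` hands the `AsyncIterator` to `AsyncPromptyStream` (imported from `prompty.core`, whose diff is not shown in this section). Assuming it is the async-iterator counterpart of `PromptyStream`, a consumer would iterate it with `async for` — the prompty file, inputs, and chunk shape below are placeholders:

```python
import asyncio
import prompty

async def main():
    # assumes a .prompty configured to stream, e.g. the streaming.prompty
    # test asset listed in the files above; inputs are placeholders
    result = await prompty.execute_async(
        "prompts/streaming.prompty", inputs={"question": "hello"}
    )
    # AsyncPromptyStream is assumed to support async iteration
    async for chunk in result:
        print(chunk, end="", flush=True)

asyncio.run(main())
```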