prompty 0.1.39.tar.gz → 0.1.44.tar.gz

Files changed (83)
  1. {prompty-0.1.39 → prompty-0.1.44}/PKG-INFO +1 -1
  2. {prompty-0.1.39 → prompty-0.1.44}/prompty/__init__.py +35 -31
  3. {prompty-0.1.39 → prompty-0.1.44}/prompty/azure/__init__.py +2 -2
  4. {prompty-0.1.39 → prompty-0.1.44}/prompty/azure/executor.py +23 -19
  5. {prompty-0.1.39 → prompty-0.1.44}/prompty/azure/processor.py +18 -7
  6. {prompty-0.1.39 → prompty-0.1.44}/prompty/azure_beta/__init__.py +2 -2
  7. {prompty-0.1.39 → prompty-0.1.44}/prompty/azure_beta/executor.py +26 -17
  8. {prompty-0.1.39 → prompty-0.1.44}/prompty/cli.py +47 -13
  9. {prompty-0.1.39 → prompty-0.1.44}/prompty/core.py +45 -40
  10. {prompty-0.1.39 → prompty-0.1.44}/prompty/invoker.py +51 -37
  11. prompty-0.1.44/prompty/openai/__init__.py +10 -0
  12. {prompty-0.1.39 → prompty-0.1.44}/prompty/openai/executor.py +9 -5
  13. {prompty-0.1.39 → prompty-0.1.44}/prompty/openai/processor.py +9 -6
  14. {prompty-0.1.39 → prompty-0.1.44}/prompty/parsers.py +21 -17
  15. {prompty-0.1.39 → prompty-0.1.44}/prompty/renderers.py +18 -7
  16. {prompty-0.1.39 → prompty-0.1.44}/prompty/serverless/__init__.py +2 -2
  17. {prompty-0.1.39 → prompty-0.1.44}/prompty/serverless/executor.py +32 -16
  18. {prompty-0.1.39 → prompty-0.1.44}/prompty/serverless/processor.py +11 -7
  19. {prompty-0.1.39 → prompty-0.1.44}/prompty/tracer.py +41 -32
  20. {prompty-0.1.39 → prompty-0.1.44}/prompty/utils.py +20 -15
  21. {prompty-0.1.39 → prompty-0.1.44}/pyproject.toml +22 -1
  22. {prompty-0.1.39 → prompty-0.1.44}/tests/fake_azure_executor.py +17 -9
  23. {prompty-0.1.39 → prompty-0.1.44}/tests/fake_serverless_executor.py +16 -6
  24. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/sub/sub/test.py +1 -0
  25. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/test.py +1 -0
  26. {prompty-0.1.39 → prompty-0.1.44}/tests/test_common.py +1 -1
  27. {prompty-0.1.39 → prompty-0.1.44}/tests/test_execute.py +19 -7
  28. {prompty-0.1.39 → prompty-0.1.44}/tests/test_factory_invoker.py +5 -4
  29. {prompty-0.1.39 → prompty-0.1.44}/tests/test_path_exec.py +4 -2
  30. {prompty-0.1.39 → prompty-0.1.44}/tests/test_tracing.py +9 -15
  31. prompty-0.1.39/prompty/openai/__init__.py +0 -10
  32. {prompty-0.1.39 → prompty-0.1.44}/LICENSE +0 -0
  33. {prompty-0.1.39 → prompty-0.1.44}/README.md +0 -0
  34. {prompty-0.1.39 → prompty-0.1.44}/tests/__init__.py +0 -0
  35. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/1contoso.md +0 -0
  36. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/2contoso.md +0 -0
  37. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/3contoso.md +0 -0
  38. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/4contoso.md +0 -0
  39. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/basic.prompty.md +0 -0
  40. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/camping.jpg +0 -0
  41. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/context.prompty.md +0 -0
  42. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/contoso_multi.md +0 -0
  43. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/faithfulness.prompty.md +0 -0
  44. {prompty-0.1.39 → prompty-0.1.44}/tests/generated/groundedness.prompty.md +0 -0
  45. {prompty-0.1.39 → prompty-0.1.44}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  46. {prompty-0.1.39 → prompty-0.1.44}/tests/hello_world.embedding.json +0 -0
  47. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/__init__.py +0 -0
  48. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/basic.prompty +0 -0
  49. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/basic.prompty.execution.json +0 -0
  50. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/basic_json_output.prompty +0 -0
  51. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/camping.jpg +0 -0
  52. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/chat.prompty +0 -0
  53. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/context.json +0 -0
  54. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/context.prompty +0 -0
  55. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/context.prompty.execution.json +0 -0
  56. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/embedding.prompty +0 -0
  57. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/embedding.prompty.execution.json +0 -0
  58. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/evaluation.prompty +0 -0
  59. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/faithfulness.prompty +0 -0
  60. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  61. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/fake.prompty +0 -0
  62. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/funcfile.json +0 -0
  63. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/funcfile.prompty +0 -0
  64. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/functions.prompty +0 -0
  65. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/functions.prompty.execution.json +0 -0
  66. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/groundedness.prompty +0 -0
  67. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/groundedness.prompty.execution.json +0 -0
  68. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/prompty.json +0 -0
  69. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/serverless.prompty +0 -0
  70. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/serverless.prompty.execution.json +0 -0
  71. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/serverless_stream.prompty +0 -0
  72. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
  73. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/streaming.prompty +0 -0
  74. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/streaming.prompty.execution.json +0 -0
  75. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/structured_output.prompty +0 -0
  76. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/structured_output.prompty.execution.json +0 -0
  77. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/structured_output_schema.json +0 -0
  78. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/sub/__init__.py +0 -0
  79. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/sub/basic.prompty +0 -0
  80. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/sub/sub/__init__.py +0 -0
  81. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/sub/sub/basic.prompty +0 -0
  82. {prompty-0.1.39 → prompty-0.1.44}/tests/prompts/sub/sub/prompty.json +0 -0
  83. {prompty-0.1.39 → prompty-0.1.44}/tests/prompty.json +0 -0

{prompty-0.1.39 → prompty-0.1.44}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prompty
-Version: 0.1.39
+Version: 0.1.44
 Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
 Author-Email: Seth Juarez <seth.juarez@microsoft.com>
 License: MIT

{prompty-0.1.39 → prompty-0.1.44}/prompty/__init__.py

@@ -1,8 +1,8 @@
 import traceback
+import typing
 from pathlib import Path
-from typing import Dict, List, Union
-from .tracer import trace
-from .invoker import InvokerFactory, NoOp
+from typing import Union
+
 from .core import (
     ModelSettings,
     Prompty,
@@ -10,23 +10,27 @@ from .core import (
     TemplateSettings,
     param_hoisting,
 )
+from .invoker import InvokerFactory
+from .parsers import PromptyChatParser
+from .renderers import Jinja2Renderer
+from .tracer import trace
 from .utils import (
     load_global_config,
     load_global_config_async,
-    load_prompty_async,
     load_prompty,
+    load_prompty_async,
 )
 
-from .renderers import *
-from .parsers import *
+InvokerFactory.add_renderer("jinja2", Jinja2Renderer)
+InvokerFactory.add_parser("prompty.chat", PromptyChatParser)
 
 
 @trace(description="Create a headless prompty object for programmatic use.")
 def headless(
     api: str,
-    content: str | List[str] | dict,
-    configuration: Dict[str, any] = {},
-    parameters: Dict[str, any] = {},
+    content: Union[str, list[str], dict],
+    configuration: dict[str, typing.Any] = {},
+    parameters: dict[str, typing.Any] = {},
     connection: str = "default",
 ) -> Prompty:
     """Create a headless prompty object for programmatic use.
@@ -81,9 +85,9 @@ def headless(
 @trace(description="Create a headless prompty object for programmatic use.")
 async def headless_async(
     api: str,
-    content: str | List[str] | dict,
-    configuration: Dict[str, any] = {},
-    parameters: Dict[str, any] = {},
+    content: Union[str, list[str], dict],
+    configuration: dict[str, typing.Any] = {},
+    parameters: dict[str, typing.Any] = {},
     connection: str = "default",
 ) -> Prompty:
     """Create a headless prompty object for programmatic use.
@@ -188,17 +192,17 @@ def _load_raw_prompty(attributes: dict, content: str, p: Path, global_config: di
     else:
         outputs = {}
 
-    p = Prompty(
-        **attributes,
+    prompty = Prompty(
         model=model,
         inputs=inputs,
         outputs=outputs,
         template=template,
         content=content,
         file=p,
+        **attributes
     )
 
-    return p
+    return prompty
 
 
 @trace(description="Load a prompty file.")
@@ -311,7 +315,7 @@ async def load_async(prompty_file: str, configuration: str = "default") -> Promp
 @trace(description="Prepare the inputs for the prompt.")
 def prepare(
     prompt: Prompty,
-    inputs: Dict[str, any] = {},
+    inputs: dict[str, typing.Any] = {},
 ):
     """Prepare the inputs for the prompt.
 
@@ -345,7 +349,7 @@ def prepare(
 @trace(description="Prepare the inputs for the prompt.")
 async def prepare_async(
     prompt: Prompty,
-    inputs: Dict[str, any] = {},
+    inputs: dict[str, typing.Any] = {},
 ):
     """Prepare the inputs for the prompt.
 
@@ -379,9 +383,9 @@ async def prepare_async(
 @trace(description="Run the prepared Prompty content against the model.")
 def run(
     prompt: Prompty,
-    content: dict | list | str,
-    configuration: Dict[str, any] = {},
-    parameters: Dict[str, any] = {},
+    content: Union[dict, list, str],
+    configuration: dict[str, typing.Any] = {},
+    parameters: dict[str, typing.Any] = {},
     raw: bool = False,
 ):
     """Run the prepared Prompty content.
@@ -431,9 +435,9 @@ def run(
 @trace(description="Run the prepared Prompty content against the model.")
 async def run_async(
     prompt: Prompty,
-    content: dict | list | str,
-    configuration: Dict[str, any] = {},
-    parameters: Dict[str, any] = {},
+    content: Union[dict, list, str],
+    configuration: dict[str, typing.Any] = {},
+    parameters: dict[str, typing.Any] = {},
     raw: bool = False,
 ):
     """Run the prepared Prompty content.
@@ -483,9 +487,9 @@ async def run_async(
 @trace(description="Execute a prompty")
 def execute(
     prompt: Union[str, Prompty],
-    configuration: Dict[str, any] = {},
-    parameters: Dict[str, any] = {},
-    inputs: Dict[str, any] = {},
+    configuration: dict[str, typing.Any] = {},
+    parameters: dict[str, typing.Any] = {},
+    inputs: dict[str, typing.Any] = {},
     raw: bool = False,
     config_name: str = "default",
 ):
@@ -517,7 +521,7 @@ def execute(
     >>> inputs = {"name": "John Doe"}
     >>> result = prompty.execute("prompts/basic.prompty", inputs=inputs)
     """
-    if isinstance(prompt, str):
+    if isinstance(prompt, (str, Path)):
         path = Path(prompt)
         if not path.is_absolute():
             # get caller's path (take into account trace frame)
@@ -537,9 +541,9 @@ def execute(
 @trace(description="Execute a prompty")
 async def execute_async(
     prompt: Union[str, Prompty],
-    configuration: Dict[str, any] = {},
-    parameters: Dict[str, any] = {},
-    inputs: Dict[str, any] = {},
+    configuration: dict[str, typing.Any] = {},
+    parameters: dict[str, typing.Any] = {},
+    inputs: dict[str, typing.Any] = {},
     raw: bool = False,
     config_name: str = "default",
 ):
@@ -571,7 +575,7 @@ async def execute_async(
     >>> inputs = {"name": "John Doe"}
     >>> result = await prompty.execute_async("prompts/basic.prompty", inputs=inputs)
     """
-    if isinstance(prompt, str):
+    if isinstance(prompt, (str, Path)):
         path = Path(prompt)
         if not path.is_absolute():
             # get caller's path (take into account trace frame)
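
Note: the recurring annotation fix in this file is substantive, not cosmetic. `Dict[str, any]` used the builtin function `any`, which is not a type; `dict[str, typing.Any]` is the correct spelling (subscripted builtins require Python 3.9+). A minimal before/after sketch, assuming nothing beyond the standard library:

import typing

# before: `any` is the builtin any(iterable) -> bool, not a type,
# so type checkers reject or silently mistype Dict[str, any]
# after: typing.Any is the intended "accept anything" annotation
def execute(configuration: dict[str, typing.Any] = {}) -> None:
    ...

The `isinstance(prompt, (str, Path))` change similarly lets callers pass a pathlib.Path as well as a string path.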

{prompty-0.1.39 → prompty-0.1.44}/prompty/azure/__init__.py

@@ -2,8 +2,8 @@
 from prompty.invoker import InvokerException
 
 try:
-    from .executor import AzureOpenAIExecutor
-    from .processor import AzureOpenAIProcessor
+    from .executor import AzureOpenAIExecutor  # noqa
+    from .processor import AzureOpenAIProcessor  # noqa
 except ImportError:
     raise InvokerException(
         "Error registering AzureOpenAIExecutor and AzureOpenAIProcessor", "azure"

{prompty-0.1.39 → prompty-0.1.44}/prompty/azure/executor.py

@@ -1,12 +1,14 @@
-import json
-import azure.identity
 import importlib.metadata
-from typing import AsyncIterator, Iterator
-from openai import APIResponse, AzureOpenAI, AsyncAzureOpenAI
+import typing
+from collections.abc import AsyncIterator, Iterator
 
-from prompty.tracer import Tracer, sanitize
-from ..core import AsyncPromptyStream, Prompty, PromptyStream
+import azure.identity
+from openai import APIResponse, AsyncAzureOpenAI, AzureOpenAI
 from openai.types.chat.chat_completion import ChatCompletion
+
+from prompty.tracer import Tracer
+
+from ..core import AsyncPromptyStream, Prompty, PromptyStream
 from ..invoker import Invoker, InvokerFactory
 
 VERSION = importlib.metadata.version("prompty")
@@ -29,7 +31,10 @@ class AzureOpenAIExecutor(Invoker):
         if "api_key" not in self.kwargs:
             # managed identity if client id
             if "client_id" in self.kwargs:
-                default_credential = azure.identity.ManagedIdentityCredential(
+                default_credential: typing.Union[
+                    azure.identity.ManagedIdentityCredential,
+                    azure.identity.DefaultAzureCredential,
+                ] = azure.identity.ManagedIdentityCredential(
                     client_id=self.kwargs.pop("client_id"),
                 )
             # default credential
@@ -48,7 +53,7 @@ class AzureOpenAIExecutor(Invoker):
         self.deployment = self.prompty.model.configuration["azure_deployment"]
         self.parameters = self.prompty.model.parameters
 
-    def invoke(self, data: any) -> any:
+    def invoke(self, data: typing.Any) -> typing.Union[str, PromptyStream]:
        """Invoke the Azure OpenAI API
 
         Parameters
@@ -89,12 +94,11 @@ class AzureOpenAIExecutor(Invoker):
             }
             trace("inputs", args)
 
-            if "stream" in args and args["stream"] == True:
+            if "stream" in args and args["stream"]:
                 response = client.chat.completions.create(**args)
             else:
-                raw: APIResponse = client.chat.completions.with_raw_response.create(
-                    **args
-                )
+                raw = client.chat.completions.with_raw_response.create(**args)
+
                 response = ChatCompletion.model_validate_json(raw.text)
 
             for k, v in raw.headers.raw:
@@ -135,7 +139,7 @@ class AzureOpenAIExecutor(Invoker):
                 **self.parameters,
             }
             trace("inputs", args)
-            response = client.images.generate.create(**args)
+            response = client.images.generate(**args)
             trace("result", response)
 
         # stream response
@@ -148,7 +152,7 @@ class AzureOpenAIExecutor(Invoker):
         else:
             return response
 
-    async def invoke_async(self, data: str) -> str:
+    async def invoke_async(self, data: str) -> typing.Union[str, AsyncPromptyStream]:
         """Invoke the Prompty Chat Parser (Async)
 
         Parameters
@@ -188,13 +192,13 @@ class AzureOpenAIExecutor(Invoker):
             }
             trace("inputs", args)
 
-            if "stream" in args and args["stream"] == True:
+            if "stream" in args and args["stream"]:
                 response = await client.chat.completions.create(**args)
             else:
-                raw: APIResponse = await client.chat.completions.with_raw_response.create(
-                    **args
+                raw: APIResponse = (
+                    await client.chat.completions.with_raw_response.create(**args)
                 )
-                response = ChatCompletion.model_validate_json(raw.text)
+                response = ChatCompletion.model_validate_json(raw.text())
             for k, v in raw.headers.raw:
                 trace(k.decode("utf-8"), v.decode("utf-8"))
 
@@ -234,7 +238,7 @@ class AzureOpenAIExecutor(Invoker):
                 **self.parameters,
             }
             trace("inputs", args)
-            response = await client.images.generate.create(**args)
+            response = await client.images.generate(**args)
            trace("result", response)
 
         # stream response
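
Note: two genuine bug fixes sit among the cleanups here. `client.images.generate` is the callable in the `openai` SDK, so the old `client.images.generate.create(**args)` raised AttributeError; and per this diff the async raw-response body is now read with `raw.text()` while the sync path keeps the `raw.text` property. A sketch of the corrected sync flow, assuming `client`, `deployment`, `data`, and `trace` are set up as in the executor above:

raw = client.chat.completions.with_raw_response.create(
    model=deployment,
    messages=data,
)
response = ChatCompletion.model_validate_json(raw.text)  # sync: .text is a property

# response headers are surfaced into the tracer, as in the hunk above
for k, v in raw.headers.raw:
    trace(k.decode("utf-8"), v.decode("utf-8"))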

{prompty-0.1.39 → prompty-0.1.44}/prompty/azure/processor.py

@@ -1,10 +1,13 @@
-from typing import AsyncIterator, Iterator
+import typing
+from collections.abc import AsyncIterator, Iterator
+
+from openai.types.chat.chat_completion import ChatCompletion
 from openai.types.completion import Completion
+from openai.types.create_embedding_response import CreateEmbeddingResponse
 from openai.types.images_response import ImagesResponse
-from openai.types.chat.chat_completion import ChatCompletion
+
 from ..core import AsyncPromptyStream, Prompty, PromptyStream, ToolCall
 from ..invoker import Invoker, InvokerFactory
-from openai.types.create_embedding_response import CreateEmbeddingResponse
 
 
 @InvokerFactory.register_processor("azure")
@@ -17,7 +20,15 @@ class AzureOpenAIProcessor(Invoker):
     def __init__(self, prompty: Prompty) -> None:
         super().__init__(prompty)
 
-    def invoke(self, data: any) -> any:
+    def invoke(self, data: typing.Any) -> typing.Union[
+        str,
+        list[typing.Union[str, None]],
+        list[ToolCall],
+        list[float],
+        list[list[float]],
+        PromptyStream,
+        None,
+    ]:
         """Invoke the OpenAI/Azure API
 
         Parameters
@@ -71,7 +82,7 @@ class AzureOpenAIProcessor(Invoker):
                 for chunk in data:
                     if (
                         len(chunk.choices) == 1
-                        and chunk.choices[0].delta.content != None
+                        and chunk.choices[0].delta.content is not None
                     ):
                         content = chunk.choices[0].delta.content
                         yield content
@@ -80,7 +91,7 @@ class AzureOpenAIProcessor(Invoker):
         else:
             return data
 
-    async def invoke_async(self, data: str) -> str:
+    async def invoke_async(self, data: str) -> typing.Union[str, AsyncPromptyStream]:
         """Invoke the Prompty Chat Parser (Async)
 
         Parameters
@@ -134,7 +145,7 @@ class AzureOpenAIProcessor(Invoker):
                 async for chunk in data:
                     if (
                         len(chunk.choices) == 1
-                        and chunk.choices[0].delta.content != None
+                        and chunk.choices[0].delta.content is not None
                     ):
                         content = chunk.choices[0].delta.content
                         yield content
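
Note: replacing `!= None` with `is not None` is more than PEP 8 style. `!=` dispatches to `__eq__`/`__ne__`, which a payload object may override, while `is not` is an identity check that cannot be fooled. A tiny self-contained illustration:

class Chunk:
    def __eq__(self, other):  # claims equality with everything, even None
        return True

c = Chunk()
print(c != None)      # False - derived __ne__ says c "equals" None
print(c is not None)  # True - identity is unaffected by __eq__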

{prompty-0.1.39 → prompty-0.1.44}/prompty/azure_beta/__init__.py

@@ -2,9 +2,9 @@
 from prompty.invoker import InvokerException
 
 try:
-    from .executor import AzureOpenAIBetaExecutor
     # Reuse the common Azure OpenAI Processor
-    from ..azure.processor import AzureOpenAIProcessor
+    from ..azure.processor import AzureOpenAIProcessor  # noqa
+    from .executor import AzureOpenAIBetaExecutor  # noqa
 except ImportError:
     raise InvokerException(
         "Error registering AzureOpenAIBetaExecutor and AzureOpenAIProcessor", "azure_beta"

{prompty-0.1.39 → prompty-0.1.44}/prompty/azure_beta/executor.py

@@ -1,15 +1,19 @@
-import azure.identity
 import importlib.metadata
-from typing import AsyncIterator, Iterator
-from openai import AzureOpenAI, AsyncAzureOpenAI
+import re
+import typing
+from collections.abc import AsyncIterator, Iterator
+from datetime import datetime
+
+import azure.identity
+from openai import AsyncAzureOpenAI, AzureOpenAI
 
 from prompty.tracer import Tracer
+
 from ..core import AsyncPromptyStream, Prompty, PromptyStream
 from ..invoker import Invoker, InvokerFactory
-import re
-from datetime import datetime
 
-def extract_date(data: str) -> datetime:
+
+def extract_date(data: str) -> typing.Union[datetime, None]:
     """Extract date from a string
 
     Parameters
@@ -24,17 +28,18 @@ def extract_date(data: str) -> datetime:
     """
 
     # Regular expression to find dates in the format YYYY-MM-DD
-    date_pattern = re.compile(r'\b\d{4}-\d{2}-\d{2}\b')
+    date_pattern = re.compile(r"\b\d{4}-\d{2}-\d{2}\b")
     match = date_pattern.search(data)
     if match:
         date_str = match.group(0)
         # Validate the date format
         try:
-            return datetime.strptime(date_str, '%Y-%m-%d')
+            return datetime.strptime(date_str, "%Y-%m-%d")
         except ValueError:
             pass
     return None
 
+
 def is_structured_output_available(api_version: str) -> bool:
     """Check if the structured output API is available for the given API version
 
@@ -55,10 +60,11 @@ def is_structured_output_available(api_version: str) -> bool:
     api_version_date = extract_date(api_version)
 
     # Check if the API version are on or after the threshold date
-    if api_version_date >= threshold_api_version_date:
+    if api_version_date is not None and api_version_date >= threshold_api_version_date:
         return True
     return False
 
+
 VERSION = importlib.metadata.version("prompty")
 
 
@@ -79,7 +85,10 @@ class AzureOpenAIBetaExecutor(Invoker):
         if "api_key" not in self.kwargs:
             # managed identity if client id
             if "client_id" in self.kwargs:
-                default_credential = azure.identity.ManagedIdentityCredential(
+                default_credential: typing.Union[
+                    azure.identity.ManagedIdentityCredential,
+                    azure.identity.DefaultAzureCredential,
+                ] = azure.identity.ManagedIdentityCredential(
                     client_id=self.kwargs.pop("client_id"),
                 )
             # default credential
@@ -99,7 +108,7 @@ class AzureOpenAIBetaExecutor(Invoker):
         self.deployment = self.prompty.model.configuration["azure_deployment"]
         self.parameters = self.prompty.model.parameters
 
-    def invoke(self, data: any) -> any:
+    def invoke(self, data: typing.Any) -> typing.Any:
         """Invoke the Azure OpenAI API
 
         Parameters
@@ -133,13 +142,13 @@ class AzureOpenAIBetaExecutor(Invoker):
 
         if self.api == "chat":
             # We can only verify the API version as the model and its version are not part of prompty configuration
-            # Should be gpt-4o and 2024-08-06 or later
+            # Should be gpt-4o and 2024-08-06 or later
             choose_beta = is_structured_output_available(self.api_version)
             if choose_beta:
                 trace("signature", "AzureOpenAI.beta.chat.completions.parse")
             else:
                 trace("signature", "AzureOpenAI.chat.completions.create")
-
+
             args = {
                 "model": self.deployment,
                 "messages": data if isinstance(data, list) else [data],
@@ -147,7 +156,7 @@ class AzureOpenAIBetaExecutor(Invoker):
             }
             trace("inputs", args)
             if choose_beta:
-                response = client.beta.chat.completions.parse(**args)
+                response: typing.Any = client.beta.chat.completions.parse(**args)
             else:
                 response = client.chat.completions.create(**args)
             trace("result", response)
@@ -182,7 +191,7 @@ class AzureOpenAIBetaExecutor(Invoker):
                 **self.parameters,
             }
             trace("inputs", args)
-            response = client.images.generate.create(**args)
+            response = client.images.generate(**args)
             trace("result", response)
 
         # stream response
@@ -195,7 +204,7 @@ class AzureOpenAIBetaExecutor(Invoker):
         else:
             return response
 
-    async def invoke_async(self, data: str) -> str:
+    async def invoke_async(self, data: str) -> typing.Union[str, AsyncPromptyStream]:
         """Invoke the Prompty Chat Parser (Async)
 
         Parameters
@@ -267,7 +276,7 @@ class AzureOpenAIBetaExecutor(Invoker):
                 **self.parameters,
             }
             trace("inputs", args)
-            response = await client.images.generate.create(**args)
+            response = await client.images.generate(**args)
             trace("result", response)
 
         # stream response
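
Note: `extract_date` is now annotated as returning `typing.Union[datetime, None]`, and `is_structured_output_available` guards the comparison accordingly, so an API version with no YYYY-MM-DD date in it (e.g. a named preview) no longer fails with a TypeError when compared against the threshold. A quick behavioral sketch, assuming the 2024-08-06 threshold named in the comments above:

from datetime import datetime

assert extract_date("2024-10-21") == datetime(2024, 10, 21)
assert extract_date("preview") is None                        # no date to extract

assert is_structured_output_available("2024-10-21") is True   # on/after 2024-08-06
assert is_structured_output_available("preview") is False     # None date -> False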

{prompty-0.1.39 → prompty-0.1.44}/prompty/cli.py

@@ -1,14 +1,15 @@
-import os
-import json
-import click
 import importlib
-
+import json
+import os
 from pathlib import Path
+from typing import Any, Optional
+
+import click
+from dotenv import load_dotenv
 from pydantic import BaseModel
 
 import prompty
-from prompty.tracer import trace, PromptyTracer, console_tracer, Tracer
-from dotenv import load_dotenv
+from prompty.tracer import PromptyTracer, Tracer, console_tracer, trace
 
 
 def normalize_path(p, create_dir=False) -> Path:
@@ -32,6 +33,8 @@ def dynamic_import(module: str):
         t = "prompty.azure"
     elif module == "serverless":
         t = "prompty.serverless"
+    elif module == "openai":
+        t = "prompty.openai"
     else:
         t = module
 
@@ -44,9 +47,9 @@ def chat_mode(prompt_path: str):
     W = "\033[0m"  # white (normal)
     R = "\033[31m"  # red
     G = "\033[32m"  # green
-    O = "\033[33m"  # orange
+    # O = "\033[33m"  # orange
     B = "\033[34m"  # blue
-    P = "\033[35m"  # purple
+    # P = "\033[35m"  # purple
     print(f"Executing {str(prompt_path)} in chat mode...")
     p = prompty.load(str(prompt_path))
     if "chat_history" not in p.sample:
@@ -78,14 +81,16 @@ def chat_mode(prompt_path: str):
 
 
 @trace
-def execute(prompt_path: str, raw=False):
+def execute(prompt_path: str, inputs: Optional[dict[str, Any]] = None, raw=False):
     p = prompty.load(prompt_path)
 
+    inputs = inputs or {}
+
     try:
         # load executor / processor types
         dynamic_import(p.model.configuration["type"])
 
-        result = prompty.execute(p, raw=raw)
+        result = prompty.execute(p, inputs=inputs, raw=raw)
         if issubclass(type(result), BaseModel):
             print("\n", json.dumps(result.model_dump(), indent=4), "\n")
         elif isinstance(result, list):
@@ -98,13 +103,42 @@ def execute(prompt_path: str, raw=False):
         print(f"{type(e).__qualname__}: {e}", "\n")
 
 
-@click.command()
+def _attributes_to_dict(
+    ctx: click.Context, attribute: click.Option, attributes: tuple[str, ...]
+) -> dict[str, str]:
+    """Click callback that converts attributes specified in the form `key=value` to a
+    dictionary"""
+    result = {}
+    for arg in attributes:
+        k, v = arg.split("=")
+        if k in result:
+            raise click.BadParameter(f"Attribute {k!r} is specified twice")
+        if v == "@-":
+            v = click.get_text_stream("stdin").read()
+        if v.startswith("@"):
+            v = Path(v[1:]).read_text()
+        result[k] = v
+
+    return result
+
+
+@click.command(epilog="""
+\b
+INPUTS: key=value pairs
+    The values can come from:
+    - plain strings - e.g.: question="Does it have windows?"
+    - files - e.g.: question=@question.txt
+    - stdin - e.g.: question=@-
+
+For more information, visit https://prompty.ai/
+""")
 @click.option("--source", "-s", required=True)
 @click.option("--env", "-e", required=False)
 @click.option("--verbose", "-v", is_flag=True)
 @click.option("--chat", "-c", is_flag=True)
+@click.argument("inputs", nargs=-1, callback=_attributes_to_dict)
 @click.version_option()
-def run(source, env, verbose, chat):
+def run(source, env, verbose, chat, inputs):
     # load external env file
     if env:
         print(f"Loading environment variables from {env}")
@@ -124,7 +158,7 @@ def run(source, env, verbose, chat):
     if chat:
         chat_mode(str(prompt_path))
     else:
-        execute(str(prompt_path), raw=verbose)
+        execute(str(prompt_path), inputs=inputs, raw=verbose)
 
 
 if __name__ == "__main__":
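
Note: the CLI now accepts trailing `key=value` arguments, which `_attributes_to_dict` expands (`@file` reads a file, `@-` reads stdin) before they are passed through to `prompty.execute(..., inputs=...)`. A sketch of exercising the new argument handling through click's test runner; the `.prompty` path is illustrative:

from click.testing import CliRunner

from prompty.cli import run

runner = CliRunner()

# plain string input
result = runner.invoke(
    run, ["--source", "prompts/basic.prompty", "question=Does it have windows?"]
)

# question=@- reads the value from stdin
result = runner.invoke(
    run,
    ["--source", "prompts/basic.prompty", "question=@-"],
    input="Does it have windows?",
)
print(result.output)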