prompty 0.1.10.tar.gz → 0.1.12.tar.gz

Files changed (76)
  1. prompty-0.1.12/PKG-INFO +17 -0
  2. {prompty-0.1.10 → prompty-0.1.12}/prompty/__init__.py +2 -4
  3. prompty-0.1.12/prompty/azure/__init__.py +3 -0
  4. prompty-0.1.10/prompty/executors.py → prompty-0.1.12/prompty/azure/executor.py +5 -4
  5. prompty-0.1.10/prompty/processors.py → prompty-0.1.12/prompty/azure/processor.py +8 -13
  6. prompty-0.1.12/prompty/cli.py +117 -0
  7. {prompty-0.1.10 → prompty-0.1.12}/prompty/core.py +35 -7
  8. prompty-0.1.12/prompty/openai/__init__.py +3 -0
  9. prompty-0.1.12/prompty/openai/executor.py +74 -0
  10. prompty-0.1.12/prompty/openai/processor.py +65 -0
  11. prompty-0.1.12/prompty/serverless/__init__.py +3 -0
  12. prompty-0.1.12/prompty/serverless/executor.py +82 -0
  13. prompty-0.1.12/prompty/serverless/processor.py +62 -0
  14. {prompty-0.1.10 → prompty-0.1.12}/prompty/tracer.py +59 -1
  15. {prompty-0.1.10 → prompty-0.1.12}/pyproject.toml +24 -12
  16. prompty-0.1.10/tests/__init__.py → prompty-0.1.12/tests/fake_azure_executor.py +1 -3
  17. prompty-0.1.12/tests/fake_serverless_executor.py +45 -0
  18. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/chat.prompty +4 -3
  19. prompty-0.1.12/tests/prompts/serverless.prompty +38 -0
  20. prompty-0.1.12/tests/prompts/serverless.prompty.execution.json +22 -0
  21. prompty-0.1.12/tests/prompts/serverless_stream.prompty +39 -0
  22. prompty-0.1.12/tests/prompts/serverless_stream.prompty.execution.json +1432 -0
  23. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/streaming.prompty +2 -0
  24. prompty-0.1.10/tests/test_tracing.py → prompty-0.1.12/tests/test_execute.py +44 -23
  25. {prompty-0.1.10 → prompty-0.1.12}/tests/test_factory_invoker.py +11 -0
  26. {prompty-0.1.10 → prompty-0.1.12}/tests/test_path_exec.py +2 -2
  27. prompty-0.1.10/tests/test_execute.py → prompty-0.1.12/tests/test_tracing.py +22 -1
  28. prompty-0.1.10/PKG-INFO +0 -136
  29. prompty-0.1.10/README.md +0 -120
  30. prompty-0.1.10/prompty/cli.py +0 -85
  31. {prompty-0.1.10 → prompty-0.1.12}/LICENSE +0 -0
  32. {prompty-0.1.10 → prompty-0.1.12}/prompty/parsers.py +0 -0
  33. {prompty-0.1.10 → prompty-0.1.12}/prompty/renderers.py +0 -0
  34. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/1contoso.md +0 -0
  35. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/2contoso.md +0 -0
  36. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/3contoso.md +0 -0
  37. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/4contoso.md +0 -0
  38. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/basic.prompty.md +0 -0
  39. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/camping.jpg +0 -0
  40. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/context.prompty.md +0 -0
  41. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/contoso_multi.md +0 -0
  42. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/faithfulness.prompty.md +0 -0
  43. {prompty-0.1.10 → prompty-0.1.12}/tests/generated/groundedness.prompty.md +0 -0
  44. {prompty-0.1.10 → prompty-0.1.12}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  45. {prompty-0.1.10 → prompty-0.1.12}/tests/hello_world.embedding.json +0 -0
  46. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/__init__.py +0 -0
  47. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/basic.prompty +0 -0
  48. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/basic.prompty.execution.json +0 -0
  49. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/basic_json_output.prompty +0 -0
  50. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/camping.jpg +0 -0
  51. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/context.json +0 -0
  52. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/context.prompty +0 -0
  53. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/context.prompty.execution.json +0 -0
  54. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/embedding.prompty +0 -0
  55. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/embedding.prompty.execution.json +0 -0
  56. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/evaluation.prompty +0 -0
  57. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/faithfulness.prompty +0 -0
  58. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  59. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/fake.prompty +0 -0
  60. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/funcfile.json +0 -0
  61. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/funcfile.prompty +0 -0
  62. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/functions.prompty +0 -0
  63. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/functions.prompty.execution.json +0 -0
  64. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/groundedness.prompty +0 -0
  65. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/groundedness.prompty.execution.json +0 -0
  66. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/prompty.json +0 -0
  67. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/streaming.prompty.execution.json +0 -0
  68. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/sub/__init__.py +0 -0
  69. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/sub/basic.prompty +0 -0
  70. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/sub/sub/__init__.py +0 -0
  71. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/sub/sub/basic.prompty +0 -0
  72. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/sub/sub/prompty.json +0 -0
  73. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/sub/sub/test.py +0 -0
  74. {prompty-0.1.10 → prompty-0.1.12}/tests/prompts/test.py +0 -0
  75. {prompty-0.1.10 → prompty-0.1.12}/tests/prompty.json +0 -0
  76. {prompty-0.1.10 → prompty-0.1.12}/tests/test_common.py +0 -0
prompty-0.1.12/PKG-INFO
@@ -0,0 +1,17 @@
+Metadata-Version: 2.1
+Name: prompty
+Version: 0.1.12
+Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
+Author-Email: Seth Juarez <seth.juarez@microsoft.com>
+Requires-Dist: pyyaml>=6.0.1
+Requires-Dist: pydantic>=2.8.2
+Requires-Dist: jinja2>=3.1.4
+Requires-Dist: python-dotenv>=1.0.1
+Requires-Dist: click>=8.1.7
+Requires-Dist: azure-identity>=1.17.1; extra == "azure"
+Requires-Dist: openai>=1.35.10; extra == "azure"
+Requires-Dist: openai>=1.35.10; extra == "openai"
+Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
+Provides-Extra: azure
+Provides-Extra: openai
+Provides-Extra: serverless
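
These Provides-Extra entries make the backend dependencies opt-in: pip install "prompty[azure]" pulls in azure-identity plus openai, "prompty[openai]" just openai, and "prompty[serverless]" the azure-ai-inference client, while a bare pip install prompty stays dependency-light. The extras names come straight from the metadata above.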
{prompty-0.1.10 → prompty-0.1.12}/prompty/__init__.py
@@ -3,8 +3,8 @@ import traceback
 from pathlib import Path
 from typing import Dict, List, Union

-from .tracer import trace
-from .core import (
+from prompty.tracer import trace
+from prompty.core import (
     Frontmatter,
     InvokerFactory,
     ModelSettings,
@@ -16,8 +16,6 @@ from .core import (

 from .renderers import *
 from .parsers import *
-from .executors import *
-from .processors import *


 def load_global_config(
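
With the executors/processors star imports gone from the package root, model-specific invokers now register themselves only when their subpackage is imported (cli.py below automates this via dynamic_import). A minimal sketch, assuming an azure-typed .prompty file:

    import prompty
    import prompty.azure  # runs the @InvokerFactory.register_* decorators for the azure invokers

    p = prompty.load("chat.prompty")  # hypothetical file with model.configuration.type: azure
    print(prompty.execute(p))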
prompty-0.1.12/prompty/azure/__init__.py
@@ -0,0 +1,3 @@
+# __init__.py
+from .executor import AzureOpenAIExecutor
+from .processor import AzureOpenAIProcessor
prompty-0.1.10/prompty/executors.py → prompty-0.1.12/prompty/azure/executor.py
@@ -2,7 +2,7 @@ import azure.identity
 import importlib.metadata
 from typing import Iterator
 from openai import AzureOpenAI
-from .core import Invoker, InvokerFactory, Prompty, PromptyStream
+from ..core import Invoker, InvokerFactory, Prompty, PromptyStream

 VERSION = importlib.metadata.version("prompty")

@@ -10,7 +10,8 @@ VERSION = importlib.metadata.version("prompty")
 @InvokerFactory.register_executor("azure")
 @InvokerFactory.register_executor("azure_openai")
 class AzureOpenAIExecutor(Invoker):
-    """ Azure OpenAI Executor """
+    """Azure OpenAI Executor"""
+
     def __init__(self, prompty: Prompty) -> None:
         super().__init__(prompty)
         kwargs = {
@@ -40,7 +41,7 @@ class AzureOpenAIExecutor(Invoker):

         self.client = AzureOpenAI(
             default_headers={
-                "User-Agent": f"prompty{VERSION}",
+                "User-Agent": f"prompty/{VERSION}",
                 "x-ms-useragent": f"prompty/{VERSION}",
             },
             **kwargs,
@@ -51,7 +52,7 @@ class AzureOpenAIExecutor(Invoker):
         self.parameters = self.prompty.model.parameters

     def invoke(self, data: any) -> any:
-        """ Invoke the Azure OpenAI API
+        """Invoke the Azure OpenAI API

         Parameters
         ----------
prompty-0.1.10/prompty/processors.py → prompty-0.1.12/prompty/azure/processor.py
@@ -1,22 +1,14 @@
 from typing import Iterator
-from pydantic import BaseModel
 from openai.types.completion import Completion
 from openai.types.chat.chat_completion import ChatCompletion
-from .core import Invoker, InvokerFactory, Prompty, PromptyStream
+from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
 from openai.types.create_embedding_response import CreateEmbeddingResponse


-class ToolCall(BaseModel):
-    id: str
-    name: str
-    arguments: str
-
-
-@InvokerFactory.register_processor("openai")
 @InvokerFactory.register_processor("azure")
 @InvokerFactory.register_processor("azure_openai")
-class OpenAIProcessor(Invoker):
-    """OpenAI/Azure Processor"""
+class AzureOpenAIProcessor(Invoker):
+    """Azure OpenAI Processor"""

     def __init__(self, prompty: Prompty) -> None:
         super().__init__(prompty)
@@ -62,10 +54,13 @@ class OpenAIProcessor(Invoker):

         def generator():
             for chunk in data:
-                if len(chunk.choices) == 1 and chunk.choices[0].delta.content != None:
+                if (
+                    len(chunk.choices) == 1
+                    and chunk.choices[0].delta.content != None
+                ):
                     content = chunk.choices[0].delta.content
                     yield content

-        return PromptyStream("OpenAIProcessor", generator())
+        return PromptyStream("AzureOpenAIProcessor", generator())
     else:
         return data
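
ToolCall now lives in prompty.core (see the core.py hunk further down), and the processor returns a list of them whenever the completion carries tool calls, so callers should branch on the result type. A hedged sketch, using the functions.prompty that ships in the tests:

    import json
    import prompty
    import prompty.azure
    from prompty.core import ToolCall

    result = prompty.execute("tests/prompts/functions.prompty")
    if isinstance(result, list) and all(isinstance(r, ToolCall) for r in result):
        # the model requested tool invocations instead of answering directly
        for call in result:
            print(call.id, call.name, json.loads(call.arguments))
    else:
        print(result)  # plain assistant text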
prompty-0.1.12/prompty/cli.py
@@ -0,0 +1,117 @@
+import os
+import json
+import click
+import importlib
+
+from pathlib import Path
+from pydantic import BaseModel
+
+import prompty
+from prompty.tracer import trace, PromptyTracer, console_tracer, Tracer
+from dotenv import load_dotenv
+
+load_dotenv()
+
+
+def normalize_path(p, create_dir=False) -> Path:
+    path = Path(p)
+    if not path.is_absolute():
+        path = Path(os.getcwd()).joinpath(path).absolute().resolve()
+    else:
+        path = path.absolute().resolve()
+
+    if create_dir:
+        if not path.exists():
+            print(f"Creating directory {str(path)}")
+            os.makedirs(str(path))
+
+    return path
+
+def dynamic_import(module: str):
+    t = module if "." in module else f"prompty.{module}"
+    print(f"Loading invokers from {t}")
+    importlib.import_module(t)
+
+
+@trace
+def chat_mode(prompt_path: str):
+    W = "\033[0m"  # white (normal)
+    R = "\033[31m"  # red
+    G = "\033[32m"  # green
+    O = "\033[33m"  # orange
+    B = "\033[34m"  # blue
+    P = "\033[35m"  # purple
+    print(f"Executing {str(prompt_path)} in chat mode...")
+    p = prompty.load(str(prompt_path))
+    if "chat_history" not in p.sample:
+        print(
+            f"{R}{str(prompt_path)} needs to have a chat_history input to work in chat mode{W}"
+        )
+        return
+    else:
+
+        try:
+            # load executor / processor types
+            dynamic_import(p.model.configuration["type"])
+            chat_history = p.sample["chat_history"]
+            while True:
+                user_input = input(f"\n{B}User:{W} ")
+                if user_input == "exit":
+                    break
+                # reloadable prompty file
+                chat_history.append({"role": "user", "content": user_input})
+                result = prompty.execute(prompt_path, inputs={"chat_history": chat_history})
+                print(f"\n{G}Assistant:{W} {result}")
+                chat_history.append({"role": "assistant", "content": result})
+        except Exception as e:
+            print(f"{type(e).__qualname__}: {e}")
+
+    print(f"\n{R}Goodbye!{W}\n")
+
+
+@trace
+def execute(prompt_path: str, raw=False):
+    p = prompty.load(prompt_path)
+
+    try:
+        # load executor / processor types
+        dynamic_import(p.model.configuration["type"])
+
+        result = prompty.execute(p, raw=raw)
+        if issubclass(type(result), BaseModel):
+            print("\n", json.dumps(result.model_dump(), indent=4), "\n")
+        elif isinstance(result, list):
+            print(
+                "\n", json.dumps([item.model_dump() for item in result], indent=4), "\n"
+            )
+        else:
+            print("\n", result, "\n")
+    except Exception as e:
+        print(f"{type(e).__qualname__}: {e}", "\n")
+
+
+@click.command()
+@click.option("--source", "-s", required=True)
+@click.option("--verbose", "-v", is_flag=True)
+@click.option("--chat", "-c", is_flag=True)
+@click.version_option()
+def run(source, verbose, chat):
+    prompt_path = normalize_path(source)
+    if not prompt_path.exists():
+        print(f"{str(prompt_path)} does not exist")
+        return
+
+    if verbose:
+        Tracer.add("console", console_tracer)
+
+    ptrace = PromptyTracer()
+    Tracer.add("prompty", ptrace.tracer)
+
+    if chat:
+        chat_mode(str(prompt_path))
+    else:
+        execute(str(prompt_path), raw=verbose)
+
+
+if __name__ == "__main__":
+    chat_mode(source="./tests/prompts/basic.prompt")
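
run is a standard click command, so besides shell invocation (assuming the console-script entry point is exposed as prompty, e.g. prompty -s ./tests/prompts/chat.prompty --chat; the pyproject.toml hunks are not included above), it can be driven programmatically with click's own test runner; a minimal sketch:

    from click.testing import CliRunner
    from prompty.cli import run

    runner = CliRunner()
    # -v registers the console tracer and passes raw=True into execute()
    result = runner.invoke(run, ["--source", "tests/prompts/basic.prompty", "-v"])
    print(result.output)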
{prompty-0.1.10 → prompty-0.1.12}/prompty/core.py
@@ -11,6 +11,12 @@ from pydantic import BaseModel, Field, FilePath
 from typing import AsyncIterator, Iterator, List, Literal, Dict, Callable, Set


+class ToolCall(BaseModel):
+    id: str
+    name: str
+    arguments: str
+
+
 class PropertySettings(BaseModel):
     """PropertySettings class to define the properties of the model

@@ -207,14 +213,16 @@ class Prompty(BaseModel):
             raise FileNotFoundError(f"File {file} not found")

     @staticmethod
-    def _process_env(variable: str, env_error=True) -> any:
+    def _process_env(variable: str, env_error=True, default: str = None) -> any:
         if variable in os.environ.keys():
             return os.environ[variable]
         else:
+            if default:
+                return default
             if env_error:
                 raise ValueError(f"Variable {variable} not found in environment")
-            else:
-                return ""
+
+            return ""

     @staticmethod
     def normalize(attribute: any, parent: Path, env_error=True) -> any:
@@ -224,7 +232,11 @@ class Prompty(BaseModel):
         # check if env or file
         variable = attribute[2:-1].split(":")
         if variable[0] == "env" and len(variable) > 1:
-            return Prompty._process_env(variable[1], env_error)
+            return Prompty._process_env(
+                variable[1],
+                env_error,
+                variable[2] if len(variable) > 2 else None,
+            )
         elif variable[0] == "file" and len(variable) > 1:
             return Prompty._process_file(variable[1], parent)
         else:
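
Together, these two hunks add an inline-default form to the environment substitution syntax: ${env:NAME} still raises (or returns an empty string when env_error is False) for an unset NAME, while ${env:NAME:fallback} now returns the fallback. A small sketch (note that normalize splits on every ":", so a default that itself contains a colon, such as a URL, gets truncated):

    import os
    from pathlib import Path
    from prompty.core import Prompty

    os.environ["API_TYPE"] = "azure_openai"
    print(Prompty.normalize("${env:API_TYPE:openai}", Path.cwd()))  # -> azure_openai

    del os.environ["API_TYPE"]
    print(Prompty.normalize("${env:API_TYPE:openai}", Path.cwd()))  # -> openai (the inline default)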
{prompty-0.1.10 → prompty-0.1.12}/prompty/core.py
@@ -331,6 +343,22 @@ class InvokerFactory:
     _executors: Dict[str, Invoker] = {}
     _processors: Dict[str, Invoker] = {}

+    @classmethod
+    def add_renderer(cls, name: str, invoker: Invoker) -> None:
+        cls._renderers[name] = invoker
+
+    @classmethod
+    def add_parser(cls, name: str, invoker: Invoker) -> None:
+        cls._parsers[name] = invoker
+
+    @classmethod
+    def add_executor(cls, name: str, invoker: Invoker) -> None:
+        cls._executors[name] = invoker
+
+    @classmethod
+    def add_processor(cls, name: str, invoker: Invoker) -> None:
+        cls._processors[name] = invoker
+
     @classmethod
     def register_renderer(cls, name: str) -> Callable:
         def inner_wrapper(wrapped_class: Invoker) -> Callable:
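
The new add_* classmethods write to the same registries as the register_* decorators, which is what lets the test suite substitute canned invokers without touching a live backend. A hedged sketch modeled on the shipped tests/fake_azure_executor.py (the FakeAzureExecutor class name is inferred from the module name):

    from prompty.core import InvokerFactory
    from prompty.azure.processor import AzureOpenAIProcessor
    from tests.fake_azure_executor import FakeAzureExecutor  # test double shipped in this release

    # route "azure"-typed prompty files to the fake instead of a real AzureOpenAI client
    InvokerFactory.add_executor("azure", FakeAzureExecutor)
    InvokerFactory.add_executor("azure_openai", FakeAzureExecutor)
    InvokerFactory.add_processor("azure", AzureOpenAIProcessor)
    InvokerFactory.add_processor("azure_openai", AzureOpenAIProcessor)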
{prompty-0.1.10 → prompty-0.1.12}/prompty/core.py
@@ -474,9 +502,9 @@ class PromptyStream(Iterator):
         except StopIteration:
             # StopIteration is raised
             # contents are exhausted
-            if len(self.items) > 0:
+            if len(self.items) > 0:
                 with Tracer.start(f"{self.name}.PromptyStream") as trace:
-                    trace("items", [to_dict(s) for s in self.items])
+                    trace("result", [to_dict(s) for s in self.items])

             raise StopIteration

@@ -506,6 +534,6 @@ class AsyncPromptyStream(AsyncIterator):
             # contents are exhausted
             if len(self.items) > 0:
                 with Tracer.start(f"{self.name}.AsyncPromptyStream") as trace:
-                    trace("items", [to_dict(s) for s in self.items])
+                    trace("result", [to_dict(s) for s in self.items])

             raise StopIteration
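
Renaming the trace key from "items" to "result" means an exhausted stream lands in trace output under the same key that non-streaming invocations use. A consumer sketch against the streaming prompty in the tests:

    import prompty
    import prompty.azure
    from prompty.tracer import Tracer, console_tracer

    Tracer.add("console", console_tracer)

    # tokens stream back through a PromptyStream; once it is exhausted, the
    # accumulated chunks are traced as a single "result" entry
    for token in prompty.execute("tests/prompts/streaming.prompty"):
        print(token, end="")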
prompty-0.1.12/prompty/openai/__init__.py
@@ -0,0 +1,3 @@
+# __init__.py
+from .executor import AzureOpenAIExecutor
+from .processor import AzureOpenAIProcessor
prompty-0.1.12/prompty/openai/executor.py
@@ -0,0 +1,74 @@
+import importlib.metadata
+from openai import OpenAI
+from typing import Iterator
+from ..core import Invoker, InvokerFactory, Prompty, PromptyStream
+
+VERSION = importlib.metadata.version("prompty")
+
+
+@InvokerFactory.register_executor("openai")
+class AzureOpenAIExecutor(Invoker):
+    """OpenAI Executor"""
+
+    def __init__(self, prompty: Prompty) -> None:
+        super().__init__(prompty)
+        kwargs = {
+            key: value
+            for key, value in self.prompty.model.configuration.items()
+            if key != "type"
+        }
+
+        self.client = OpenAI(
+            default_headers={
+                "User-Agent": f"prompty/{VERSION}",
+                "x-ms-useragent": f"prompty/{VERSION}",
+            },
+            **kwargs,
+        )
+
+        self.api = self.prompty.model.api
+        self.deployment = self.prompty.model.configuration["azure_deployment"]
+        self.parameters = self.prompty.model.parameters
+
+    def invoke(self, data: any) -> any:
+        """Invoke the OpenAI API
+
+        Parameters
+        ----------
+        data : any
+            The data to send to the OpenAI API
+
+        Returns
+        -------
+        any
+            The response from the OpenAI API
+        """
+        if self.api == "chat":
+            response = self.client.chat.completions.create(
+                model=self.deployment,
+                messages=data if isinstance(data, list) else [data],
+                **self.parameters,
+            )
+
+        elif self.api == "completion":
+            response = self.client.completions.create(
+                prompt=data.item,
+                model=self.deployment,
+                **self.parameters,
+            )
+
+        elif self.api == "embedding":
+            response = self.client.embeddings.create(
+                input=data if isinstance(data, list) else [data],
+                model=self.deployment,
+                **self.parameters,
+            )
+
+        elif self.api == "image":
+            raise NotImplementedError("OpenAI Image API is not implemented yet")
+
+        # stream response
+        if isinstance(response, Iterator):
+            return PromptyStream("OpenAIExecutor", response)
+        else:
+            return response
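
As released, this executor is a near-verbatim copy of the Azure one: it keeps the AzureOpenAIExecutor class name, reads the model id from the azure_deployment configuration key, and forwards every other configuration entry straight into OpenAI(...). The registration side, at least, is easy to verify; a small sketch:

    import prompty.openai  # importing the subpackage runs the register_* decorators
    from prompty.core import InvokerFactory

    assert "openai" in InvokerFactory._executors   # AzureOpenAIExecutor (sic)
    assert "openai" in InvokerFactory._processors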
prompty-0.1.12/prompty/openai/processor.py
@@ -0,0 +1,65 @@
+from typing import Iterator
+from openai.types.completion import Completion
+from openai.types.chat.chat_completion import ChatCompletion
+from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
+from openai.types.create_embedding_response import CreateEmbeddingResponse
+
+
+@InvokerFactory.register_processor("openai")
+class AzureOpenAIProcessor(Invoker):
+    """OpenAI Processor"""
+
+    def __init__(self, prompty: Prompty) -> None:
+        super().__init__(prompty)
+
+    def invoke(self, data: any) -> any:
+        """Invoke the OpenAI API
+
+        Parameters
+        ----------
+        data : any
+            The data to send to the OpenAI API
+
+        Returns
+        -------
+        any
+            The response from the OpenAI API
+        """
+        if isinstance(data, ChatCompletion):
+            response = data.choices[0].message
+            # tool calls available in response
+            if response.tool_calls:
+                return [
+                    ToolCall(
+                        id=tool_call.id,
+                        name=tool_call.function.name,
+                        arguments=tool_call.function.arguments,
+                    )
+                    for tool_call in response.tool_calls
+                ]
+            else:
+                return response.content
+
+        elif isinstance(data, Completion):
+            return data.choices[0].text
+        elif isinstance(data, CreateEmbeddingResponse):
+            if len(data.data) == 0:
+                raise ValueError("Invalid data")
+            elif len(data.data) == 1:
+                return data.data[0].embedding
+            else:
+                return [item.embedding for item in data.data]
+        elif isinstance(data, Iterator):
+
+            def generator():
+                for chunk in data:
+                    if (
+                        len(chunk.choices) == 1
+                        and chunk.choices[0].delta.content != None
+                    ):
+                        content = chunk.choices[0].delta.content
+                        yield content
+
+            return PromptyStream("OpenAIProcessor", generator())
+        else:
+            return data
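
The streaming branch filters out chunks whose delta carries no content (role-only deltas, finish chunks) before yielding. That filter is easy to isolate with stand-in objects; a self-contained sketch of just that logic:

    from types import SimpleNamespace

    def chunk(content):
        # mimics the attributes the generator reads from an OpenAI stream chunk
        return SimpleNamespace(choices=[SimpleNamespace(delta=SimpleNamespace(content=content))])

    stream = [chunk("Hel"), chunk(None), chunk("lo")]
    tokens = [
        c.choices[0].delta.content
        for c in stream
        if len(c.choices) == 1 and c.choices[0].delta.content is not None
    ]
    print("".join(tokens))  # -> Hello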
prompty-0.1.12/prompty/serverless/__init__.py
@@ -0,0 +1,3 @@
+# __init__.py
+from .executor import ServerlessExecutor
+from .processor import ServerlessProcessor
prompty-0.1.12/prompty/serverless/executor.py
@@ -0,0 +1,82 @@
+import importlib.metadata
+from typing import Iterator
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.inference import (
+    ChatCompletionsClient,
+    EmbeddingsClient,
+)
+from azure.ai.inference.models import (
+    StreamingChatCompletions,
+    AsyncStreamingChatCompletions,
+)
+from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, AsyncPromptyStream
+
+VERSION = importlib.metadata.version("prompty")
+
+
+@InvokerFactory.register_executor("serverless")
+class ServerlessExecutor(Invoker):
+    """Azure OpenAI Executor"""
+
+    def __init__(self, prompty: Prompty) -> None:
+        super().__init__(prompty)
+
+        # serverless configuration
+        self.endpoint = self.prompty.model.configuration["endpoint"]
+        self.model = self.prompty.model.configuration["model"]
+        self.key = self.prompty.model.configuration["key"]
+
+        # api type
+        self.api = self.prompty.model.api
+
+    def invoke(self, data: any) -> any:
+        """Invoke the Serverless SDK
+
+        Parameters
+        ----------
+        data : any
+            The data to send to the Serverless SDK
+
+        Returns
+        -------
+        any
+            The response from the Serverless SDK
+        """
+        if self.api == "chat":
+            response = ChatCompletionsClient(
+                endpoint=self.endpoint,
+                credential=AzureKeyCredential(self.key),
+            ).complete(
+                model=self.model,
+                messages=data if isinstance(data, list) else [data],
+                **self.prompty.model.parameters,
+            )
+
+        elif self.api == "completion":
+            raise NotImplementedError(
+                "Serverless Completions API is not implemented yet"
+            )
+
+        elif self.api == "embedding":
+            response = EmbeddingsClient(
+                endpoint=self.endpoint,
+                credential=AzureKeyCredential(self.key),
+            ).complete(
+                model=self.model,
+                input=data if isinstance(data, list) else [data],
+                **self.prompty.model.parameters,
+            )
+
+        elif self.api == "image":
+            raise NotImplementedError("Azure OpenAI Image API is not implemented yet")
+
+        # stream response
+        if isinstance(response, Iterator):
+            if isinstance(response, StreamingChatCompletions):
+                return PromptyStream("ServerlessExecutor", response)
+            elif isinstance(response, AsyncStreamingChatCompletions):
+                return AsyncPromptyStream("ServerlessExecutor", response)
+            return PromptyStream("ServerlessExecutor", response)
+        else:
+
+            return response
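
The executor reads endpoint, model, and key directly from model.configuration, which pairs naturally with the new ${env:...} substitution in core.py. A hedged sketch (the environment variable names are illustrative; tests/prompts/serverless.prompty shows the real shape):

    import prompty
    import prompty.serverless  # registers the "serverless" executor and processor

    # hypothetical frontmatter for the file below:
    #   model:
    #     api: chat
    #     configuration:
    #       type: serverless
    #       endpoint: ${env:SERVERLESS_ENDPOINT}
    #       model: ${env:SERVERLESS_MODEL}
    #       key: ${env:SERVERLESS_KEY}
    print(prompty.execute("serverless.prompty"))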
prompty-0.1.12/prompty/serverless/processor.py
@@ -0,0 +1,62 @@
+from typing import Iterator
+from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
+
+from azure.ai.inference.models import ChatCompletions, EmbeddingsResult
+
+
+@InvokerFactory.register_processor("serverless")
+class ServerlessProcessor(Invoker):
+    """OpenAI Processor"""
+
+    def __init__(self, prompty: Prompty) -> None:
+        super().__init__(prompty)
+
+    def invoke(self, data: any) -> any:
+        """Invoke the OpenAI API
+
+        Parameters
+        ----------
+        data : any
+            The data to send to the OpenAI API
+
+        Returns
+        -------
+        any
+            The response from the OpenAI API
+        """
+        if isinstance(data, ChatCompletions):
+            response = data.choices[0].message
+            # tool calls available in response
+            if response.tool_calls:
+                return [
+                    ToolCall(
+                        id=tool_call.id,
+                        name=tool_call.function.name,
+                        arguments=tool_call.function.arguments,
+                    )
+                    for tool_call in response.tool_calls
+                ]
+            else:
+                return response.content
+
+        elif isinstance(data, EmbeddingsResult):
+            if len(data.data) == 0:
+                raise ValueError("Invalid data")
+            elif len(data.data) == 1:
+                return data.data[0].embedding
+            else:
+                return [item.embedding for item in data.data]
+        elif isinstance(data, Iterator):
+
+            def generator():
+                for chunk in data:
+                    if (
+                        len(chunk.choices) == 1
+                        and chunk.choices[0].delta.content != None
+                    ):
+                        content = chunk.choices[0].delta.content
+                        yield content
+
+            return PromptyStream("ServerlessProcessor", generator())
+        else:
+            return data