prompty 0.1.38__py3-none-any.whl → 0.1.40__py3-none-any.whl

prompty/azure/executor.py CHANGED
@@ -1,10 +1,12 @@
+ import json
  import azure.identity
  import importlib.metadata
  from typing import AsyncIterator, Iterator
- from openai import AzureOpenAI, AsyncAzureOpenAI
+ from openai import APIResponse, AzureOpenAI, AsyncAzureOpenAI

- from prompty.tracer import Tracer
+ from prompty.tracer import Tracer, sanitize
  from ..core import AsyncPromptyStream, Prompty, PromptyStream
+ from openai.types.chat.chat_completion import ChatCompletion
  from ..invoker import Invoker, InvokerFactory

  VERSION = importlib.metadata.version("prompty")
@@ -86,7 +88,21 @@ class AzureOpenAIExecutor(Invoker):
                      **self.parameters,
                  }
                  trace("inputs", args)
-                 response = client.chat.completions.create(**args)
+
+                 if "stream" in args and args["stream"] == True:
+                     response = client.chat.completions.create(**args)
+                 else:
+                     raw: APIResponse = client.chat.completions.with_raw_response.create(
+                         **args
+                     )
+                     response = ChatCompletion.model_validate_json(raw.text)
+
+                     for k, v in raw.headers.raw:
+                         trace(k.decode("utf-8"), v.decode("utf-8"))
+
+                     trace("request_id", raw.request_id)
+                     trace("retries_taken", raw.retries_taken)
+
                  trace("result", response)

              elif self.api == "completion":
@@ -171,7 +187,20 @@ class AzureOpenAIExecutor(Invoker):
                      **self.parameters,
                  }
                  trace("inputs", args)
-                 response = await client.chat.completions.create(**args)
+
+                 if "stream" in args and args["stream"] == True:
+                     response = await client.chat.completions.create(**args)
+                 else:
+                     raw: APIResponse = await client.chat.completions.with_raw_response.create(
+                         **args
+                     )
+                     response = ChatCompletion.model_validate_json(raw.text)
+                     for k, v in raw.headers.raw:
+                         trace(k.decode("utf-8"), v.decode("utf-8"))
+
+                     trace("request_id", raw.request_id)
+                     trace("retries_taken", raw.retries_taken)
+
                  trace("result", response)

              elif self.api == "completion":
@@ -182,6 +211,7 @@ class AzureOpenAIExecutor(Invoker):
                      **self.parameters,
                  }
                  trace("inputs", args)
+
                  response = await client.completions.create(**args)
                  trace("result", response)

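The non-streaming chat path now goes through with_raw_response so the executor can trace response headers, the request id, and the retry count while still returning a parsed ChatCompletion. Below is a minimal standalone sketch of the same pattern, assuming Azure OpenAI credentials come from the standard environment variables; the api_version and the "gpt-4o" deployment name are placeholders, not values taken from this diff.

# Standalone sketch of the raw-response pattern introduced above (not prompty code).
# Assumes AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_API_KEY are set; the api_version and
# the "gpt-4o" deployment name are placeholders.
from openai import AzureOpenAI
from openai.types.chat.chat_completion import ChatCompletion

client = AzureOpenAI(api_version="2024-06-01")

args = {
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}],
}

# with_raw_response keeps the HTTP layer available alongside the parsed body
raw = client.chat.completions.with_raw_response.create(**args)
response = ChatCompletion.model_validate_json(raw.text)

# headers.raw yields (bytes, bytes) pairs, hence the decode calls in the executor
for k, v in raw.headers.raw:
    print(k.decode("utf-8"), v.decode("utf-8"))

print("request_id:", raw.request_id)
print("retries_taken:", raw.retries_taken)
print(response.choices[0].message.content)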
prompty/cli.py CHANGED
@@ -2,6 +2,7 @@ import os
  import json
  import click
  import importlib
+ from typing import Any, Dict, Optional

  from pathlib import Path
  from pydantic import BaseModel
@@ -32,6 +33,8 @@ def dynamic_import(module: str):
          t = "prompty.azure"
      elif module == "serverless":
          t = "prompty.serverless"
+     elif module == "openai":
+         t = "prompty.openai"
      else:
          t = module

@@ -78,14 +81,16 @@ def chat_mode(prompt_path: str):


  @trace
- def execute(prompt_path: str, raw=False):
+ def execute(prompt_path: str, inputs: Optional[Dict[str, Any]] = None, raw=False):
      p = prompty.load(prompt_path)

+     inputs = inputs or {}
+
      try:
          # load executor / processor types
          dynamic_import(p.model.configuration["type"])

-         result = prompty.execute(p, raw=raw)
+         result = prompty.execute(p, inputs=inputs, raw=raw)
          if issubclass(type(result), BaseModel):
              print("\n", json.dumps(result.model_dump(), indent=4), "\n")
          elif isinstance(result, list):
@@ -98,13 +103,42 @@ def execute(prompt_path: str, raw=False):
          print(f"{type(e).__qualname__}: {e}", "\n")


- @click.command()
+ def _attributes_to_dict(
+     ctx: click.Context, attribute: click.Option, attributes: tuple[str, ...]
+ ) -> dict[str, str]:
+     """Click callback that converts attributes specified in the form `key=value` to a
+     dictionary"""
+     result = {}
+     for arg in attributes:
+         k, v = arg.split("=")
+         if k in result:
+             raise click.BadParameter(f"Attribute {k!r} is specified twice")
+         if v == "@-":
+             v = click.get_text_stream("stdin").read()
+         if v.startswith("@"):
+             v = Path(v[1:]).read_text()
+         result[k] = v
+
+     return result
+
+
+ @click.command(epilog="""
+ \b
+ INPUTS: key=value pairs
+ The values can come from:
+ - plain strings - e.g.: question="Does it have windows?"
+ - files - e.g.: question=@question.txt
+ - stdin - e.g.: question=@-
+
+ For more information, visit https://prompty.ai/
+ """)
  @click.option("--source", "-s", required=True)
  @click.option("--env", "-e", required=False)
  @click.option("--verbose", "-v", is_flag=True)
  @click.option("--chat", "-c", is_flag=True)
+ @click.argument("inputs", nargs=-1, callback=_attributes_to_dict)
  @click.version_option()
- def run(source, env, verbose, chat):
+ def run(source, env, verbose, chat, inputs):
      # load external env file
      if env:
          print(f"Loading environment variables from {env}")
@@ -124,7 +158,7 @@ def run(source, env, verbose, chat):
      if chat:
          chat_mode(str(prompt_path))
      else:
-         execute(str(prompt_path), raw=verbose)
+         execute(str(prompt_path), inputs=inputs, raw=verbose)


  if __name__ == "__main__":
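Together with _attributes_to_dict, the new INPUTS argument lets the CLI accept key=value pairs whose values are literal strings, @file references, or @- for stdin, as described in the epilog above. The sketch below exercises this through click's CliRunner; the prompt file "basic.prompty" and the "question" input are placeholders rather than anything shipped with the package.

# Illustrative only: drives the new key=value INPUTS through click's CliRunner.
from click.testing import CliRunner
from prompty.cli import run

runner = CliRunner()

# plain string value
result = runner.invoke(run, ["--source", "basic.prompty", "question=Does it have windows?"])
print(result.output)

# value read from a file via the @ prefix
result = runner.invoke(run, ["--source", "basic.prompty", "question=@question.txt"])

# value read from stdin via @-
result = runner.invoke(
    run, ["--source", "basic.prompty", "question=@-"], input="Does it have windows?"
)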
prompty/core.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
  import os
  from pathlib import Path

- from .tracer import Tracer, to_dict
+ from .tracer import Tracer, to_dict, sanitize
  from pydantic import BaseModel, Field, FilePath
  from typing import AsyncIterator, Iterator, List, Literal, Dict, Callable, Set, Tuple

@@ -88,10 +88,7 @@ class ModelSettings(BaseModel):
              serialize_as_any=serialize_as_any,
          )

-         d["configuration"] = {
-             k: "*" * len(v) if "key" in k.lower() or "secret" in k.lower() else v
-             for k, v in d["configuration"].items()
-         }
+         d["configuration"] = {k: sanitize(k, v) for k, v in d["configuration"].items()}
          return d


prompty/openai/__init__.py CHANGED
@@ -4,7 +4,7 @@ from prompty.invoker import InvokerException
  try:
      from .executor import OpenAIExecutor
      from .processor import OpenAIProcessor
- except ImportError:
+ except ImportError as e:
      raise InvokerException(
-         "Error registering OpenAIExecutor and OpenAIProcessor", "openai"
+         f"Error registering OpenAIExecutor and OpenAIProcessor: {e}", "openai"
      )
prompty/openai/executor.py CHANGED
@@ -18,12 +18,12 @@ class OpenAIExecutor(Invoker):
          self.kwargs = {
              key: value
              for key, value in self.prompty.model.configuration.items()
-             if key != "type"
+             if key != "type" and key != "name"
          }

          self.api = self.prompty.model.api
-         self.deployment = self.prompty.model.configuration["azure_deployment"]
          self.parameters = self.prompty.model.parameters
+         self.model = self.prompty.model.configuration["name"]

      def invoke(self, data: any) -> any:
          """Invoke the OpenAI API
@@ -59,7 +59,7 @@ class OpenAIExecutor(Invoker):
              if self.api == "chat":
                  trace("signature", "OpenAI.chat.completions.create")
                  args = {
-                     "model": self.deployment,
+                     "model": self.model,
                      "messages": data if isinstance(data, list) else [data],
                      **self.parameters,
                  }
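The OpenAI executor now takes the model from the "name" entry of the configuration instead of the Azure-specific "azure_deployment", and keeps both "type" and "name" out of the kwargs forwarded to the client constructor. A small sketch of that filtering, using a hypothetical configuration dict:

# Hypothetical configuration block for an OpenAI-backed prompty; every key other than
# "type" and "name" is forwarded to the OpenAI client constructor as a kwarg.
configuration = {
    "type": "openai",            # selects the prompty.openai invokers
    "name": "gpt-4o-mini",       # placeholder model name, becomes args["model"]
    "organization": "org-xxxx",  # example passthrough kwarg for OpenAI(...)
}

client_kwargs = {
    key: value
    for key, value in configuration.items()
    if key != "type" and key != "name"
}
model = configuration["name"]

print(client_kwargs)  # {'organization': 'org-xxxx'}
print(model)          # gpt-4o-mini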
prompty/tracer.py CHANGED
@@ -16,9 +16,9 @@ from typing import Any, Callable, Dict, Iterator, List
  # clean up key value pairs for sensitive values
  def sanitize(key: str, value: Any) -> Any:
      if isinstance(value, str) and any(
-         [s in key.lower() for s in ["key", "token", "secret", "password", "credential"]]
+         [s in key.lower() for s in ["key", "secret", "password", "credential"]]
      ):
-         return len(str(value)) * "*"
+         return 10 * "*"
      elif isinstance(value, dict):
          return {k: sanitize(k, v) for k, v in value.items()}
      else:
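After this change, sanitize masks any string whose key contains "key", "secret", "password", or "credential" with a fixed ten-character mask instead of one asterisk per character (which leaked the secret's length), and it still recurses into nested dicts; core.py above now reuses it for the traced configuration. A quick illustration of the expected behaviour, with placeholder secret values:

from prompty.tracer import sanitize

config = {
    "type": "azure_openai",
    "api_key": "sk-placeholder-value",           # placeholder secret, key contains "key"
    "nested": {"client_secret": "also-hidden"},  # nested dicts are sanitized recursively
}

print(sanitize("configuration", config))
# {'type': 'azure_openai', 'api_key': '**********', 'nested': {'client_secret': '**********'}}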
prompty-0.1.38.dist-info/METADATA → prompty-0.1.40.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: prompty
- Version: 0.1.38
+ Version: 0.1.40
  Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
  Author-Email: Seth Juarez <seth.juarez@microsoft.com>
  License: MIT
prompty-0.1.38.dist-info/RECORD → prompty-0.1.40.dist-info/RECORD CHANGED
@@ -1,24 +1,24 @@
- prompty-0.1.38.dist-info/METADATA,sha256=m7yAPiF7NsDcD6RQMfdkSbBM-w9uphfEQY_4mwUaJ1w,9164
- prompty-0.1.38.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
- prompty-0.1.38.dist-info/entry_points.txt,sha256=a3i7Kvf--3DOkkv9VQpstwaNKgsnXwDGaPL18lPpKeI,60
- prompty-0.1.38.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
+ prompty-0.1.40.dist-info/METADATA,sha256=W1QjkCeD1gnusTefPLneqE0zXfQImfR4bV9QbDFE4-c,9164
+ prompty-0.1.40.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+ prompty-0.1.40.dist-info/entry_points.txt,sha256=a3i7Kvf--3DOkkv9VQpstwaNKgsnXwDGaPL18lPpKeI,60
+ prompty-0.1.40.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
  prompty/__init__.py,sha256=HCAvInBgNcIDO54rR4-RDIF4KUmGVQ2TRam_dS7xHEk,16561
  prompty/azure/__init__.py,sha256=WI8qeNWfxqggj21bznL-mxGUS-v67bUrunX0Lf2hsI8,295
- prompty/azure/executor.py,sha256=RJXMB0W7KcVvQ7l3xJaau7YM8PqOCQwuN4IwIe0sTLg,7930
+ prompty/azure/executor.py,sha256=LZG0U5AZB4H6CDfZxlsvcUnmq0LnTxMEFgLd9nzHAgc,9126
  prompty/azure/processor.py,sha256=-CWc_1h4xdb0nyHwUkaI40NtzTxxenCXkgjJTh76AOk,5079
  prompty/azure_beta/__init__.py,sha256=QF4qcILpsryBLl1nvc1AhRzkKI2uqc6OAU_fA3LISNE,361
  prompty/azure_beta/executor.py,sha256=PIPfeOTLk9YEM80adktL2zxpa51gO4itlQzUDoq0QVg,9896
- prompty/cli.py,sha256=k8Rxm41fMFNvmnsX737UiN6v-7756tpoJPN4rPXMNcU,3726
- prompty/core.py,sha256=EvkXV_mH7Mj1skT21XMZ4VX-Jlwx6AF-WEJ9yPc50AE,13061
+ prompty/cli.py,sha256=lo9mxw9V1gkeVcZZ2wkdAo_csmKTPpGfrr7XYmxDUr8,4840
+ prompty/core.py,sha256=haX415_MciCn6K0zU4nZ394ZaJemZe9-iOsdRbe9G6A,12985
  prompty/invoker.py,sha256=O77E5iQ1552wQXxL8FhZGERbCi_0O3mDTd5Ozqw-O-E,8593
- prompty/openai/__init__.py,sha256=hbBhgCwB_uSq-1NWL02yiOiNkyi39-G-AyVlTSgKTkU,276
- prompty/openai/executor.py,sha256=qkFSMA-pWlA1c602Dx5aR1cFEOnYsUUp_E7P3zFhSPs,3644
+ prompty/openai/__init__.py,sha256=XX9j2zX_qt4saFJdZYWUNnrNnetHSnHC8dJ9YyFedL0,287
+ prompty/openai/executor.py,sha256=8i5z_OxcESSYcjkDYIZwftr6YMvOHsOypzbNSCiLQ-Q,3640
  prompty/openai/processor.py,sha256=l9-91_CCgRtYvkwMO-jV6rkgeCA4gV_MFamQcvoNGQ0,2499
  prompty/parsers.py,sha256=zHqcRpFPUDG6BOI7ipaJf6yGc6ZbKnsLmO7jKEYNct4,5013
  prompty/renderers.py,sha256=80HNtCp3osgaLfhKxkG4j1kiRhJ727ITzT_yL5JLjEQ,1104
  prompty/serverless/__init__.py,sha256=xoXOTRXO8C631swNKaa-ek5_R3X-87bJpTm0z_Rsg6A,282
  prompty/serverless/executor.py,sha256=PUDJsYcJLQx9JSTh-R3HdJd0ehEC6w2Ch5OEqz52uVI,8395
  prompty/serverless/processor.py,sha256=ZSL9y8JC-G4qbtWOSbQAqEcFMWEaLskyOr5VjLthelU,3660
- prompty/tracer.py,sha256=GdCvqJLiW6PEyW2lnElSD45y4IBmgw5gtNczImbg-44,11700
+ prompty/tracer.py,sha256=VcrXkM71VO45xWY2b7H2AdCp5bSsCtcAv1m00ycf7XM,11678
  prompty/utils.py,sha256=jm7HEzOGk3zz8d5aquXK3zWIQWuDpBpJTzlz5sswtdg,2836
- prompty-0.1.38.dist-info/RECORD,,
+ prompty-0.1.40.dist-info/RECORD,,