prompty 0.1.38.tar.gz → 0.1.40.tar.gz

Files changed (82)
  1. {prompty-0.1.38 → prompty-0.1.40}/PKG-INFO +1 -1
  2. {prompty-0.1.38 → prompty-0.1.40}/prompty/azure/executor.py +34 -4
  3. {prompty-0.1.38 → prompty-0.1.40}/prompty/cli.py +39 -5
  4. {prompty-0.1.38 → prompty-0.1.40}/prompty/core.py +2 -5
  5. {prompty-0.1.38 → prompty-0.1.40}/prompty/openai/__init__.py +2 -2
  6. {prompty-0.1.38 → prompty-0.1.40}/prompty/openai/executor.py +3 -3
  7. {prompty-0.1.38 → prompty-0.1.40}/prompty/tracer.py +2 -2
  8. {prompty-0.1.38 → prompty-0.1.40}/pyproject.toml +1 -1
  9. {prompty-0.1.38 → prompty-0.1.40}/LICENSE +0 -0
  10. {prompty-0.1.38 → prompty-0.1.40}/README.md +0 -0
  11. {prompty-0.1.38 → prompty-0.1.40}/prompty/__init__.py +0 -0
  12. {prompty-0.1.38 → prompty-0.1.40}/prompty/azure/__init__.py +0 -0
  13. {prompty-0.1.38 → prompty-0.1.40}/prompty/azure/processor.py +0 -0
  14. {prompty-0.1.38 → prompty-0.1.40}/prompty/azure_beta/__init__.py +0 -0
  15. {prompty-0.1.38 → prompty-0.1.40}/prompty/azure_beta/executor.py +0 -0
  16. {prompty-0.1.38 → prompty-0.1.40}/prompty/invoker.py +0 -0
  17. {prompty-0.1.38 → prompty-0.1.40}/prompty/openai/processor.py +0 -0
  18. {prompty-0.1.38 → prompty-0.1.40}/prompty/parsers.py +0 -0
  19. {prompty-0.1.38 → prompty-0.1.40}/prompty/renderers.py +0 -0
  20. {prompty-0.1.38 → prompty-0.1.40}/prompty/serverless/__init__.py +0 -0
  21. {prompty-0.1.38 → prompty-0.1.40}/prompty/serverless/executor.py +0 -0
  22. {prompty-0.1.38 → prompty-0.1.40}/prompty/serverless/processor.py +0 -0
  23. {prompty-0.1.38 → prompty-0.1.40}/prompty/utils.py +0 -0
  24. {prompty-0.1.38 → prompty-0.1.40}/tests/__init__.py +0 -0
  25. {prompty-0.1.38 → prompty-0.1.40}/tests/fake_azure_executor.py +0 -0
  26. {prompty-0.1.38 → prompty-0.1.40}/tests/fake_serverless_executor.py +0 -0
  27. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/1contoso.md +0 -0
  28. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/2contoso.md +0 -0
  29. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/3contoso.md +0 -0
  30. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/4contoso.md +0 -0
  31. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/basic.prompty.md +0 -0
  32. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/camping.jpg +0 -0
  33. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/context.prompty.md +0 -0
  34. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/contoso_multi.md +0 -0
  35. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/faithfulness.prompty.md +0 -0
  36. {prompty-0.1.38 → prompty-0.1.40}/tests/generated/groundedness.prompty.md +0 -0
  37. {prompty-0.1.38 → prompty-0.1.40}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  38. {prompty-0.1.38 → prompty-0.1.40}/tests/hello_world.embedding.json +0 -0
  39. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/__init__.py +0 -0
  40. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/basic.prompty +0 -0
  41. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/basic.prompty.execution.json +0 -0
  42. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/basic_json_output.prompty +0 -0
  43. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/camping.jpg +0 -0
  44. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/chat.prompty +0 -0
  45. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/context.json +0 -0
  46. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/context.prompty +0 -0
  47. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/context.prompty.execution.json +0 -0
  48. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/embedding.prompty +0 -0
  49. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/embedding.prompty.execution.json +0 -0
  50. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/evaluation.prompty +0 -0
  51. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/faithfulness.prompty +0 -0
  52. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  53. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/fake.prompty +0 -0
  54. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/funcfile.json +0 -0
  55. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/funcfile.prompty +0 -0
  56. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/functions.prompty +0 -0
  57. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/functions.prompty.execution.json +0 -0
  58. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/groundedness.prompty +0 -0
  59. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/groundedness.prompty.execution.json +0 -0
  60. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/prompty.json +0 -0
  61. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/serverless.prompty +0 -0
  62. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/serverless.prompty.execution.json +0 -0
  63. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/serverless_stream.prompty +0 -0
  64. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
  65. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/streaming.prompty +0 -0
  66. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/streaming.prompty.execution.json +0 -0
  67. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/structured_output.prompty +0 -0
  68. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/structured_output.prompty.execution.json +0 -0
  69. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/structured_output_schema.json +0 -0
  70. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/sub/__init__.py +0 -0
  71. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/sub/basic.prompty +0 -0
  72. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/sub/sub/__init__.py +0 -0
  73. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/sub/sub/basic.prompty +0 -0
  74. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/sub/sub/prompty.json +0 -0
  75. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/sub/sub/test.py +0 -0
  76. {prompty-0.1.38 → prompty-0.1.40}/tests/prompts/test.py +0 -0
  77. {prompty-0.1.38 → prompty-0.1.40}/tests/prompty.json +0 -0
  78. {prompty-0.1.38 → prompty-0.1.40}/tests/test_common.py +0 -0
  79. {prompty-0.1.38 → prompty-0.1.40}/tests/test_execute.py +0 -0
  80. {prompty-0.1.38 → prompty-0.1.40}/tests/test_factory_invoker.py +0 -0
  81. {prompty-0.1.38 → prompty-0.1.40}/tests/test_path_exec.py +0 -0
  82. {prompty-0.1.38 → prompty-0.1.40}/tests/test_tracing.py +0 -0
{prompty-0.1.38 → prompty-0.1.40}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prompty
-Version: 0.1.38
+Version: 0.1.40
 Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
 Author-Email: Seth Juarez <seth.juarez@microsoft.com>
 License: MIT
{prompty-0.1.38 → prompty-0.1.40}/prompty/azure/executor.py

@@ -1,10 +1,12 @@
+import json
 import azure.identity
 import importlib.metadata
 from typing import AsyncIterator, Iterator
-from openai import AzureOpenAI, AsyncAzureOpenAI
+from openai import APIResponse, AzureOpenAI, AsyncAzureOpenAI

-from prompty.tracer import Tracer
+from prompty.tracer import Tracer, sanitize
 from ..core import AsyncPromptyStream, Prompty, PromptyStream
+from openai.types.chat.chat_completion import ChatCompletion
 from ..invoker import Invoker, InvokerFactory

 VERSION = importlib.metadata.version("prompty")
@@ -86,7 +88,21 @@ class AzureOpenAIExecutor(Invoker):
                 **self.parameters,
             }
             trace("inputs", args)
-            response = client.chat.completions.create(**args)
+
+            if "stream" in args and args["stream"] == True:
+                response = client.chat.completions.create(**args)
+            else:
+                raw: APIResponse = client.chat.completions.with_raw_response.create(
+                    **args
+                )
+                response = ChatCompletion.model_validate_json(raw.text)
+
+                for k, v in raw.headers.raw:
+                    trace(k.decode("utf-8"), v.decode("utf-8"))
+
+                trace("request_id", raw.request_id)
+                trace("retries_taken", raw.retries_taken)
+
             trace("result", response)

         elif self.api == "completion":
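When streaming is requested the executor keeps the plain create() call, since a streamed response is consumed incrementally; for everything else it now goes through the OpenAI SDK's with_raw_response wrapper, which exposes the HTTP layer a plain create() hides. That is what makes the header, request-id, and retry-count traces above possible. A minimal standalone sketch of the same pattern (client construction and all names below are placeholder assumptions, not taken from this diff; retries_taken requires a recent openai SDK):

    from openai import AzureOpenAI
    from openai.types.chat.chat_completion import ChatCompletion

    # Placeholder endpoint, key, and deployment for illustration only.
    client = AzureOpenAI(
        azure_endpoint="https://example.openai.azure.com",
        api_version="2024-06-01",
        api_key="placeholder-key",
    )

    raw = client.chat.completions.with_raw_response.create(
        model="my-deployment",
        messages=[{"role": "user", "content": "hello"}],
    )

    # Same parsing the executor uses: validate the raw JSON body into a ChatCompletion.
    completion = ChatCompletion.model_validate_json(raw.text)
    # raw.headers is an httpx.Headers; .raw yields (bytes, bytes) pairs, hence the decoding.
    headers = {k.decode("utf-8"): v.decode("utf-8") for k, v in raw.headers.raw}
    print(raw.request_id, raw.retries_taken, completion.choices[0].message.content)

The raw-response object also offers a parse() helper; validating raw.text through the ChatCompletion model, as the executor does, yields the same typed object for a non-streaming body.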
@@ -171,7 +187,20 @@ class AzureOpenAIExecutor(Invoker):
                 **self.parameters,
             }
             trace("inputs", args)
-            response = await client.chat.completions.create(**args)
+
+            if "stream" in args and args["stream"] == True:
+                response = await client.chat.completions.create(**args)
+            else:
+                raw: APIResponse = await client.chat.completions.with_raw_response.create(
+                    **args
+                )
+                response = ChatCompletion.model_validate_json(raw.text)
+                for k, v in raw.headers.raw:
+                    trace(k.decode("utf-8"), v.decode("utf-8"))
+
+                trace("request_id", raw.request_id)
+                trace("retries_taken", raw.retries_taken)
+
             trace("result", response)

         elif self.api == "completion":
@@ -182,6 +211,7 @@ class AzureOpenAIExecutor(Invoker):
                 **self.parameters,
             }
             trace("inputs", args)
+
             response = await client.completions.create(**args)
             trace("result", response)

{prompty-0.1.38 → prompty-0.1.40}/prompty/cli.py

@@ -2,6 +2,7 @@ import os
 import json
 import click
 import importlib
+from typing import Any, Dict, Optional

 from pathlib import Path
 from pydantic import BaseModel
@@ -32,6 +33,8 @@ def dynamic_import(module: str):
         t = "prompty.azure"
     elif module == "serverless":
         t = "prompty.serverless"
+    elif module == "openai":
+        t = "prompty.openai"
     else:
         t = module

@@ -78,14 +81,16 @@ def chat_mode(prompt_path: str):


 @trace
-def execute(prompt_path: str, raw=False):
+def execute(prompt_path: str, inputs: Optional[Dict[str, Any]] = None, raw=False):
     p = prompty.load(prompt_path)

+    inputs = inputs or {}
+
     try:
         # load executor / processor types
         dynamic_import(p.model.configuration["type"])

-        result = prompty.execute(p, raw=raw)
+        result = prompty.execute(p, inputs=inputs, raw=raw)
         if issubclass(type(result), BaseModel):
             print("\n", json.dumps(result.model_dump(), indent=4), "\n")
         elif isinstance(result, list):
@@ -98,13 +103,42 @@ def execute(prompt_path: str, raw=False):
         print(f"{type(e).__qualname__}: {e}", "\n")


-@click.command()
+def _attributes_to_dict(
+    ctx: click.Context, attribute: click.Option, attributes: tuple[str, ...]
+) -> dict[str, str]:
+    """Click callback that converts attributes specified in the form `key=value` to a
+    dictionary"""
+    result = {}
+    for arg in attributes:
+        k, v = arg.split("=")
+        if k in result:
+            raise click.BadParameter(f"Attribute {k!r} is specified twice")
+        if v == "@-":
+            v = click.get_text_stream("stdin").read()
+        if v.startswith("@"):
+            v = Path(v[1:]).read_text()
+        result[k] = v
+
+    return result
+
+
+@click.command(epilog="""
+\b
+INPUTS: key=value pairs
+    The values can come from:
+    - plain strings - e.g.: question="Does it have windows?"
+    - files - e.g.: question=@question.txt
+    - stdin - e.g.: question=@-
+
+For more information, visit https://prompty.ai/
+""")
 @click.option("--source", "-s", required=True)
 @click.option("--env", "-e", required=False)
 @click.option("--verbose", "-v", is_flag=True)
 @click.option("--chat", "-c", is_flag=True)
+@click.argument("inputs", nargs=-1, callback=_attributes_to_dict)
 @click.version_option()
-def run(source, env, verbose, chat):
+def run(source, env, verbose, chat, inputs):
     # load external env file
     if env:
         print(f"Loading environment variables from {env}")
@@ -124,7 +158,7 @@ def run(source, env, verbose, chat):
     if chat:
         chat_mode(str(prompt_path))
     else:
-        execute(str(prompt_path), raw=verbose)
+        execute(str(prompt_path), inputs=inputs, raw=verbose)


 if __name__ == "__main__":
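With the callback and argument wiring above, run() now accepts free-form key=value inputs after its options; per the epilog, a value can be a literal string, @file to read from a file, or @- to read stdin. A hypothetical standalone mirror of the parsing rules (illustration only, not part of the package), useful for seeing the semantics without wiring up click:

    import sys
    from pathlib import Path

    def parse_inputs(attributes: tuple[str, ...]) -> dict[str, str]:
        """Hypothetical mirror of _attributes_to_dict, minus the click plumbing."""
        result: dict[str, str] = {}
        for arg in attributes:
            k, v = arg.split("=")  # like the original, values may not contain '='
            if k in result:
                raise ValueError(f"Attribute {k!r} is specified twice")
            if v == "@-":          # a literal "@-" pulls the value from stdin
                v = sys.stdin.read()
            if v.startswith("@"):  # "@path" reads the value from a file
                v = Path(v[1:]).read_text()
            result[k] = v
        return result

    # parse_inputs(("question=Does it have windows?",))
    # -> {"question": "Does it have windows?"}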
{prompty-0.1.38 → prompty-0.1.40}/prompty/core.py

@@ -3,7 +3,7 @@ from __future__ import annotations
 import os
 from pathlib import Path

-from .tracer import Tracer, to_dict
+from .tracer import Tracer, to_dict, sanitize
 from pydantic import BaseModel, Field, FilePath
 from typing import AsyncIterator, Iterator, List, Literal, Dict, Callable, Set, Tuple

@@ -88,10 +88,7 @@ class ModelSettings(BaseModel):
             serialize_as_any=serialize_as_any,
         )

-        d["configuration"] = {
-            k: "*" * len(v) if "key" in k.lower() or "secret" in k.lower() else v
-            for k, v in d["configuration"].items()
-        }
+        d["configuration"] = {k: sanitize(k, v) for k, v in d["configuration"].items()}
         return d

{prompty-0.1.38 → prompty-0.1.40}/prompty/openai/__init__.py

@@ -4,7 +4,7 @@ from prompty.invoker import InvokerException
 try:
     from .executor import OpenAIExecutor
     from .processor import OpenAIProcessor
-except ImportError:
+except ImportError as e:
     raise InvokerException(
-        "Error registering OpenAIExecutor and OpenAIProcessor", "openai"
+        f"Error registering OpenAIExecutor and OpenAIProcessor: {e}", "openai"
     )
{prompty-0.1.38 → prompty-0.1.40}/prompty/openai/executor.py

@@ -18,12 +18,12 @@ class OpenAIExecutor(Invoker):
         self.kwargs = {
             key: value
             for key, value in self.prompty.model.configuration.items()
-            if key != "type"
+            if key != "type" and key != "name"
         }

         self.api = self.prompty.model.api
-        self.deployment = self.prompty.model.configuration["azure_deployment"]
         self.parameters = self.prompty.model.parameters
+        self.model = self.prompty.model.configuration["name"]

     def invoke(self, data: any) -> any:
         """Invoke the OpenAI API
@@ -59,7 +59,7 @@ class OpenAIExecutor(Invoker):
         if self.api == "chat":
             trace("signature", "OpenAI.chat.completions.create")
             args = {
-                "model": self.deployment,
+                "model": self.model,
                 "messages": data if isinstance(data, list) else [data],
                 **self.parameters,
             }
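Together these two hunks make the OpenAI executor read the model name from configuration["name"] instead of the Azure-specific azure_deployment, and exclude both type and name from the kwargs it retains. A hypothetical configuration dict showing how the keys split (the api_key entry is an assumption for illustration; this diff does not show where self.kwargs is consumed):

    # Hypothetical OpenAI configuration block for a .prompty file.
    configuration = {
        "type": "openai",       # selects the invoker via dynamic_import; never forwarded
        "name": "gpt-4o-mini",  # becomes args["model"] when the request is built
        "api_key": "sk-...",    # assumed extra key: lands in self.kwargs
    }

    kwargs = {k: v for k, v in configuration.items() if k != "type" and k != "name"}
    model = configuration["name"]
    assert kwargs == {"api_key": "sk-..."} and model == "gpt-4o-mini"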
{prompty-0.1.38 → prompty-0.1.40}/prompty/tracer.py

@@ -16,9 +16,9 @@ from typing import Any, Callable, Dict, Iterator, List
 # clean up key value pairs for sensitive values
 def sanitize(key: str, value: Any) -> Any:
     if isinstance(value, str) and any(
-        [s in key.lower() for s in ["key", "token", "secret", "password", "credential"]]
+        [s in key.lower() for s in ["key", "secret", "password", "credential"]]
     ):
-        return len(str(value)) * "*"
+        return 10 * "*"
     elif isinstance(value, dict):
         return {k: sanitize(k, v) for k, v in value.items()}
     else:
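Two behavioral changes hide in this hunk: masked values are now a fixed ten asterisks (so the mask no longer mirrors the secret's length), and "token" was dropped from the match list, so token-named string values pass through unmasked. A quick illustration, using the import path the azure executor diff itself uses:

    from prompty.tracer import sanitize

    sanitize("api_key", "sk-abc123")               # '**********' (fixed width now)
    sanitize("auth_token", "tok-42")               # 'tok-42' ("token" no longer matches)
    sanitize("question", "Does it have windows?")  # non-sensitive values pass through
    sanitize("cfg", {"client_secret": "x"})        # dicts are sanitized recursively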
{prompty-0.1.38 → prompty-0.1.40}/pyproject.toml

@@ -15,7 +15,7 @@ dependencies = [
     "click>=8.1.7",
     "aiofiles>=24.1.0",
 ]
-version = "0.1.38"
+version = "0.1.40"

 [project.license]
 text = "MIT"