prompty 0.1.19.tar.gz → 0.1.20.tar.gz

Files changed (74)
  1. {prompty-0.1.19 → prompty-0.1.20}/PKG-INFO +4 -4
  2. {prompty-0.1.19 → prompty-0.1.20}/README.md +3 -3
  3. {prompty-0.1.19 → prompty-0.1.20}/prompty/azure/executor.py +15 -3
  4. {prompty-0.1.19 → prompty-0.1.20}/prompty/azure/processor.py +12 -0
  5. {prompty-0.1.19 → prompty-0.1.20}/prompty/core.py +3 -1
  6. {prompty-0.1.19 → prompty-0.1.20}/prompty/tracer.py +3 -1
  7. {prompty-0.1.19 → prompty-0.1.20}/pyproject.toml +1 -1
  8. {prompty-0.1.19 → prompty-0.1.20}/tests/test_tracing.py +4 -1
  9. {prompty-0.1.19 → prompty-0.1.20}/LICENSE +0 -0
  10. {prompty-0.1.19 → prompty-0.1.20}/prompty/__init__.py +0 -0
  11. {prompty-0.1.19 → prompty-0.1.20}/prompty/azure/__init__.py +0 -0
  12. {prompty-0.1.19 → prompty-0.1.20}/prompty/cli.py +0 -0
  13. {prompty-0.1.19 → prompty-0.1.20}/prompty/openai/__init__.py +0 -0
  14. {prompty-0.1.19 → prompty-0.1.20}/prompty/openai/executor.py +0 -0
  15. {prompty-0.1.19 → prompty-0.1.20}/prompty/openai/processor.py +0 -0
  16. {prompty-0.1.19 → prompty-0.1.20}/prompty/parsers.py +0 -0
  17. {prompty-0.1.19 → prompty-0.1.20}/prompty/renderers.py +0 -0
  18. {prompty-0.1.19 → prompty-0.1.20}/prompty/serverless/__init__.py +0 -0
  19. {prompty-0.1.19 → prompty-0.1.20}/prompty/serverless/executor.py +0 -0
  20. {prompty-0.1.19 → prompty-0.1.20}/prompty/serverless/processor.py +0 -0
  21. {prompty-0.1.19 → prompty-0.1.20}/tests/fake_azure_executor.py +0 -0
  22. {prompty-0.1.19 → prompty-0.1.20}/tests/fake_serverless_executor.py +0 -0
  23. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/1contoso.md +0 -0
  24. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/2contoso.md +0 -0
  25. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/3contoso.md +0 -0
  26. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/4contoso.md +0 -0
  27. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/basic.prompty.md +0 -0
  28. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/camping.jpg +0 -0
  29. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/context.prompty.md +0 -0
  30. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/contoso_multi.md +0 -0
  31. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/faithfulness.prompty.md +0 -0
  32. {prompty-0.1.19 → prompty-0.1.20}/tests/generated/groundedness.prompty.md +0 -0
  33. {prompty-0.1.19 → prompty-0.1.20}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  34. {prompty-0.1.19 → prompty-0.1.20}/tests/hello_world.embedding.json +0 -0
  35. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/__init__.py +0 -0
  36. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/basic.prompty +0 -0
  37. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/basic.prompty.execution.json +0 -0
  38. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/basic_json_output.prompty +0 -0
  39. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/camping.jpg +0 -0
  40. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/chat.prompty +0 -0
  41. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/context.json +0 -0
  42. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/context.prompty +0 -0
  43. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/context.prompty.execution.json +0 -0
  44. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/embedding.prompty +0 -0
  45. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/embedding.prompty.execution.json +0 -0
  46. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/evaluation.prompty +0 -0
  47. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/faithfulness.prompty +0 -0
  48. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  49. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/fake.prompty +0 -0
  50. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/funcfile.json +0 -0
  51. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/funcfile.prompty +0 -0
  52. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/functions.prompty +0 -0
  53. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/functions.prompty.execution.json +0 -0
  54. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/groundedness.prompty +0 -0
  55. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/groundedness.prompty.execution.json +0 -0
  56. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/prompty.json +0 -0
  57. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/serverless.prompty +0 -0
  58. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/serverless.prompty.execution.json +0 -0
  59. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/serverless_stream.prompty +0 -0
  60. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
  61. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/streaming.prompty +0 -0
  62. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/streaming.prompty.execution.json +0 -0
  63. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/sub/__init__.py +0 -0
  64. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/sub/basic.prompty +0 -0
  65. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/sub/sub/__init__.py +0 -0
  66. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/sub/sub/basic.prompty +0 -0
  67. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/sub/sub/prompty.json +0 -0
  68. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/sub/sub/test.py +0 -0
  69. {prompty-0.1.19 → prompty-0.1.20}/tests/prompts/test.py +0 -0
  70. {prompty-0.1.19 → prompty-0.1.20}/tests/prompty.json +0 -0
  71. {prompty-0.1.19 → prompty-0.1.20}/tests/test_common.py +0 -0
  72. {prompty-0.1.19 → prompty-0.1.20}/tests/test_execute.py +0 -0
  73. {prompty-0.1.19 → prompty-0.1.20}/tests/test_factory_invoker.py +0 -0
  74. {prompty-0.1.19 → prompty-0.1.20}/tests/test_path_exec.py +0 -0
{prompty-0.1.19 → prompty-0.1.20}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prompty
-Version: 0.1.19
+Version: 0.1.20
 Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
 Author-Email: Seth Juarez <seth.juarez@microsoft.com>
 Requires-Dist: pyyaml>=6.0.1
@@ -177,7 +177,7 @@ def get_response(customerId, prompt):
 
 ```
 
-In this case, whenever this code is executed, a `.ptrace` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
+In this case, whenever this code is executed, a `.tracy` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
 
 ## OpenTelemetry Tracing
 You can add OpenTelemetry tracing to your application using the same hook mechanism. In your application, you might create something like `trace_span` to trace the execution of your prompts:
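For readers following the tracing change: the `.tracy` file described above is produced by the JSON tracer hooked into the runtime. A minimal sketch of that registration, modeled on the setup this README shows earlier (the `output_dir` parameter and the `prompty.execute` keyword arguments are assumptions based on the rest of the package):

```python
import prompty
from prompty.tracer import trace, Tracer, PromptyTracer

# register the JSON tracer; output_dir is where .tracy files are written
json_tracer = PromptyTracer(output_dir="path/to/output")
Tracer.add("PromptyTracer", json_tracer.tracer)

@trace
def get_response(customerId, prompt):
    # both this call and the prompty internals end up in the trace file
    return prompty.execute(prompt, inputs={"customerId": customerId})
```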
@@ -206,10 +206,10 @@ This will produce spans during the execution of the prompt that can be sent to a
 The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
 
 ```bash
-prompty -s path/to/prompty/file
+prompty -s path/to/prompty/file -e .env
 ```
 
-This will execute the prompt and print the response to the console. It also has default tracing enabled.
+This will execute the prompt and print the response to the console. If there are any environment variables the CLI should take into account, you can pass those in via the `-e` flag. It also has default tracing enabled.
 
 ## Contributing
 We welcome contributions to the Prompty project! This community-led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
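The new `-e` flag points the CLI at a dotenv file (python-dotenv is already a dependency of the package). A quick illustration of the workflow; the variable names here are placeholders, not a documented contract:

```bash
# hypothetical .env contents; which variables your prompty file
# actually needs depends on its model configuration
cat > .env <<'EOF'
AZURE_OPENAI_ENDPOINT=https://my-resource.openai.azure.com
AZURE_OPENAI_API_KEY=replace-me
EOF

prompty -s path/to/prompty/file -e .env
```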
{prompty-0.1.19 → prompty-0.1.20}/README.md

@@ -158,7 +158,7 @@ def get_response(customerId, prompt):
 
 ```
 
-In this case, whenever this code is executed, a `.ptrace` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
+In this case, whenever this code is executed, a `.tracy` file will be created in the `path/to/output` directory. This file will contain the trace of the execution of the `get_response` function, the execution of the `get_customer` function, and the prompty internals that generated the response.
 
 ## OpenTelemetry Tracing
 You can add OpenTelemetry tracing to your application using the same hook mechanism. In your application, you might create something like `trace_span` to trace the execution of your prompts:
@@ -187,10 +187,10 @@ This will produce spans during the execution of the prompt that can be sent to a
 The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
 
 ```bash
-prompty -s path/to/prompty/file
+prompty -s path/to/prompty/file -e .env
 ```
 
-This will execute the prompt and print the response to the console. It also has default tracing enabled.
+This will execute the prompt and print the response to the console. If there are any environment variables the CLI should take into account, you can pass those in via the `-e` flag. It also has default tracing enabled.
 
 ## Contributing
 We welcome contributions to the Prompty project! This community-led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
{prompty-0.1.19 → prompty-0.1.20}/prompty/azure/executor.py

@@ -91,7 +91,7 @@ class AzureOpenAIExecutor(Invoker):
         elif self.api == "completion":
             trace("signature", "AzureOpenAI.completions.create")
             args = {
-                "prompt": data.item,
+                "prompt": data,
                 "model": self.deployment,
                 **self.parameters,
             }
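The `data.item` → `data` fix aligns the completion branch with the underlying SDK, which takes the rendered prompt text directly. A minimal sketch of the call being assembled here, using the openai Python SDK (the deployment name is illustrative, and credentials are assumed to come from the environment):

```python
from openai import AzureOpenAI

# assumes AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY and
# OPENAI_API_VERSION are set in the environment
client = AzureOpenAI()

# completions take the rendered prompt string, not a wrapper object
response = client.completions.create(
    model="my-deployment",  # illustrative deployment name
    prompt="Write a haiku about camping.",
    max_tokens=64,
)
print(response.choices[0].text)
```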
@@ -111,10 +111,22 @@ class AzureOpenAIExecutor(Invoker):
             trace("result", response)
 
         elif self.api == "image":
-            raise NotImplementedError("Azure OpenAI Image API is not implemented yet")
+            trace("signature", "AzureOpenAI.images.generate")
+            args = {
+                "prompt": data,
+                "model": self.deployment,
+                **self.parameters,
+            }
+            trace("inputs", args)
+            response = client.images.generate.create(**args)
+            trace("result", response)
 
         # stream response
         if isinstance(response, Iterator):
-            return PromptyStream("AzureOpenAIExecutor", response)
+            if self.api == "chat":
+                # TODO: handle the case where there might be no usage in the stream
+                return PromptyStream("AzureOpenAIExecutor", response)
+            else:
+                return PromptyStream("AzureOpenAIExecutor", response)
         else:
             return response
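The new image branch mirrors the chat and completion branches. Note that the published openai Python SDK exposes generation as `client.images.generate(...)`, returning an `ImagesResponse`, whereas this hunk spells it `client.images.generate.create`. A sketch of the documented call, with an illustrative deployment name:

```python
from openai import AzureOpenAI

client = AzureOpenAI()  # assumes credentials in the environment

# documented SDK shape: client.images.generate(...) -> ImagesResponse
response = client.images.generate(
    model="my-dalle-deployment",  # illustrative deployment name
    prompt="A tent under the northern lights",
)
print(response.data[0].url)
```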
{prompty-0.1.19 → prompty-0.1.20}/prompty/azure/processor.py

@@ -1,5 +1,6 @@
 from typing import Iterator
 from openai.types.completion import Completion
+from openai.types.images_response import ImagesResponse
 from openai.types.chat.chat_completion import ChatCompletion
 from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
 from openai.types.create_embedding_response import CreateEmbeddingResponse
@@ -50,6 +51,17 @@ class AzureOpenAIProcessor(Invoker):
                 return data.data[0].embedding
             else:
                 return [item.embedding for item in data.data]
+        elif isinstance(data, ImagesResponse):
+            self.prompty.model.parameters
+            item: ImagesResponse = data
+
+            if len(data.data) == 0:
+                raise ValueError("Invalid data")
+            elif len(data.data) == 1:
+                return data.data[0].url if item.data[0].url else item.data[0].b64_json
+            else:
+                return [item.url if item.url else item.b64_json for item in data.data]
+
         elif isinstance(data, Iterator):
 
             def generator():
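With this processor change, executing an image prompty yields either a hosted URL or a base64 payload per image. A hedged sketch of consuming that result (the `.prompty` file name is hypothetical):

```python
import base64

import prompty

# result is what the processor above returns for a single image:
# a URL string when the service provides one, otherwise base64 data
result = prompty.execute("image.prompty")  # hypothetical prompty file
if result.startswith("http"):
    print("image hosted at:", result)
else:
    with open("image.png", "wb") as f:
        f.write(base64.b64decode(result))
```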
{prompty-0.1.19 → prompty-0.1.20}/prompty/core.py

@@ -561,7 +561,9 @@ class AsyncPromptyStream(AsyncIterator):
             # StopIteration is raised
             # contents are exhausted
             if len(self.items) > 0:
-                with Tracer.start(f"{self.name}.AsyncPromptyStream") as trace:
+                with Tracer.start("AsyncPromptyStream") as trace:
+                    trace("signature", f"{self.name}.AsyncPromptyStream")
+                    trace("inputs", "None")
                     trace("result", [to_dict(s) for s in self.items])
 
             raise StopIteration
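This change gives the stream's closing trace frame the same signature/inputs/result shape as every other frame, which matters for custom hooks: whatever callable a hook yields from `Tracer.start` now receives all three keys. A minimal console hook in that shape, modeled on the tracer examples in this package's README:

```python
import contextlib

from prompty.tracer import Tracer

@contextlib.contextmanager
def console_tracer(name: str):
    # Tracer.start(name) enters each registered hook with the frame name;
    # the yielded callable receives ("signature" | "inputs" | "result", value)
    print(f"enter {name}")
    yield lambda key, value: print(f"  {key}: {value}")
    print(f"exit {name}")

Tracer.add("console", console_tracer)
```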
{prompty-0.1.19 → prompty-0.1.20}/prompty/tracer.py

@@ -1,6 +1,7 @@
 import os
 import json
 import inspect
+import traceback
 import importlib
 import contextlib
 from pathlib import Path
@@ -176,7 +177,8 @@ def _trace_async(
                 "result",
                 {
                     "exception": {
-                        "type": type(e).__name__,
+                        "type": type(e),
+                        "traceback": traceback.format_tb(),
                         "message": str(e),
                         "args": to_dict(e.args),
                     }
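One stdlib note on the new `traceback` field: `traceback.format_tb` requires a traceback object as its argument, so the bare `traceback.format_tb()` call in this hunk would itself raise a `TypeError`. A sketch of the intended capture, passing the exception's own traceback:

```python
import traceback

def risky_call():
    raise ValueError("boom")  # stand-in for a traced function

try:
    risky_call()
except Exception as e:
    record = {
        "exception": {
            # format_tb requires the exception's traceback object
            "traceback": traceback.format_tb(e.__traceback__),
            "type": type(e).__name__,
            "message": str(e),
            "args": e.args,
        }
    }
    print(record)
```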
{prompty-0.1.19 → prompty-0.1.20}/pyproject.toml

@@ -13,7 +13,7 @@ dependencies = [
     "python-dotenv>=1.0.1",
     "click>=8.1.7",
 ]
-version = "0.1.19"
+version = "0.1.20"
 
 [project.optional-dependencies]
 azure = [
{prompty-0.1.19 → prompty-0.1.20}/tests/test_tracing.py

@@ -151,5 +151,8 @@ def test_streaming():
     result = prompty.execute(
         "prompts/streaming.prompty",
     )
+    r = []
     for item in result:
-        print(item)
+        r.append(item)
+
+    return ' '.join(r)