prompty 0.1.48__tar.gz → 0.1.49__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. {prompty-0.1.48 → prompty-0.1.49}/PKG-INFO +5 -4
  2. {prompty-0.1.48 → prompty-0.1.49}/README.md +4 -3
  3. {prompty-0.1.48 → prompty-0.1.49}/prompty/__init__.py +2 -1
  4. {prompty-0.1.48 → prompty-0.1.49}/prompty/utils.py +3 -2
  5. {prompty-0.1.48 → prompty-0.1.49}/pyproject.toml +1 -1
  6. {prompty-0.1.48 → prompty-0.1.49}/tests/test_core.py +10 -0
  7. {prompty-0.1.48 → prompty-0.1.49}/LICENSE +0 -0
  8. {prompty-0.1.48 → prompty-0.1.49}/prompty/azure/__init__.py +0 -0
  9. {prompty-0.1.48 → prompty-0.1.49}/prompty/azure/executor.py +0 -0
  10. {prompty-0.1.48 → prompty-0.1.49}/prompty/azure/processor.py +0 -0
  11. {prompty-0.1.48 → prompty-0.1.49}/prompty/azure_beta/__init__.py +0 -0
  12. {prompty-0.1.48 → prompty-0.1.49}/prompty/azure_beta/executor.py +0 -0
  13. {prompty-0.1.48 → prompty-0.1.49}/prompty/cli.py +0 -0
  14. {prompty-0.1.48 → prompty-0.1.49}/prompty/core.py +0 -0
  15. {prompty-0.1.48 → prompty-0.1.49}/prompty/invoker.py +0 -0
  16. {prompty-0.1.48 → prompty-0.1.49}/prompty/mustache.py +0 -0
  17. {prompty-0.1.48 → prompty-0.1.49}/prompty/openai/__init__.py +0 -0
  18. {prompty-0.1.48 → prompty-0.1.49}/prompty/openai/executor.py +0 -0
  19. {prompty-0.1.48 → prompty-0.1.49}/prompty/openai/processor.py +0 -0
  20. {prompty-0.1.48 → prompty-0.1.49}/prompty/parsers.py +0 -0
  21. {prompty-0.1.48 → prompty-0.1.49}/prompty/py.typed +0 -0
  22. {prompty-0.1.48 → prompty-0.1.49}/prompty/renderers.py +0 -0
  23. {prompty-0.1.48 → prompty-0.1.49}/prompty/serverless/__init__.py +0 -0
  24. {prompty-0.1.48 → prompty-0.1.49}/prompty/serverless/executor.py +0 -0
  25. {prompty-0.1.48 → prompty-0.1.49}/prompty/serverless/processor.py +0 -0
  26. {prompty-0.1.48 → prompty-0.1.49}/prompty/tracer.py +0 -0
  27. {prompty-0.1.48 → prompty-0.1.49}/tests/__init__.py +0 -0
  28. {prompty-0.1.48 → prompty-0.1.49}/tests/fake_azure_executor.py +0 -0
  29. {prompty-0.1.48 → prompty-0.1.49}/tests/fake_serverless_executor.py +0 -0
  30. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/1contoso.md +0 -0
  31. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/2contoso.md +0 -0
  32. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/3contoso.md +0 -0
  33. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/4contoso.md +0 -0
  34. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/basic.prompty.md +0 -0
  35. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/camping.jpg +0 -0
  36. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/context.prompty.md +0 -0
  37. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/contoso_multi.md +0 -0
  38. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/faithfulness.prompty.md +0 -0
  39. {prompty-0.1.48 → prompty-0.1.49}/tests/generated/groundedness.prompty.md +0 -0
  40. {prompty-0.1.48 → prompty-0.1.49}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  41. {prompty-0.1.48 → prompty-0.1.49}/tests/hello_world.embedding.json +0 -0
  42. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/__init__.py +0 -0
  43. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/basic.prompty +0 -0
  44. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/basic.prompty.execution.json +0 -0
  45. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/basic_json_output.prompty +0 -0
  46. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/camping.jpg +0 -0
  47. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/chat.prompty +0 -0
  48. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/context.json +0 -0
  49. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/context.prompty +0 -0
  50. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/context.prompty.execution.json +0 -0
  51. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/embedding.prompty +0 -0
  52. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/embedding.prompty.execution.json +0 -0
  53. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/evaluation.prompty +0 -0
  54. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/faithfulness.prompty +0 -0
  55. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  56. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/fake.prompty +0 -0
  57. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/funcfile.json +0 -0
  58. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/funcfile.prompty +0 -0
  59. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/functions.prompty +0 -0
  60. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/functions.prompty.execution.json +0 -0
  61. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/groundedness.prompty +0 -0
  62. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/groundedness.prompty.execution.json +0 -0
  63. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/prompty.json +0 -0
  64. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/serverless.prompty +0 -0
  65. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/serverless.prompty.execution.json +0 -0
  66. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/serverless_stream.prompty +0 -0
  67. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
  68. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/streaming.prompty +0 -0
  69. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/streaming.prompty.execution.json +0 -0
  70. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/structured_output.prompty +0 -0
  71. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/structured_output.prompty.execution.json +0 -0
  72. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/structured_output_schema.json +0 -0
  73. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/sub/__init__.py +0 -0
  74. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/sub/basic.prompty +0 -0
  75. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/sub/sub/__init__.py +0 -0
  76. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/sub/sub/basic.prompty +0 -0
  77. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/sub/sub/prompty.json +0 -0
  78. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/sub/sub/test.py +0 -0
  79. {prompty-0.1.48 → prompty-0.1.49}/tests/prompts/test.py +0 -0
  80. {prompty-0.1.48 → prompty-0.1.49}/tests/prompty.json +0 -0
  81. {prompty-0.1.48 → prompty-0.1.49}/tests/test_common.py +0 -0
  82. {prompty-0.1.48 → prompty-0.1.49}/tests/test_execute.py +0 -0
  83. {prompty-0.1.48 → prompty-0.1.49}/tests/test_factory_invoker.py +0 -0
  84. {prompty-0.1.48 → prompty-0.1.49}/tests/test_path_exec.py +0 -0
  85. {prompty-0.1.48 → prompty-0.1.49}/tests/test_tracing.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: prompty
3
- Version: 0.1.48
3
+ Version: 0.1.49
4
4
  Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
5
5
  Author-Email: Seth Juarez <seth.juarez@microsoft.com>
6
6
  License: MIT
@@ -37,16 +37,17 @@ Examples prompty file:
37
37
  ```markdown
38
38
  ---
39
39
  name: Basic Prompt
40
- description: A basic prompt that uses the GPT-3 chat API to answer questions
40
+ description: A basic prompt that uses the gpt-3.5-turbo chat API to answer questions
41
41
  authors:
42
42
  - sethjuarez
43
43
  - jietong
44
44
  model:
45
45
  api: chat
46
46
  configuration:
47
- api_version: 2023-12-01-preview
47
+ api_version: 2024-10-21
48
48
  azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
49
49
  azure_deployment: ${env:AZURE_OPENAI_DEPLOYMENT:gpt-35-turbo}
50
+ type: azure_openai
50
51
  sample:
51
52
  firstName: Jane
52
53
  lastName: Doe
@@ -76,7 +77,7 @@ Download the [VS Code extension here](https://marketplace.visualstudio.com/items
76
77
  The Python runtime is a simple way to run your prompts in Python. The runtime is available as a Python package and can be installed using pip. Depending on the type of prompt you are running, you may need to install additional dependencies. The runtime is designed to be extensible and can be customized to fit your needs.
77
78
 
78
79
  ```bash
79
- pip install prompty[azure]
80
+ pip install "prompty[azure]"
80
81
  ```
81
82
 
82
83
  Simple usage example:
@@ -12,16 +12,17 @@ Examples prompty file:
12
12
  ```markdown
13
13
  ---
14
14
  name: Basic Prompt
15
- description: A basic prompt that uses the GPT-3 chat API to answer questions
15
+ description: A basic prompt that uses the gpt-3.5-turbo chat API to answer questions
16
16
  authors:
17
17
  - sethjuarez
18
18
  - jietong
19
19
  model:
20
20
  api: chat
21
21
  configuration:
22
- api_version: 2023-12-01-preview
22
+ api_version: 2024-10-21
23
23
  azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
24
24
  azure_deployment: ${env:AZURE_OPENAI_DEPLOYMENT:gpt-35-turbo}
25
+ type: azure_openai
25
26
  sample:
26
27
  firstName: Jane
27
28
  lastName: Doe
@@ -51,7 +52,7 @@ Download the [VS Code extension here](https://marketplace.visualstudio.com/items
51
52
  The Python runtime is a simple way to run your prompts in Python. The runtime is available as a Python package and can be installed using pip. Depending on the type of prompt you are running, you may need to install additional dependencies. The runtime is designed to be extensible and can be customized to fit your needs.
52
53
 
53
54
  ```bash
54
- pip install prompty[azure]
55
+ pip install "prompty[azure]"
55
56
  ```
56
57
 
57
58
  Simple usage example:
@@ -12,7 +12,7 @@ from .core import (
12
12
  )
13
13
  from .invoker import InvokerFactory
14
14
  from .parsers import PromptyChatParser
15
- from .renderers import Jinja2Renderer
15
+ from .renderers import Jinja2Renderer, MustacheRenderer
16
16
  from .tracer import trace
17
17
  from .utils import (
18
18
  load_global_config,
@@ -22,6 +22,7 @@ from .utils import (
22
22
  )
23
23
 
24
24
  InvokerFactory.add_renderer("jinja2", Jinja2Renderer)
25
+ InvokerFactory.add_renderer("mustache", MustacheRenderer)
25
26
  InvokerFactory.add_parser("prompty.chat", PromptyChatParser)
26
27
 
27
28
 
@@ -37,14 +37,15 @@ def _find_global_config(prompty_path: Path = Path.cwd()) -> typing.Union[Path, N
37
37
  prompty_config = list(Path.cwd().glob("**/prompty.json"))
38
38
 
39
39
  if len(prompty_config) > 0:
40
- return sorted(
40
+ sorted_list = sorted(
41
41
  [
42
42
  c
43
43
  for c in prompty_config
44
44
  if len(c.parent.parts) <= len(prompty_path.parts)
45
45
  ],
46
46
  key=lambda p: len(p.parts),
47
- )[-1]
47
+ )
48
+ return sorted_list[-1] if len(sorted_list) > 0 else None
48
49
  else:
49
50
  return None
50
51
 
@@ -14,7 +14,7 @@ dependencies = [
14
14
  "click>=8.1.7",
15
15
  "aiofiles>=24.1.0",
16
16
  ]
17
- version = "0.1.48"
17
+ version = "0.1.49"
18
18
 
19
19
  [project.license]
20
20
  text = "MIT"
@@ -39,3 +39,13 @@ class TestCore:
39
39
  p.file = Path("/path/to/file")
40
40
  d = p.to_safe_dict()
41
41
  assert d["file"] == "/path/to/file"
42
+
43
+
44
+ def test_headless(self, **kwargs):
45
+ content = "You are a helpful assistant,\n{{ question }}"
46
+ data = { "question": "where is Microsoft?" }
47
+ p = prompty.headless(api="chat", content=content)
48
+ p.template.type = "mustache"
49
+ prompt_template = prompty.InvokerFactory.run_renderer(p, data)
50
+ parsed = prompty.InvokerFactory.run_parser(p, prompt_template)
51
+ assert parsed == "You are a helpful assistant,\nwhere is Microsoft?"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes