prompty 0.1.46__tar.gz → 0.1.48__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. {prompty-0.1.46 → prompty-0.1.48}/PKG-INFO +1 -2
  2. {prompty-0.1.46 → prompty-0.1.48}/prompty/azure/executor.py +3 -1
  3. {prompty-0.1.46 → prompty-0.1.48}/prompty/cli.py +4 -4
  4. {prompty-0.1.46 → prompty-0.1.48}/prompty/core.py +50 -80
  5. prompty-0.1.48/prompty/mustache.py +666 -0
  6. {prompty-0.1.46 → prompty-0.1.48}/prompty/renderers.py +33 -0
  7. {prompty-0.1.46 → prompty-0.1.48}/prompty/tracer.py +7 -6
  8. {prompty-0.1.46 → prompty-0.1.48}/pyproject.toml +2 -3
  9. {prompty-0.1.46/tests/prompts/sub/sub → prompty-0.1.48/tests/prompts}/basic.prompty +16 -0
  10. prompty-0.1.48/tests/test_core.py +41 -0
  11. {prompty-0.1.46 → prompty-0.1.48}/LICENSE +0 -0
  12. {prompty-0.1.46 → prompty-0.1.48}/README.md +0 -0
  13. {prompty-0.1.46 → prompty-0.1.48}/prompty/__init__.py +0 -0
  14. {prompty-0.1.46 → prompty-0.1.48}/prompty/azure/__init__.py +0 -0
  15. {prompty-0.1.46 → prompty-0.1.48}/prompty/azure/processor.py +0 -0
  16. {prompty-0.1.46 → prompty-0.1.48}/prompty/azure_beta/__init__.py +0 -0
  17. {prompty-0.1.46 → prompty-0.1.48}/prompty/azure_beta/executor.py +0 -0
  18. {prompty-0.1.46 → prompty-0.1.48}/prompty/invoker.py +0 -0
  19. {prompty-0.1.46 → prompty-0.1.48}/prompty/openai/__init__.py +0 -0
  20. {prompty-0.1.46 → prompty-0.1.48}/prompty/openai/executor.py +0 -0
  21. {prompty-0.1.46 → prompty-0.1.48}/prompty/openai/processor.py +0 -0
  22. {prompty-0.1.46 → prompty-0.1.48}/prompty/parsers.py +0 -0
  23. {prompty-0.1.46 → prompty-0.1.48}/prompty/py.typed +0 -0
  24. {prompty-0.1.46 → prompty-0.1.48}/prompty/serverless/__init__.py +0 -0
  25. {prompty-0.1.46 → prompty-0.1.48}/prompty/serverless/executor.py +0 -0
  26. {prompty-0.1.46 → prompty-0.1.48}/prompty/serverless/processor.py +0 -0
  27. {prompty-0.1.46 → prompty-0.1.48}/prompty/utils.py +0 -0
  28. {prompty-0.1.46 → prompty-0.1.48}/tests/__init__.py +0 -0
  29. {prompty-0.1.46 → prompty-0.1.48}/tests/fake_azure_executor.py +0 -0
  30. {prompty-0.1.46 → prompty-0.1.48}/tests/fake_serverless_executor.py +0 -0
  31. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/1contoso.md +0 -0
  32. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/2contoso.md +0 -0
  33. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/3contoso.md +0 -0
  34. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/4contoso.md +0 -0
  35. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/basic.prompty.md +0 -0
  36. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/camping.jpg +0 -0
  37. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/context.prompty.md +0 -0
  38. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/contoso_multi.md +0 -0
  39. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/faithfulness.prompty.md +0 -0
  40. {prompty-0.1.46 → prompty-0.1.48}/tests/generated/groundedness.prompty.md +0 -0
  41. {prompty-0.1.46 → prompty-0.1.48}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  42. {prompty-0.1.46 → prompty-0.1.48}/tests/hello_world.embedding.json +0 -0
  43. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/__init__.py +0 -0
  44. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/basic.prompty.execution.json +0 -0
  45. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/basic_json_output.prompty +0 -0
  46. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/camping.jpg +0 -0
  47. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/chat.prompty +0 -0
  48. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/context.json +0 -0
  49. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/context.prompty +0 -0
  50. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/context.prompty.execution.json +0 -0
  51. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/embedding.prompty +0 -0
  52. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/embedding.prompty.execution.json +0 -0
  53. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/evaluation.prompty +0 -0
  54. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/faithfulness.prompty +0 -0
  55. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  56. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/fake.prompty +0 -0
  57. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/funcfile.json +0 -0
  58. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/funcfile.prompty +0 -0
  59. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/functions.prompty +0 -0
  60. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/functions.prompty.execution.json +0 -0
  61. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/groundedness.prompty +0 -0
  62. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/groundedness.prompty.execution.json +0 -0
  63. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/prompty.json +0 -0
  64. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/serverless.prompty +0 -0
  65. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/serverless.prompty.execution.json +0 -0
  66. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/serverless_stream.prompty +0 -0
  67. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/serverless_stream.prompty.execution.json +0 -0
  68. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/streaming.prompty +0 -0
  69. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/streaming.prompty.execution.json +0 -0
  70. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/structured_output.prompty +0 -0
  71. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/structured_output.prompty.execution.json +0 -0
  72. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/structured_output_schema.json +0 -0
  73. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/sub/__init__.py +0 -0
  74. {prompty-0.1.46/tests/prompts → prompty-0.1.48/tests/prompts/sub}/basic.prompty +0 -0
  75. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/sub/sub/__init__.py +0 -0
  76. {prompty-0.1.46/tests/prompts → prompty-0.1.48/tests/prompts/sub}/sub/basic.prompty +0 -0
  77. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/sub/sub/prompty.json +0 -0
  78. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/sub/sub/test.py +0 -0
  79. {prompty-0.1.46 → prompty-0.1.48}/tests/prompts/test.py +0 -0
  80. {prompty-0.1.46 → prompty-0.1.48}/tests/prompty.json +0 -0
  81. {prompty-0.1.46 → prompty-0.1.48}/tests/test_common.py +0 -0
  82. {prompty-0.1.46 → prompty-0.1.48}/tests/test_execute.py +0 -0
  83. {prompty-0.1.46 → prompty-0.1.48}/tests/test_factory_invoker.py +0 -0
  84. {prompty-0.1.46 → prompty-0.1.48}/tests/test_path_exec.py +0 -0
  85. {prompty-0.1.46 → prompty-0.1.48}/tests/test_tracing.py +0 -0
{prompty-0.1.46 → prompty-0.1.48}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: prompty
-Version: 0.1.46
+Version: 0.1.48
 Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
 Author-Email: Seth Juarez <seth.juarez@microsoft.com>
 License: MIT
@@ -9,7 +9,6 @@ Project-URL: Documentation, https://prompty.ai/docs
 Project-URL: Issues, https://github.com/microsoft/prompty/issues
 Requires-Python: >=3.9
 Requires-Dist: pyyaml>=6.0.1
-Requires-Dist: pydantic>=2.8.2
 Requires-Dist: jinja2>=3.1.4
 Requires-Dist: python-dotenv>=1.0.1
 Requires-Dist: click>=8.1.7

{prompty-0.1.46 → prompty-0.1.48}/prompty/azure/executor.py

@@ -198,7 +198,9 @@ class AzureOpenAIExecutor(Invoker):
             raw: APIResponse = (
                 await client.chat.completions.with_raw_response.create(**args)
             )
-            response = ChatCompletion.model_validate_json(raw.text())
+            if raw is not None and raw.text is not None and isinstance(raw.text, str):
+                response = ChatCompletion.model_validate_json(raw.text)
+
             for k, v in raw.headers.raw:
                 trace(k.decode("utf-8"), v.decode("utf-8"))
 
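For context, the change above stops calling `raw.text()` as a method and instead validates `raw.text` only when it is a string. A minimal sketch of that guarded-parse pattern, assuming the raw body is exposed as a plain string (the helper name is illustrative, not part of the package):

from typing import Optional

from openai.types.chat import ChatCompletion


def parse_raw_completion(payload: Optional[str]) -> Optional[ChatCompletion]:
    # Treat the raw body as untrusted: only parse when it really is a string.
    if payload is not None and isinstance(payload, str):
        # pydantic v2's model_validate_json parses and validates in one step
        return ChatCompletion.model_validate_json(payload)
    return None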

{prompty-0.1.46 → prompty-0.1.48}/prompty/cli.py

@@ -6,7 +6,7 @@ from typing import Any, Optional
 
 import click
 from dotenv import load_dotenv
-from pydantic import BaseModel
+from dataclasses import asdict, is_dataclass
 
 import prompty
 from prompty.tracer import PromptyTracer, Tracer, console_tracer, trace
@@ -91,11 +91,11 @@ def execute(prompt_path: str, inputs: Optional[dict[str, Any]] = None, raw=False
         dynamic_import(p.model.configuration["type"])
 
         result = prompty.execute(p, inputs=inputs, raw=raw)
-        if issubclass(type(result), BaseModel):
-            print("\n", json.dumps(result.model_dump(), indent=4), "\n")
+        if is_dataclass(result) and not isinstance(result, type):
+            print("\n", json.dumps(asdict(result), indent=4), "\n")
         elif isinstance(result, list):
             print(
-                "\n", json.dumps([item.model_dump() for item in result], indent=4), "\n"
+                "\n", json.dumps([asdict(item) for item in result], indent=4), "\n"
             )
         else:
            print("\n", result, "\n")
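The CLI now serializes results with dataclasses.asdict instead of pydantic's model_dump. The extra `not isinstance(result, type)` check matters because is_dataclass is true for both a dataclass class and its instances, while asdict only accepts instances. A small illustration (the Answer class is a stand-in, not the package's type):

import json
from dataclasses import asdict, dataclass, is_dataclass


@dataclass
class Answer:
    text: str


# is_dataclass is True for the class object *and* for instances of it
assert is_dataclass(Answer) and is_dataclass(Answer("hi"))

result = Answer("hi")
if is_dataclass(result) and not isinstance(result, type):
    # asdict would raise TypeError if result were the class itself
    print(json.dumps(asdict(result), indent=4))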

{prompty-0.1.46 → prompty-0.1.48}/prompty/core.py

@@ -1,23 +1,23 @@
+import copy
 import os
 import typing
 from collections.abc import AsyncIterator, Iterator
+from dataclasses import dataclass, field, fields, asdict
 from pathlib import Path
-from typing import Literal, Union
-
-from pydantic import BaseModel, Field, FilePath
-from pydantic.main import IncEx
-
-from .tracer import Tracer, sanitize, to_dict
+from typing import Any, Dict, List, Literal, Union
+from .tracer import Tracer, to_dict
 from .utils import load_json, load_json_async
 
 
-class ToolCall(BaseModel):
+@dataclass
+class ToolCall:
     id: str
     name: str
     arguments: str
 
 
-class PropertySettings(BaseModel):
+@dataclass
+class PropertySettings:
     """PropertySettings class to define the properties of the model
 
     Attributes
@@ -31,11 +31,12 @@ class PropertySettings(BaseModel):
     """
 
     type: Literal["string", "number", "array", "object", "boolean"]
-    default: Union[str, int, float, list, dict, bool, None] = Field(default=None)
-    description: str = Field(default="")
+    default: Union[str, int, float, list, dict, bool, None] = field(default=None)
+    description: str = field(default="")
 
 
-class ModelSettings(BaseModel):
+@dataclass
+class ModelSettings:
     """ModelSettings class to define the model of the prompty
 
     Attributes
@@ -50,48 +51,14 @@ class ModelSettings(BaseModel):
         The response of the model
     """
 
-    api: str = Field(default="")
-    configuration: dict = Field(default={})
-    parameters: dict = Field(default={})
-    response: dict = Field(default={})
-
-    def model_dump(
-        self,
-        *,
-        mode: str = "python",
-        include: Union[IncEx, None] = None,
-        exclude: Union[IncEx, None] = None,
-        context: Union[typing.Any, None] = None,
-        by_alias: bool = False,
-        exclude_unset: bool = False,
-        exclude_defaults: bool = False,
-        exclude_none: bool = False,
-        round_trip: bool = False,
-        warnings: Union[
-            bool, Literal["none"], Literal["warn"], Literal["error"]
-        ] = True,
-        serialize_as_any: bool = False,
-    ) -> dict[str, typing.Any]:
-        """Method to dump the model in a safe way"""
-        d = super().model_dump(
-            mode=mode,
-            include=include,
-            exclude=exclude,
-            context=context,
-            by_alias=by_alias,
-            exclude_unset=exclude_unset,
-            exclude_defaults=exclude_defaults,
-            exclude_none=exclude_none,
-            round_trip=round_trip,
-            warnings=warnings,
-            serialize_as_any=serialize_as_any,
-        )
-
-        d["configuration"] = {k: sanitize(k, v) for k, v in d["configuration"].items()}
-        return d
+    api: str = field(default="")
+    configuration: dict = field(default_factory=dict)
+    parameters: dict = field(default_factory=dict)
+    response: dict = field(default_factory=dict)
 
 
-class TemplateSettings(BaseModel):
+@dataclass
+class TemplateSettings:
    """TemplateSettings class to define the template of the prompty
 
     Attributes
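The dataclass port also swaps Field(default={}) for field(default_factory=dict): dataclasses reject mutable defaults such as {} outright, and a factory gives every instance its own dict. A minimal sketch (the Settings class is illustrative):

from dataclasses import dataclass, field


@dataclass
class Settings:
    parameters: dict = field(default_factory=dict)  # fresh dict per instance

# Using field(default={}) inside a @dataclass raises
# "ValueError: mutable default <class 'dict'> for field ... is not allowed"

a, b = Settings(), Settings()
a.parameters["temperature"] = 0.7
assert b.parameters == {}  # no state shared between instances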
@@ -102,11 +69,12 @@ class TemplateSettings(BaseModel):
         The parser of the template
     """
 
-    type: str = Field(default="jinja2")
-    parser: str = Field(default="")
+    type: str = field(default="mustache")
+    parser: str = field(default="")
 
 
-class Prompty(BaseModel):
+@dataclass
+class Prompty:
     """Prompty class to define the prompty
 
     Attributes
@@ -115,9 +83,9 @@ class Prompty(BaseModel):
         The name of the prompty
     description : str
         The description of the prompty
-    authors : List[str]
+    authors : list[str]
         The authors of the prompty
-    tags : List[str]
+    tags : list[str]
         The tags of the prompty
     version : str
         The version of the prompty
@@ -129,52 +97,54 @@ class Prompty(BaseModel):
         The model of the prompty
     sample : dict
         The sample of the prompty
-    inputs : Dict[str, PropertySettings]
+    inputs : dict[str, PropertySettings]
         The inputs of the prompty
-    outputs : Dict[str, PropertySettings]
+    outputs : dict[str, PropertySettings]
         The outputs of the prompty
     template : TemplateSettings
         The template of the prompty
     file : FilePath
         The file of the prompty
-    content : str | List[str] | dict
+    content : Union[str, list[str], dict]
         The content of the prompty
     """
 
     # metadata
-    name: str = Field(default="")
-    description: str = Field(default="")
-    authors: list[str] = Field(default=[])
-    tags: list[str] = Field(default=[])
-    version: str = Field(default="")
-    base: str = Field(default="")
-    basePrompty: Union["Prompty", None] = Field(default=None)
+    name: str = field(default="")
+    description: str = field(default="")
+    authors: List[str] = field(default_factory=list)
+    tags: List[str] = field(default_factory=list)
+    version: str = field(default="")
+    base: str = field(default="")
+    basePrompty: Union["Prompty", None] = field(default=None)
     # model
-    model: ModelSettings = Field(default_factory=ModelSettings)
+    model: ModelSettings = field(default_factory=ModelSettings)
 
     # sample
-    sample: dict = Field(default={})
+    sample: dict = field(default_factory=dict)
 
     # input / output
-    inputs: dict[str, PropertySettings] = Field(default={})
-    outputs: dict[str, PropertySettings] = Field(default={})
+    inputs: dict[str, PropertySettings] = field(default_factory=dict)
+    outputs: dict[str, PropertySettings] = field(default_factory=dict)
 
     # template
-    template: TemplateSettings
+    template: TemplateSettings = field(default_factory=TemplateSettings)
 
-    file: Union[str, FilePath] = Field(default="")
-    content: Union[str, list[str], dict] = Field(default="")
+    file: Union[str, Path] = field(default="")
+    content: Union[str, list[str], dict] = field(default="")
 
     def to_safe_dict(self) -> dict[str, typing.Any]:
-        d = {}
-        for k, v in self:
+        d: dict[str, typing.Any] = {}
+        for field in fields(self):
+            k = field.name
+            v = getattr(self, field.name)
             if v != "" and v != {} and v != [] and v is not None:
                 if k == "model":
-                    d[k] = v.model_dump()
+                    d[k] = asdict(self.model)
                 elif k == "template":
-                    d[k] = v.model_dump()
+                    d[k] = asdict(self.template)
                 elif k == "inputs" or k == "outputs":
-                    d[k] = {k: v.model_dump() for k, v in v.items()}
+                    d[k] = copy.deepcopy(v)
                 elif k == "file":
                     d[k] = (
                         str(self.file.as_posix())
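Because dataclass instances are not iterable the way pydantic models are, the rewritten to_safe_dict walks dataclasses.fields() and reads each value with getattr, serializing nested settings with asdict. A stripped-down sketch of that pattern (Doc and Template are stand-ins, not the package's types):

from dataclasses import asdict, dataclass, field, fields, is_dataclass


@dataclass
class Template:
    type: str = "mustache"


@dataclass
class Doc:
    name: str = ""
    template: Template = field(default_factory=Template)


def safe_dict(obj) -> dict:
    d = {}
    for f in fields(obj):  # Field objects carry the attribute name
        v = getattr(obj, f.name)
        if v != "" and v != {} and v != [] and v is not None:  # skip empty values
            d[f.name] = asdict(v) if is_dataclass(v) and not isinstance(v, type) else v
    return d


print(safe_dict(Doc(name="demo")))
# {'name': 'demo', 'template': {'type': 'mustache'}}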
@@ -217,7 +187,7 @@ class Prompty(BaseModel):
             f = Path(parent / Path(file)).resolve().absolute()
             if f.exists():
                 items = load_json(f)
-                if isinstance(items, list):
+                if isinstance(items, List):
                     return [Prompty.normalize(value, parent) for value in items]
                 elif isinstance(items, dict):
                     return {
@@ -325,7 +295,7 @@ def param_hoisting(
     top: dict[str, typing.Any],
     bottom: dict[str, typing.Any],
     top_key: Union[str, None] = None,
-) -> dict[str, typing.Any]:
+) -> Dict[str, typing.Any]:
     if top_key:
         new_dict = {**top[top_key]} if top_key in top else {}
     else: