prompty 0.1.1__tar.gz → 0.1.8__tar.gz

Sign up to get free protection for your applications and to get access to all the features.
Files changed (63) hide show
  1. prompty-0.1.8/LICENSE +7 -0
  2. prompty-0.1.8/PKG-INFO +136 -0
  3. prompty-0.1.8/README.md +120 -0
  4. {prompty-0.1.1 → prompty-0.1.8}/prompty/__init__.py +139 -7
  5. prompty-0.1.8/prompty/cli.py +85 -0
  6. {prompty-0.1.1 → prompty-0.1.8}/prompty/core.py +164 -18
  7. {prompty-0.1.1 → prompty-0.1.8}/prompty/executors.py +28 -3
  8. {prompty-0.1.1 → prompty-0.1.8}/prompty/parsers.py +38 -2
  9. {prompty-0.1.1 → prompty-0.1.8}/prompty/processors.py +28 -9
  10. {prompty-0.1.1 → prompty-0.1.8}/prompty/renderers.py +2 -1
  11. prompty-0.1.8/prompty/tracer.py +231 -0
  12. {prompty-0.1.1 → prompty-0.1.8}/pyproject.toml +6 -2
  13. {prompty-0.1.1 → prompty-0.1.8}/tests/__init__.py +13 -17
  14. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/groundedness.prompty +1 -1
  15. prompty-0.1.8/tests/prompts/streaming.prompty +28 -0
  16. prompty-0.1.8/tests/prompts/streaming.prompty.execution.json +3601 -0
  17. {prompty-0.1.1 → prompty-0.1.8}/tests/test_execute.py +17 -0
  18. prompty-0.1.1/PKG-INFO +0 -15
  19. prompty-0.1.1/README.md +0 -1
  20. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/1contoso.md +0 -0
  21. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/2contoso.md +0 -0
  22. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/3contoso.md +0 -0
  23. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/4contoso.md +0 -0
  24. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/basic.prompty.md +0 -0
  25. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/camping.jpg +0 -0
  26. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/context.prompty.md +0 -0
  27. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/contoso_multi.md +0 -0
  28. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/faithfulness.prompty.md +0 -0
  29. {prompty-0.1.1 → prompty-0.1.8}/tests/generated/groundedness.prompty.md +0 -0
  30. {prompty-0.1.1 → prompty-0.1.8}/tests/hello_world-goodbye_world-hello_again.embedding.json +0 -0
  31. {prompty-0.1.1 → prompty-0.1.8}/tests/hello_world.embedding.json +0 -0
  32. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/__init__.py +0 -0
  33. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/basic.prompty +0 -0
  34. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/basic.prompty.execution.json +0 -0
  35. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/basic_json_output.prompty +0 -0
  36. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/camping.jpg +0 -0
  37. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/chat.prompty +0 -0
  38. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/context.json +0 -0
  39. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/context.prompty +0 -0
  40. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/context.prompty.execution.json +0 -0
  41. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/embedding.prompty +0 -0
  42. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/embedding.prompty.execution.json +0 -0
  43. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/evaluation.prompty +0 -0
  44. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/faithfulness.prompty +0 -0
  45. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/faithfulness.prompty.execution.json +0 -0
  46. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/fake.prompty +0 -0
  47. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/funcfile.json +0 -0
  48. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/funcfile.prompty +0 -0
  49. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/functions.prompty +0 -0
  50. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/functions.prompty.execution.json +0 -0
  51. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/groundedness.prompty.execution.json +0 -0
  52. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/prompty.json +0 -0
  53. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/sub/__init__.py +0 -0
  54. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/sub/basic.prompty +0 -0
  55. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/sub/sub/__init__.py +0 -0
  56. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/sub/sub/basic.prompty +0 -0
  57. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/sub/sub/prompty.json +0 -0
  58. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/sub/sub/test.py +0 -0
  59. {prompty-0.1.1 → prompty-0.1.8}/tests/prompts/test.py +0 -0
  60. {prompty-0.1.1 → prompty-0.1.8}/tests/prompty.json +0 -0
  61. {prompty-0.1.1 → prompty-0.1.8}/tests/test_common.py +0 -0
  62. {prompty-0.1.1 → prompty-0.1.8}/tests/test_factory_invoker.py +0 -0
  63. {prompty-0.1.1 → prompty-0.1.8}/tests/test_path_exec.py +0 -0
prompty-0.1.8/LICENSE ADDED
@@ -0,0 +1,7 @@
1
+ Copyright (c) 2024 Microsoft
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4
+
5
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
+
7
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
prompty-0.1.8/PKG-INFO ADDED
@@ -0,0 +1,136 @@
1
+ Metadata-Version: 2.1
2
+ Name: prompty
3
+ Version: 0.1.8
4
+ Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
5
+ Author-Email: Seth Juarez <seth.juarez@microsoft.com>
6
+ License: MIT
7
+ Requires-Python: >=3.9
8
+ Requires-Dist: pyyaml>=6.0.1
9
+ Requires-Dist: pydantic>=2.8.2
10
+ Requires-Dist: jinja2>=3.1.4
11
+ Requires-Dist: openai>=1.35.10
12
+ Requires-Dist: azure-identity>=1.17.1
13
+ Requires-Dist: python-dotenv>=1.0.1
14
+ Requires-Dist: click>=8.1.7
15
+ Description-Content-Type: text/markdown
16
+
17
+
18
+ Prompty is an asset class and format for LLM prompts designed to enhance observability, understandability, and portability for developers. The primary goal is to accelerate the developer inner loop of prompt engineering and prompt source management in a cross-language and cross-platform implementation.
19
+
20
+ The file format has a supporting toolchain with a VS Code extension and runtimes in multiple programming languages to simplify and accelerate your AI application development.
21
+
22
+ The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming languages.
23
+
24
+ ## The Prompty File Format
25
+ Prompty is a language agnostic prompt asset for creating prompts and engineering the responses. Learn more about the format [here](https://prompty.ai/docs/prompty-file-spec).
26
+
27
+ Example prompty file:
28
+ ```markdown
29
+ ---
30
+ name: Basic Prompt
31
+ description: A basic prompt that uses the GPT-3 chat API to answer questions
32
+ authors:
33
+ - sethjuarez
34
+ - jietong
35
+ model:
36
+ api: chat
37
+ configuration:
38
+ azure_deployment: gpt-35-turbo
39
+ sample:
40
+ firstName: Jane
41
+ lastName: Doe
42
+ question: What is the meaning of life?
43
+ ---
44
+ system:
45
+ You are an AI assistant who helps people find information.
46
+ As the assistant, you answer questions briefly, succinctly,
47
+ and in a personable manner using markdown and even add some personal flair with appropriate emojis.
48
+
49
+ # Customer
50
+ You are helping {{firstName}} {{lastName}} to find answers to their questions.
51
+ Use their name to address them in your responses.
52
+
53
+ user:
54
+ {{question}}
55
+ ```
56
+
57
+
58
+ ## The Prompty VS Code Extension
59
+ Run Prompty files directly in VS Code. This Visual Studio Code extension offers an intuitive prompt playground within VS Code to streamline the prompt engineering process. You can find the Prompty extension in the Visual Studio Code Marketplace.
60
+
61
+ Download the [VS Code extension here](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.prompty).
62
+
63
+
64
+ ## Using this Prompty Runtime
65
+ The Python runtime is a simple way to run your prompts in Python. The runtime is available as a Python package and can be installed using pip.
66
+
67
+ ```bash
68
+ pip install prompty
69
+ ```
70
+
71
+ Simple usage example:
72
+
73
+ ```python
74
+ import prompty
75
+
76
+ # execute the prompt
77
+ response = prompty.execute("path/to/prompty/file")
78
+
79
+ print(response)
80
+ ```
81
+
82
+ ## Using Tracing in Prompty
83
+ Prompty supports tracing to help you understand the execution of your prompts. The built-in tracing dumps the execution of the prompt to a file.
84
+
85
+ ```python
86
+ import prompty
87
+ from prompty.tracer import Trace, PromptyTracer
88
+
89
+ # add default tracer
90
+ Trace.add_tracer("prompty", PromptyTracer("path/to/trace/dir"))
91
+
92
+ # execute the prompt
93
+ response = prompty.execute("path/to/prompty/file")
94
+
95
+ print(response)
96
+ ```
97
+
98
+ You can also bring your own tracer by creating a `Tracer` class.
99
+ Simple example:
100
+
101
+ ```python
102
+ import prompty
103
+ from prompty.tracer import Tracer, Trace
104
+
105
+ class MyTracer(Tracer):
106
+
107
+ def start(self, name: str) -> None:
108
+ print(f"Starting {name}")
109
+
110
+ def add(self, key: str, value: Any) -> None:
111
+ print(f"Adding {key} with value {value}")
112
+
113
+ def end(self) -> None:
114
+ print("Ending")
115
+
116
+ # add your tracer
117
+ Trace.add_tracer("my_tracer", MyTracer())
118
+
119
+ # execute the prompt
120
+ response = prompty.execute("path/to/prompty/file")
121
+
122
+ ```
123
+
124
+ To define your own tracer, you can subclass the `Tracer` class and implement the `start`, `add`, and `end` methods and then add it to the `Trace` instance. You can add as many tracers as you like - all of them will be called in order.
125
+
126
+ ## CLI
127
+ The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
128
+
129
+ ```bash
130
+ prompty -s path/to/prompty/file
131
+ ```
132
+
133
+ This will execute the prompt and print the response to the console. It also has default tracing enabled.
134
+
135
+ ## Contributing
136
+ We welcome contributions to the Prompty project! This community-led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
@@ -0,0 +1,120 @@
1
+
2
+ Prompty is an asset class and format for LLM prompts designed to enhance observability, understandability, and portability for developers. The primary goal is to accelerate the developer inner loop of prompt engineering and prompt source management in a cross-language and cross-platform implementation.
3
+
4
+ The file format has a supporting toolchain with a VS Code extension and runtimes in multiple programming languages to simplify and accelerate your AI application development.
5
+
6
+ The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming languages.
7
+
8
+ ## The Prompty File Format
9
+ Prompty is a language agnostic prompt asset for creating prompts and engineering the responses. Learn more about the format [here](https://prompty.ai/docs/prompty-file-spec).
10
+
11
+ Example prompty file:
12
+ ```markdown
13
+ ---
14
+ name: Basic Prompt
15
+ description: A basic prompt that uses the GPT-3 chat API to answer questions
16
+ authors:
17
+ - sethjuarez
18
+ - jietong
19
+ model:
20
+ api: chat
21
+ configuration:
22
+ azure_deployment: gpt-35-turbo
23
+ sample:
24
+ firstName: Jane
25
+ lastName: Doe
26
+ question: What is the meaning of life?
27
+ ---
28
+ system:
29
+ You are an AI assistant who helps people find information.
30
+ As the assistant, you answer questions briefly, succinctly,
31
+ and in a personable manner using markdown and even add some personal flair with appropriate emojis.
32
+
33
+ # Customer
34
+ You are helping {{firstName}} {{lastName}} to find answers to their questions.
35
+ Use their name to address them in your responses.
36
+
37
+ user:
38
+ {{question}}
39
+ ```
40
+
41
+
42
+ ## The Prompty VS Code Extension
43
+ Run Prompty files directly in VS Code. This Visual Studio Code extension offers an intuitive prompt playground within VS Code to streamline the prompt engineering process. You can find the Prompty extension in the Visual Studio Code Marketplace.
44
+
45
+ Download the [VS Code extension here](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.prompty).
46
+
47
+
48
+ ## Using this Prompty Runtime
49
+ The Python runtime is a simple way to run your prompts in Python. The runtime is available as a Python package and can be installed using pip.
50
+
51
+ ```bash
52
+ pip install prompty
53
+ ```
54
+
55
+ Simple usage example:
56
+
57
+ ```python
58
+ import prompty
59
+
60
+ # execute the prompt
61
+ response = prompty.execute("path/to/prompty/file")
62
+
63
+ print(response)
64
+ ```
65
+
66
+ ## Using Tracing in Prompty
67
+ Prompty supports tracing to help you understand the execution of your prompts. The built-in tracing dumps the execution of the prompt to a file.
68
+
69
+ ```python
70
+ import prompty
71
+ from prompty.tracer import Trace, PromptyTracer
72
+
73
+ # add default tracer
74
+ Trace.add_tracer("prompty", PromptyTracer("path/to/trace/dir"))
75
+
76
+ # execute the prompt
77
+ response = prompty.execute("path/to/prompty/file")
78
+
79
+ print(response)
80
+ ```
81
+
82
+ You can also bring your own tracer by creating a `Tracer` class.
83
+ Simple example:
84
+
85
+ ```python
86
+ import prompty
87
+ from prompty.tracer import Tracer, Trace
88
+
89
+ class MyTracer(Tracer):
90
+
91
+ def start(self, name: str) -> None:
92
+ print(f"Starting {name}")
93
+
94
+ def add(self, key: str, value: Any) -> None:
95
+ print(f"Adding {key} with value {value}")
96
+
97
+ def end(self) -> None:
98
+ print("Ending")
99
+
100
+ # add your tracer
101
+ Trace.add_tracer("my_tracer", MyTracer())
102
+
103
+ # execute the prompt
104
+ response = prompty.execute("path/to/prompty/file")
105
+
106
+ ```
107
+
108
+ To define your own tracer, you can subclass the `Tracer` class and implement the `start`, `add`, and `end` methods and then add it to the `Trace` instance. You can add as many tracers as you like - all of them will be called in order.
109
+
110
+ ## CLI
111
+ The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
112
+
113
+ ```bash
114
+ prompty -s path/to/prompty/file
115
+ ```
116
+
117
+ This will execute the prompt and print the response to the console. It also has default tracing enabled.
118
+
119
+ ## Contributing
120
+ We welcome contributions to the Prompty project! This community-led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
@@ -2,6 +2,8 @@ import json
2
2
  import traceback
3
3
  from pathlib import Path
4
4
  from typing import Dict, List, Union
5
+
6
+ from .tracer import trace
5
7
  from .core import (
6
8
  Frontmatter,
7
9
  InvokerFactory,
@@ -46,6 +48,7 @@ def load_global_config(
46
48
  return {}
47
49
 
48
50
 
51
+ @trace(description="Create a headless prompty object for programmatic use.")
49
52
  def headless(
50
53
  api: str,
51
54
  content: str | List[str] | dict,
@@ -53,6 +56,38 @@ def headless(
53
56
  parameters: Dict[str, any] = {},
54
57
  connection: str = "default",
55
58
  ) -> Prompty:
59
+ """Create a headless prompty object for programmatic use.
60
+
61
+ Parameters
62
+ ----------
63
+ api : str
64
+ The API to use for the model
65
+ content : str | List[str] | dict
66
+ The content to process
67
+ configuration : Dict[str, any], optional
68
+ The configuration to use, by default {}
69
+ parameters : Dict[str, any], optional
70
+ The parameters to use, by default {}
71
+ connection : str, optional
72
+ The connection to use, by default "default"
73
+
74
+ Returns
75
+ -------
76
+ Prompty
77
+ The headless prompty object
78
+
79
+ Example
80
+ -------
81
+ >>> import prompty
82
+ >>> p = prompty.headless(
83
+ api="embedding",
84
+ configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
85
+ content="hello world",
86
+ )
87
+ >>> emb = prompty.execute(p)
88
+
89
+ """
90
+
56
91
  # get caller's path (to get relative path for prompty.json)
57
92
  caller = Path(traceback.extract_stack()[-2].filename)
58
93
  templateSettings = TemplateSettings(type="NOOP", parser="NOOP")
@@ -70,11 +105,33 @@ def headless(
70
105
  return Prompty(model=modelSettings, template=templateSettings, content=content)
71
106
 
72
107
 
108
+ @trace(description="Load a prompty file.")
73
109
  def load(prompty_file: str, configuration: str = "default") -> Prompty:
110
+ """Load a prompty file.
111
+
112
+ Parameters
113
+ ----------
114
+ prompty_file : str
115
+ The path to the prompty file
116
+ configuration : str, optional
117
+ The configuration to use, by default "default"
118
+
119
+ Returns
120
+ -------
121
+ Prompty
122
+ The loaded prompty object
123
+
124
+ Example
125
+ -------
126
+ >>> import prompty
127
+ >>> p = prompty.load("prompts/basic.prompty")
128
+ >>> print(p)
129
+ """
130
+
74
131
  p = Path(prompty_file)
75
132
  if not p.is_absolute():
76
133
  # get caller's path (take into account trace frame)
77
- caller = Path(traceback.extract_stack()[-2].filename)
134
+ caller = Path(traceback.extract_stack()[-3].filename)
78
135
  p = Path(caller.parent / p).resolve().absolute()
79
136
 
80
137
  # load dictionary from prompty file
@@ -175,11 +232,32 @@ def load(prompty_file: str, configuration: str = "default") -> Prompty:
175
232
  )
176
233
  return p
177
234
 
178
-
235
+ @trace(description="Prepare the inputs for the prompt.")
179
236
  def prepare(
180
237
  prompt: Prompty,
181
238
  inputs: Dict[str, any] = {},
182
239
  ):
240
+ """ Prepare the inputs for the prompt.
241
+
242
+ Parameters
243
+ ----------
244
+ prompt : Prompty
245
+ The prompty object
246
+ inputs : Dict[str, any], optional
247
+ The inputs to the prompt, by default {}
248
+
249
+ Returns
250
+ -------
251
+ dict
252
+ The prepared and hydrated template shaped to the LLM model
253
+
254
+ Example
255
+ -------
256
+ >>> import prompty
257
+ >>> p = prompty.load("prompts/basic.prompty")
258
+ >>> inputs = {"name": "John Doe"}
259
+ >>> content = prompty.prepare(p, inputs)
260
+ """
183
261
  inputs = param_hoisting(inputs, prompt.sample)
184
262
 
185
263
  if prompt.template.type == "NOOP":
@@ -200,7 +278,7 @@ def prepare(
200
278
 
201
279
  return result
202
280
 
203
-
281
+ @trace(description="Run the prepared Prompty content against the model.")
204
282
  def run(
205
283
  prompt: Prompty,
206
284
  content: dict | list | str,
@@ -208,7 +286,34 @@ def run(
208
286
  parameters: Dict[str, any] = {},
209
287
  raw: bool = False,
210
288
  ):
211
- # invoker = InvokerFactory()
289
+ """Run the prepared Prompty content.
290
+
291
+ Parameters
292
+ ----------
293
+ prompt : Prompty
294
+ The prompty object
295
+ content : dict | list | str
296
+ The content to process
297
+ configuration : Dict[str, any], optional
298
+ The configuration to use, by default {}
299
+ parameters : Dict[str, any], optional
300
+ The parameters to use, by default {}
301
+ raw : bool, optional
302
+ Whether to skip processing, by default False
303
+
304
+ Returns
305
+ -------
306
+ any
307
+ The result of the prompt
308
+
309
+ Example
310
+ -------
311
+ >>> import prompty
312
+ >>> p = prompty.load("prompts/basic.prompty")
313
+ >>> inputs = {"name": "John Doe"}
314
+ >>> content = prompty.prepare(p, inputs)
315
+ >>> result = prompty.run(p, content)
316
+ """
212
317
 
213
318
  if configuration != {}:
214
319
  prompt.model.configuration = param_hoisting(
@@ -234,7 +339,7 @@ def run(
234
339
 
235
340
  return result
236
341
 
237
-
342
+ @trace(description="Execute a prompty")
238
343
  def execute(
239
344
  prompt: Union[str, Prompty],
240
345
  configuration: Dict[str, any] = {},
@@ -243,12 +348,39 @@ def execute(
243
348
  raw: bool = False,
244
349
  connection: str = "default",
245
350
  ):
246
-
351
+ """Execute a prompty.
352
+
353
+ Parameters
354
+ ----------
355
+ prompt : Union[str, Prompty]
356
+ The prompty object or path to the prompty file
357
+ configuration : Dict[str, any], optional
358
+ The configuration to use, by default {}
359
+ parameters : Dict[str, any], optional
360
+ The parameters to use, by default {}
361
+ inputs : Dict[str, any], optional
362
+ The inputs to the prompt, by default {}
363
+ raw : bool, optional
364
+ Whether to skip processing, by default False
365
+ connection : str, optional
366
+ The connection to use, by default "default"
367
+
368
+ Returns
369
+ -------
370
+ any
371
+ The result of the prompt
372
+
373
+ Example
374
+ -------
375
+ >>> import prompty
376
+ >>> inputs = {"name": "John Doe"}
377
+ >>> result = prompty.execute("prompts/basic.prompty", inputs=inputs)
378
+ """
247
379
  if isinstance(prompt, str):
248
380
  path = Path(prompt)
249
381
  if not path.is_absolute():
250
382
  # get caller's path (take into account trace frame)
251
- caller = Path(traceback.extract_stack()[-2].filename)
383
+ caller = Path(traceback.extract_stack()[-3].filename)
252
384
  path = Path(caller.parent / path).resolve().absolute()
253
385
  prompt = load(path, connection)
254
386
 
@@ -0,0 +1,85 @@
1
+ import os
2
+ import json
3
+ import click
4
+
5
+
6
+ from pathlib import Path
7
+ from pydantic import BaseModel
8
+
9
+ from . import load, execute
10
+ from .tracer import trace, Trace, PromptyTracer
11
+ from dotenv import load_dotenv
12
+
13
+ load_dotenv()
14
+ Trace.add_tracer("prompty", PromptyTracer())
15
+
16
def normalize_path(p, create_dir=False) -> Path:
    """Resolve *p* to an absolute, normalized :class:`Path`.

    Relative paths are interpreted against the current working directory.
    When ``create_dir`` is true, the resulting directory is created
    (with a notice printed) if it does not already exist.
    """
    candidate = Path(p)
    if candidate.is_absolute():
        candidate = candidate.absolute().resolve()
    else:
        candidate = Path(os.getcwd()).joinpath(candidate).absolute().resolve()

    # optionally materialize the directory on disk
    if create_dir and not candidate.exists():
        print(f"Creating directory {str(candidate)}")
        os.makedirs(str(candidate))

    return candidate
29
+
30
+
31
@trace
def chat_mode(prompt_path: str):
    """Run the prompty at *prompt_path* as an interactive terminal chat.

    The prompty's sample must declare a ``chat_history`` input, which is
    appended to on every turn. Type ``exit`` to leave the loop.
    """
    # ANSI escape codes for colored terminal output
    white = "\033[0m"   # reset / normal
    red = "\033[31m"
    green = "\033[32m"
    blue = "\033[34m"

    print(f"Executing {str(prompt_path)} in chat mode...")
    prompty = load(str(prompt_path))

    # guard: chat mode is only meaningful with a chat_history input
    if "chat_history" not in prompty.sample:
        print(
            f"{red}{str(prompt_path)} needs to have a chat_history input to work in chat mode{white}"
        )
        return

    chat_history = prompty.sample["chat_history"]
    while True:
        user_input = input(f"{blue}User:{white} ")
        if user_input == "exit":
            break
        chat_history.append({"role": "user", "content": user_input})
        # execute from the file path each turn so edits to the prompty
        # file are picked up without restarting the session
        result = execute(prompt_path, inputs={"chat_history": chat_history})
        print(f"\n{green}Assistant:{white} {result}\n")
        chat_history.append({"role": "assistant", "content": result})
    print("Goodbye!")
58
+
59
+
60
@click.command()
@click.option("--source", "-s", required=True)
@click.option("--verbose", "-v", is_flag=True)
@click.option("--chat", "-c", is_flag=True)
@click.version_option()
@trace
def run(source, verbose, chat):
    """CLI entry point: execute a prompty file, optionally in chat mode."""
    prompt_path = normalize_path(source)
    if not prompt_path.exists():
        print(f"{str(prompt_path)} does not exist")
        return

    if chat:
        chat_mode(str(prompt_path))
        return

    result = execute(str(prompt_path), raw=verbose)
    # pydantic results (single model or list of models) are pretty-printed
    # as JSON; anything else is printed verbatim
    if issubclass(type(result), BaseModel):
        print(json.dumps(result.model_dump(), indent=4))
    elif isinstance(result, list):
        print(json.dumps([item.model_dump() for item in result], indent=4))
    else:
        print(result)
82
+
83
+
84
if __name__ == "__main__":
    # BUG FIX: chat_mode's only parameter is `prompt_path`, so calling it
    # with `source=` raised TypeError; also the test asset is
    # "basic.prompty", not "basic.prompt".
    chat_mode(prompt_path="./tests/prompts/basic.prompty")