prompty 0.1.1__py3-none-any.whl → 0.1.8__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
prompty/__init__.py CHANGED
@@ -2,6 +2,8 @@ import json
2
2
  import traceback
3
3
  from pathlib import Path
4
4
  from typing import Dict, List, Union
5
+
6
+ from .tracer import trace
5
7
  from .core import (
6
8
  Frontmatter,
7
9
  InvokerFactory,
@@ -46,6 +48,7 @@ def load_global_config(
46
48
  return {}
47
49
 
48
50
 
51
+ @trace(description="Create a headless prompty object for programmatic use.")
49
52
  def headless(
50
53
  api: str,
51
54
  content: str | List[str] | dict,
@@ -53,6 +56,38 @@ def headless(
53
56
  parameters: Dict[str, any] = {},
54
57
  connection: str = "default",
55
58
  ) -> Prompty:
59
+ """Create a headless prompty object for programmatic use.
60
+
61
+ Parameters
62
+ ----------
63
+ api : str
64
+ The API to use for the model
65
+ content : str | List[str] | dict
66
+ The content to process
67
+ configuration : Dict[str, any], optional
68
+ The configuration to use, by default {}
69
+ parameters : Dict[str, any], optional
70
+ The parameters to use, by default {}
71
+ connection : str, optional
72
+ The connection to use, by default "default"
73
+
74
+ Returns
75
+ -------
76
+ Prompty
77
+ The headless prompty object
78
+
79
+ Example
80
+ -------
81
+ >>> import prompty
82
+ >>> p = prompty.headless(
83
+ api="embedding",
84
+ configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
85
+ content="hello world",
86
+ )
87
+ >>> emb = prompty.execute(p)
88
+
89
+ """
90
+
56
91
  # get caller's path (to get relative path for prompty.json)
57
92
  caller = Path(traceback.extract_stack()[-2].filename)
58
93
  templateSettings = TemplateSettings(type="NOOP", parser="NOOP")
@@ -70,11 +105,33 @@ def headless(
70
105
  return Prompty(model=modelSettings, template=templateSettings, content=content)
71
106
 
72
107
 
108
+ @trace(description="Load a prompty file.")
73
109
  def load(prompty_file: str, configuration: str = "default") -> Prompty:
110
+ """Load a prompty file.
111
+
112
+ Parameters
113
+ ----------
114
+ prompty_file : str
115
+ The path to the prompty file
116
+ configuration : str, optional
117
+ The configuration to use, by default "default"
118
+
119
+ Returns
120
+ -------
121
+ Prompty
122
+ The loaded prompty object
123
+
124
+ Example
125
+ -------
126
+ >>> import prompty
127
+ >>> p = prompty.load("prompts/basic.prompty")
128
+ >>> print(p)
129
+ """
130
+
74
131
  p = Path(prompty_file)
75
132
  if not p.is_absolute():
76
133
  # get caller's path (take into account trace frame)
77
- caller = Path(traceback.extract_stack()[-2].filename)
134
+ caller = Path(traceback.extract_stack()[-3].filename)
78
135
  p = Path(caller.parent / p).resolve().absolute()
79
136
 
80
137
  # load dictionary from prompty file
@@ -175,11 +232,32 @@ def load(prompty_file: str, configuration: str = "default") -> Prompty:
175
232
  )
176
233
  return p
177
234
 
178
-
235
+ @trace(description="Prepare the inputs for the prompt.")
179
236
  def prepare(
180
237
  prompt: Prompty,
181
238
  inputs: Dict[str, any] = {},
182
239
  ):
240
+ """ Prepare the inputs for the prompt.
241
+
242
+ Parameters
243
+ ----------
244
+ prompt : Prompty
245
+ The prompty object
246
+ inputs : Dict[str, any], optional
247
+ The inputs to the prompt, by default {}
248
+
249
+ Returns
250
+ -------
251
+ dict
252
+ The prepared and hidrated template shaped to the LLM model
253
+
254
+ Example
255
+ -------
256
+ >>> import prompty
257
+ >>> p = prompty.load("prompts/basic.prompty")
258
+ >>> inputs = {"name": "John Doe"}
259
+ >>> content = prompty.prepare(p, inputs)
260
+ """
183
261
  inputs = param_hoisting(inputs, prompt.sample)
184
262
 
185
263
  if prompt.template.type == "NOOP":
@@ -200,7 +278,7 @@ def prepare(
200
278
 
201
279
  return result
202
280
 
203
-
281
+ @trace(description="Run the prepared Prompty content against the model.")
204
282
  def run(
205
283
  prompt: Prompty,
206
284
  content: dict | list | str,
@@ -208,7 +286,34 @@ def run(
208
286
  parameters: Dict[str, any] = {},
209
287
  raw: bool = False,
210
288
  ):
211
- # invoker = InvokerFactory()
289
+ """Run the prepared Prompty content.
290
+
291
+ Parameters
292
+ ----------
293
+ prompt : Prompty
294
+ The prompty object
295
+ content : dict | list | str
296
+ The content to process
297
+ configuration : Dict[str, any], optional
298
+ The configuration to use, by default {}
299
+ parameters : Dict[str, any], optional
300
+ The parameters to use, by default {}
301
+ raw : bool, optional
302
+ Whether to skip processing, by default False
303
+
304
+ Returns
305
+ -------
306
+ any
307
+ The result of the prompt
308
+
309
+ Example
310
+ -------
311
+ >>> import prompty
312
+ >>> p = prompty.load("prompts/basic.prompty")
313
+ >>> inputs = {"name": "John Doe"}
314
+ >>> content = prompty.prepare(p, inputs)
315
+ >>> result = prompty.run(p, content)
316
+ """
212
317
 
213
318
  if configuration != {}:
214
319
  prompt.model.configuration = param_hoisting(
@@ -234,7 +339,7 @@ def run(
234
339
 
235
340
  return result
236
341
 
237
-
342
+ @trace(description="Execute a prompty")
238
343
  def execute(
239
344
  prompt: Union[str, Prompty],
240
345
  configuration: Dict[str, any] = {},
@@ -243,12 +348,39 @@ def execute(
243
348
  raw: bool = False,
244
349
  connection: str = "default",
245
350
  ):
246
-
351
+ """Execute a prompty.
352
+
353
+ Parameters
354
+ ----------
355
+ prompt : Union[str, Prompty]
356
+ The prompty object or path to the prompty file
357
+ configuration : Dict[str, any], optional
358
+ The configuration to use, by default {}
359
+ parameters : Dict[str, any], optional
360
+ The parameters to use, by default {}
361
+ inputs : Dict[str, any], optional
362
+ The inputs to the prompt, by default {}
363
+ raw : bool, optional
364
+ Whether to skip processing, by default False
365
+ connection : str, optional
366
+ The connection to use, by default "default"
367
+
368
+ Returns
369
+ -------
370
+ any
371
+ The result of the prompt
372
+
373
+ Example
374
+ -------
375
+ >>> import prompty
376
+ >>> inputs = {"name": "John Doe"}
377
+ >>> result = prompty.execute("prompts/basic.prompty", inputs=inputs)
378
+ """
247
379
  if isinstance(prompt, str):
248
380
  path = Path(prompt)
249
381
  if not path.is_absolute():
250
382
  # get caller's path (take into account trace frame)
251
- caller = Path(traceback.extract_stack()[-2].filename)
383
+ caller = Path(traceback.extract_stack()[-3].filename)
252
384
  path = Path(caller.parent / path).resolve().absolute()
253
385
  prompt = load(path, connection)
254
386
 
prompty/cli.py ADDED
@@ -0,0 +1,85 @@
1
+ import os
2
+ import json
3
+ import click
4
+
5
+
6
+ from pathlib import Path
7
+ from pydantic import BaseModel
8
+
9
+ from . import load, execute
10
+ from .tracer import trace, Trace, PromptyTracer
11
+ from dotenv import load_dotenv
12
+
13
+ load_dotenv()
14
+ Trace.add_tracer("prompty", PromptyTracer())
15
+
16
def normalize_path(p, create_dir=False) -> Path:
    """Resolve *p* into an absolute path, optionally creating the directory.

    Parameters
    ----------
    p : str | Path
        The path to normalize; relative paths are anchored at the CWD.
    create_dir : bool, optional
        When True, create the directory (and parents) if it does not exist.

    Returns
    -------
    Path
        The absolute, resolved path.
    """
    candidate = Path(p)
    if candidate.is_absolute():
        normalized = candidate.absolute().resolve()
    else:
        # relative paths are interpreted against the current working directory
        normalized = Path(os.getcwd()).joinpath(candidate).absolute().resolve()

    if create_dir and not normalized.exists():
        print(f"Creating directory {str(normalized)}")
        os.makedirs(str(normalized))

    return normalized
29
+
30
+
31
@trace
def chat_mode(prompt_path: str):
    """Run an interactive terminal chat session against a prompty file.

    The prompty's sample must declare a ``chat_history`` input; each turn is
    appended to it and the file is re-executed so edits are picked up live.
    Type ``exit`` to quit.
    """
    # ANSI terminal color escape codes
    W = "\033[0m"  # white (normal)
    R = "\033[31m"  # red
    G = "\033[32m"  # green
    O = "\033[33m"  # orange
    B = "\033[34m"  # blue
    P = "\033[35m"  # purple
    print(f"Executing {str(prompt_path)} in chat mode...")
    prompty = load(str(prompt_path))
    # guard: chat mode only works when the prompty declares a chat_history input
    if "chat_history" not in prompty.sample:
        print(
            f"{R}{str(prompt_path)} needs to have a chat_history input to work in chat mode{W}"
        )
        return

    chat_history = prompty.sample["chat_history"]
    while True:
        user_input = input(f"{B}User:{W} ")
        if user_input == "exit":
            break
        chat_history.append({"role": "user", "content": user_input})
        # re-executed from disk each turn so the prompty file is reloadable
        result = execute(prompt_path, inputs={"chat_history": chat_history})
        print(f"\n{G}Assistant:{W} {result}\n")
        chat_history.append({"role": "assistant", "content": result})
    print("Goodbye!")
58
+
59
+
60
@click.command()
@click.option("--source", "-s", required=True)
@click.option("--verbose", "-v", is_flag=True)
@click.option("--chat", "-c", is_flag=True)
@click.version_option()
@trace
def run(source, verbose, chat):
    """CLI entry point: execute a prompty file, or chat with it via --chat."""
    prompt_path = normalize_path(source)
    if not prompt_path.exists():
        print(f"{str(prompt_path)} does not exist")
        return

    if chat:
        chat_mode(str(prompt_path))
        return

    result = execute(str(prompt_path), raw=verbose)
    # pretty-print pydantic results as JSON; fall back to plain print otherwise
    if issubclass(type(result), BaseModel):
        print(json.dumps(result.model_dump(), indent=4))
    elif isinstance(result, list):
        print(json.dumps([item.model_dump() for item in result], indent=4))
    else:
        print(result)
82
+
83
+
84
if __name__ == "__main__":
    # BUG FIX: chat_mode's parameter is `prompt_path`, not `source`; the
    # previous keyword call raised TypeError. Pass the path positionally.
    chat_mode("./tests/prompts/basic.prompt")
prompty/core.py CHANGED
@@ -7,26 +7,82 @@ import json
7
7
  import abc
8
8
  from pathlib import Path
9
9
  from pydantic import BaseModel, Field, FilePath
10
- from typing import List, Literal, Dict, Callable, TypeVar
11
-
12
-
13
- T = TypeVar("T")
10
+ from typing import List, Literal, Dict, Callable, Set, TypeVar
11
+ from .tracer import trace
14
12
 
15
13
 
16
14
  class PropertySettings(BaseModel):
15
+ """PropertySettings class to define the properties of the model
16
+
17
+ Attributes
18
+ ----------
19
+ type : str
20
+ The type of the property
21
+ default : any
22
+ The default value of the property
23
+ description : str
24
+ The description of the property
25
+ """
26
+
17
27
  type: Literal["string", "number", "array", "object", "boolean"]
18
28
  default: str | int | float | List | dict | bool = Field(default=None)
19
29
  description: str = Field(default="")
20
30
 
21
31
 
22
32
  class ModelSettings(BaseModel):
33
+ """ModelSettings class to define the model of the prompty
34
+
35
+ Attributes
36
+ ----------
37
+ api : str
38
+ The api of the model
39
+ configuration : dict
40
+ The configuration of the model
41
+ parameters : dict
42
+ The parameters of the model
43
+ response : dict
44
+ The response of the model
45
+ """
46
+
23
47
  api: str = Field(default="")
24
48
  configuration: dict = Field(default={})
25
49
  parameters: dict = Field(default={})
26
50
  response: dict = Field(default={})
27
51
 
28
- def model_dump_safe(self) -> dict:
29
- d = self.model_dump()
52
+ def model_dump(
53
+ self,
54
+ *,
55
+ mode: str = "python",
56
+ include: (
57
+ Set[int] | Set[str] | Dict[int, os.Any] | Dict[str, os.Any] | None
58
+ ) = None,
59
+ exclude: (
60
+ Set[int] | Set[str] | Dict[int, os.Any] | Dict[str, os.Any] | None
61
+ ) = None,
62
+ context: os.Any | None = None,
63
+ by_alias: bool = False,
64
+ exclude_unset: bool = False,
65
+ exclude_defaults: bool = False,
66
+ exclude_none: bool = False,
67
+ round_trip: bool = False,
68
+ warnings: bool | Literal["none"] | Literal["warn"] | Literal["error"] = True,
69
+ serialize_as_any: bool = False,
70
+ ) -> Dict[str, os.Any]:
71
+ """Method to dump the model in a safe way"""
72
+ d = super().model_dump(
73
+ mode=mode,
74
+ include=include,
75
+ exclude=exclude,
76
+ context=context,
77
+ by_alias=by_alias,
78
+ exclude_unset=exclude_unset,
79
+ exclude_defaults=exclude_defaults,
80
+ exclude_none=exclude_none,
81
+ round_trip=round_trip,
82
+ warnings=warnings,
83
+ serialize_as_any=serialize_as_any,
84
+ )
85
+
30
86
  d["configuration"] = {
31
87
  k: "*" * len(v) if "key" in k.lower() or "secret" in k.lower() else v
32
88
  for k, v in d["configuration"].items()
@@ -35,11 +91,55 @@ class ModelSettings(BaseModel):
35
91
 
36
92
 
37
93
  class TemplateSettings(BaseModel):
94
+ """TemplateSettings class to define the template of the prompty
95
+
96
+ Attributes
97
+ ----------
98
+ type : str
99
+ The type of the template
100
+ parser : str
101
+ The parser of the template
102
+ """
103
+
38
104
  type: str = Field(default="jinja2")
39
105
  parser: str = Field(default="")
40
106
 
41
107
 
42
108
  class Prompty(BaseModel):
109
+ """Prompty class to define the prompty
110
+
111
+ Attributes
112
+ ----------
113
+ name : str
114
+ The name of the prompty
115
+ description : str
116
+ The description of the prompty
117
+ authors : List[str]
118
+ The authors of the prompty
119
+ tags : List[str]
120
+ The tags of the prompty
121
+ version : str
122
+ The version of the prompty
123
+ base : str
124
+ The base of the prompty
125
+ basePrompty : Prompty
126
+ The base prompty
127
+ model : ModelSettings
128
+ The model of the prompty
129
+ sample : dict
130
+ The sample of the prompty
131
+ inputs : Dict[str, PropertySettings]
132
+ The inputs of the prompty
133
+ outputs : Dict[str, PropertySettings]
134
+ The outputs of the prompty
135
+ template : TemplateSettings
136
+ The template of the prompty
137
+ file : FilePath
138
+ The file of the prompty
139
+ content : str | List[str] | dict
140
+ The content of the prompty
141
+ """
142
+
43
143
  # metadata
44
144
  name: str = Field(default="")
45
145
  description: str = Field(default="")
@@ -69,7 +169,7 @@ class Prompty(BaseModel):
69
169
  for k, v in self:
70
170
  if v != "" and v != {} and v != [] and v != None:
71
171
  if k == "model":
72
- d[k] = v.model_dump_safe()
172
+ d[k] = v.model_dump()
73
173
  elif k == "template":
74
174
  d[k] = v.model_dump()
75
175
  elif k == "inputs" or k == "outputs":
@@ -88,11 +188,6 @@ class Prompty(BaseModel):
88
188
  d[k] = v
89
189
  return d
90
190
 
91
- # generate json representation of the prompty
92
- def to_safe_json(self) -> str:
93
- d = self.to_safe_dict()
94
- return json.dumps(d)
95
-
96
191
  @staticmethod
97
192
  def _process_file(file: str, parent: Path) -> any:
98
193
  file = Path(parent / Path(file)).resolve().absolute()
@@ -180,18 +275,57 @@ def param_hoisting(
180
275
 
181
276
 
182
277
  class Invoker(abc.ABC):
278
+ """Abstract class for Invoker
279
+
280
+ Attributes
281
+ ----------
282
+ prompty : Prompty
283
+ The prompty object
284
+ name : str
285
+ The name of the invoker
286
+
287
+ """
288
+
183
289
  def __init__(self, prompty: Prompty) -> None:
184
290
  self.prompty = prompty
291
+ self.name = self.__class__.__name__
185
292
 
186
293
  @abc.abstractmethod
187
294
  def invoke(self, data: any) -> any:
295
+ """Abstract method to invoke the invoker
296
+
297
+ Parameters
298
+ ----------
299
+ data : any
300
+ The data to be invoked
301
+
302
+ Returns
303
+ -------
304
+ any
305
+ The invoked
306
+ """
188
307
  pass
189
308
 
309
+ @trace
190
310
  def __call__(self, data: any) -> any:
311
+ """Method to call the invoker
312
+
313
+ Parameters
314
+ ----------
315
+ data : any
316
+ The data to be invoked
317
+
318
+ Returns
319
+ -------
320
+ any
321
+ The invoked
322
+ """
191
323
  return self.invoke(data)
192
324
 
193
325
 
194
326
  class InvokerFactory:
327
+ """Factory class for Invoker"""
328
+
195
329
  _renderers: Dict[str, Invoker] = {}
196
330
  _parsers: Dict[str, Invoker] = {}
197
331
  _executors: Dict[str, Invoker] = {}
@@ -267,6 +401,8 @@ class NoOp(Invoker):
267
401
 
268
402
 
269
403
  class Frontmatter:
404
+ """Frontmatter class to extract frontmatter from string."""
405
+
270
406
  _yaml_delim = r"(?:---|\+\+\+)"
271
407
  _yaml = r"(.*?)"
272
408
  _content = r"\s*(.+)$"
@@ -275,8 +411,12 @@ class Frontmatter:
275
411
 
276
412
  @classmethod
277
413
  def read_file(cls, path):
278
- """Reads file at path and returns dict with separated frontmatter.
279
- See read() for more info on dict return value.
414
+ """Returns dict with separated frontmatter from file.
415
+
416
+ Parameters
417
+ ----------
418
+ path : str
419
+ The path to the file
280
420
  """
281
421
  with open(path, encoding="utf-8") as file:
282
422
  file_contents = file.read()
@@ -286,10 +426,16 @@ class Frontmatter:
286
426
  def read(cls, string):
287
427
  """Returns dict with separated frontmatter from string.
288
428
 
289
- Returned dict keys:
290
- attributes -- extracted YAML attributes in dict form.
291
- body -- string contents below the YAML separators
292
- frontmatter -- string representation of YAML
429
+ Parameters
430
+ ----------
431
+ string : str
432
+ The string to extract frontmatter from
433
+
434
+
435
+ Returns
436
+ -------
437
+ dict
438
+ The separated frontmatter
293
439
  """
294
440
  fmatter = ""
295
441
  body = ""
prompty/executors.py CHANGED
@@ -1,14 +1,18 @@
1
1
  import azure.identity
2
+ from .tracer import Trace
2
3
  from openai import AzureOpenAI
3
4
  from .core import Invoker, InvokerFactory, Prompty
4
- from pathlib import Path
5
+ import importlib.metadata
6
+
7
+ VERSION = importlib.metadata.version("prompty")
5
8
 
6
9
 
7
10
  @InvokerFactory.register_executor("azure")
8
11
  @InvokerFactory.register_executor("azure_openai")
9
12
  class AzureOpenAIExecutor(Invoker):
13
+ """ Azure OpenAI Executor """
10
14
  def __init__(self, prompty: Prompty) -> None:
11
- self.prompty = prompty
15
+ super().__init__(prompty)
12
16
  kwargs = {
13
17
  key: value
14
18
  for key, value in self.prompty.model.configuration.items()
@@ -35,7 +39,10 @@ class AzureOpenAIExecutor(Invoker):
35
39
  )
36
40
 
37
41
  self.client = AzureOpenAI(
38
- default_headers={"User-Agent": "prompty/0.1.0"},
42
+ default_headers={
43
+ "User-Agent": f"prompty{VERSION}",
44
+ "x-ms-useragent": f"prompty/{VERSION}",
45
+ },
39
46
  **kwargs,
40
47
  )
41
48
 
@@ -44,12 +51,25 @@ class AzureOpenAIExecutor(Invoker):
44
51
  self.parameters = self.prompty.model.parameters
45
52
 
46
53
  def invoke(self, data: any) -> any:
54
+ """ Invoke the Azure OpenAI API
55
+
56
+ Parameters
57
+ ----------
58
+ data : any
59
+ The data to send to the Azure OpenAI API
60
+
61
+ Returns
62
+ -------
63
+ any
64
+ The response from the Azure OpenAI API
65
+ """
47
66
  if self.api == "chat":
48
67
  response = self.client.chat.completions.create(
49
68
  model=self.deployment,
50
69
  messages=data if isinstance(data, list) else [data],
51
70
  **self.parameters,
52
71
  )
72
+
53
73
  elif self.api == "completion":
54
74
  response = self.client.completions.create(
55
75
  prompt=data.item,
@@ -67,4 +87,9 @@ class AzureOpenAIExecutor(Invoker):
67
87
  elif self.api == "image":
68
88
  raise NotImplementedError("Azure OpenAI Image API is not implemented yet")
69
89
 
90
+ if hasattr(response, "usage") and response.usage:
91
+ Trace.add("completion_tokens", response.usage.completion_tokens)
92
+ Trace.add("prompt_tokens", response.usage.prompt_tokens)
93
+ Trace.add("total_tokens", response.usage.total_tokens)
94
+
70
95
  return response
prompty/parsers.py CHANGED
@@ -5,12 +5,25 @@ from .core import Invoker, InvokerFactory, Prompty
5
5
 
6
6
  @InvokerFactory.register_parser("prompty.chat")
7
7
  class PromptyChatParser(Invoker):
8
+ """ Prompty Chat Parser """
8
9
  def __init__(self, prompty: Prompty) -> None:
9
- self.prompty = prompty
10
+ super().__init__(prompty)
10
11
  self.roles = ["assistant", "function", "system", "user"]
11
12
  self.path = self.prompty.file.parent
12
13
 
13
14
  def inline_image(self, image_item: str) -> str:
15
+ """ Inline Image
16
+
17
+ Parameters
18
+ ----------
19
+ image_item : str
20
+ The image item to inline
21
+
22
+ Returns
23
+ -------
24
+ str
25
+ The inlined image
26
+ """
14
27
  # pass through if it's a url or base64 encoded
15
28
  if image_item.startswith("http") or image_item.startswith("data"):
16
29
  return image_item
@@ -32,7 +45,18 @@ class PromptyChatParser(Invoker):
32
45
  )
33
46
 
34
47
  def parse_content(self, content: str):
35
- """for parsing inline images"""
48
+ """ for parsing inline images
49
+
50
+ Parameters
51
+ ----------
52
+ content : str
53
+ The content to parse
54
+
55
+ Returns
56
+ -------
57
+ any
58
+ The parsed content
59
+ """
36
60
  # regular expression to parse markdown images
37
61
  image = r"(?P<alt>!\[[^\]]*\])\((?P<filename>.*?)(?=\"|\))\)"
38
62
  matches = re.findall(image, content, flags=re.MULTILINE)
@@ -73,6 +97,18 @@ class PromptyChatParser(Invoker):
73
97
  return content
74
98
 
75
99
  def invoke(self, data: str) -> str:
100
+ """ Invoke the Prompty Chat Parser
101
+
102
+ Parameters
103
+ ----------
104
+ data : str
105
+ The data to parse
106
+
107
+ Returns
108
+ -------
109
+ str
110
+ The parsed data
111
+ """
76
112
  messages = []
77
113
  separator = r"(?i)^\s*#?\s*(" + "|".join(self.roles) + r")\s*:\s*\n"
78
114
 
prompty/processors.py CHANGED
@@ -1,3 +1,6 @@
1
+ from .tracer import Trace
2
+ from openai import Stream
3
+ from typing import Iterator
1
4
  from pydantic import BaseModel
2
5
  from openai.types.completion import Completion
3
6
  from .core import Invoker, InvokerFactory, Prompty
@@ -5,7 +8,6 @@ from openai.types.chat.chat_completion import ChatCompletion
5
8
  from openai.types.create_embedding_response import CreateEmbeddingResponse
6
9
 
7
10
 
8
-
9
11
  class ToolCall(BaseModel):
10
12
  id: str
11
13
  name: str
@@ -16,18 +18,25 @@ class ToolCall(BaseModel):
16
18
  @InvokerFactory.register_processor("azure")
17
19
  @InvokerFactory.register_processor("azure_openai")
18
20
  class OpenAIProcessor(Invoker):
21
+ """OpenAI/Azure Processor"""
22
+
19
23
  def __init__(self, prompty: Prompty) -> None:
20
- self.prompty = prompty
24
+ super().__init__(prompty)
21
25
 
22
26
  def invoke(self, data: any) -> any:
27
+ """Invoke the OpenAI/Azure API
28
+
29
+ Parameters
30
+ ----------
31
+ data : any
32
+ The data to send to the OpenAI/Azure API
23
33
 
24
- assert (
25
- isinstance(data, ChatCompletion)
26
- or isinstance(data, Completion)
27
- or isinstance(data, CreateEmbeddingResponse)
28
- )
34
+ Returns
35
+ -------
36
+ any
37
+ The response from the OpenAI/Azure API
38
+ """
29
39
  if isinstance(data, ChatCompletion):
30
- # TODO: Check for streaming response
31
40
  response = data.choices[0].message
32
41
  # tool calls available in response
33
42
  if response.tool_calls:
@@ -51,5 +60,15 @@ class OpenAIProcessor(Invoker):
51
60
  return data.data[0].embedding
52
61
  else:
53
62
  return [item.embedding for item in data.data]
63
+ elif isinstance(data, Iterator):
64
+
65
+ def generator():
66
+ for chunk in data:
67
+ if len(chunk.choices) == 1 and chunk.choices[0].delta.content != None:
68
+ content = chunk.choices[0].delta.content
69
+ Trace.add("stream", content)
70
+ yield content
71
+
72
+ return generator()
54
73
  else:
55
- raise ValueError("Invalid data type")
74
+ return data
prompty/renderers.py CHANGED
@@ -4,8 +4,9 @@ from .core import Invoker, InvokerFactory, Prompty
4
4
 
5
5
  @InvokerFactory.register_renderer("jinja2")
6
6
  class Jinja2Renderer(Invoker):
7
+ """ Jinja2 Renderer """
7
8
  def __init__(self, prompty: Prompty) -> None:
8
- self.prompty = prompty
9
+ super().__init__(prompty)
9
10
  self.templates = {}
10
11
  # generate template dictionary
11
12
  cur_prompt = self.prompty
prompty/tracer.py ADDED
@@ -0,0 +1,231 @@
1
+ import abc
2
+ import json
3
+ import inspect
4
+ import datetime
5
+ from numbers import Number
6
+ import os
7
+ from datetime import datetime
8
+ from pathlib import Path
9
+ from pydantic import BaseModel
10
+ from functools import wraps, partial
11
+ from typing import Any, Callable, Dict, List
12
+
13
+
14
class Tracer(abc.ABC):
    """Abstract interface every tracer implements: start/add/end frame protocol."""

    @abc.abstractmethod
    def start(self, name: str) -> None:
        """Open a new trace frame with the given name."""
        pass

    @abc.abstractmethod
    def add(self, key: str, value: Any) -> None:
        """Attach a key/value pair to the current frame."""
        pass

    @abc.abstractmethod
    def end(self) -> None:
        """Close the current frame."""
        pass


class Trace:
    """Static dispatcher that fans trace events out to every registered tracer."""

    _tracers: Dict[str, Tracer] = {}

    @classmethod
    def add_tracer(cls, name: str, tracer: Tracer) -> None:
        """Register a tracer instance under *name*."""
        cls._tracers[name] = tracer

    @classmethod
    def start(cls, name: str) -> None:
        """Open a frame named *name* on all registered tracers."""
        for registered in cls._tracers.values():
            registered.start(name)

    @classmethod
    def add(cls, name: str, value: Any) -> None:
        """Record *name*/*value* on the current frame of all tracers."""
        for registered in cls._tracers.values():
            registered.add(name, value)

    @classmethod
    def end(cls) -> None:
        """Close the current frame on all registered tracers."""
        for registered in cls._tracers.values():
            registered.end()

    @classmethod
    def clear(cls) -> None:
        """Drop every registered tracer."""
        cls._tracers = {}

    @classmethod
    def register(cls, name: str):
        """Class decorator that instantiates and registers a Tracer subclass."""

        def inner_wrapper(wrapped_class: Tracer) -> Callable:
            cls._tracers[name] = wrapped_class()
            return wrapped_class

        return inner_wrapper

    @classmethod
    def to_dict(cls, obj: Any) -> Dict[str, Any]:
        """Best-effort conversion of *obj* into a JSON-serializable value."""
        # simple json types pass through unchanged
        if isinstance(obj, (str, Number, bool)):
            return obj
        # datetimes serialize as ISO-8601 strings
        if isinstance(obj, datetime):
            return obj.isoformat()
        # safe Prompty obj serialization (matched by name to avoid a
        # circular import with prompty.core)
        if type(obj).__name__ == "Prompty":
            return obj.to_safe_dict()
        # pydantic models have their own json serialization
        if isinstance(obj, BaseModel):
            return obj.model_dump()
        # recursive list and dict
        if isinstance(obj, list):
            return [Trace.to_dict(item) for item in obj]
        if isinstance(obj, dict):
            return {
                key: val if isinstance(val, str) else Trace.to_dict(val)
                for key, val in obj.items()
            }
        if isinstance(obj, Path):
            return str(obj)
        # cast to string otherwise...
        return str(obj)
90
+
91
+
92
+ def _name(func: Callable, args):
93
+ if hasattr(func, "__qualname__"):
94
+ signature = f"{func.__module__}.{func.__qualname__}"
95
+ else:
96
+ signature = f"{func.__module__}.{func.__name__}"
97
+
98
+ # core invoker gets special treatment
99
+ core_invoker = signature == "prompty.core.Invoker.__call__"
100
+ if core_invoker:
101
+ name = type(args[0]).__name__
102
+ signature = f"{args[0].__module__}.{args[0].__class__.__name__}.invoke"
103
+ else:
104
+ name = func.__name__
105
+
106
+ return name, signature
107
+
108
+
109
def _inputs(func: Callable, args, kwargs) -> dict:
    """Bind *args*/*kwargs* to func's signature and serialize them for tracing.

    ``self`` is dropped so instance methods do not serialize their receiver.
    """
    bound = inspect.signature(func).bind(*args, **kwargs)
    bound.apply_defaults()

    return {
        name: Trace.to_dict(value)
        for name, value in bound.arguments.items()
        if name != "self"
    }
116
+
117
+ def _results(result: Any) -> dict:
118
+ return {
119
+ "result": Trace.to_dict(result) if result is not None else "None",
120
+ }
121
+
122
def _trace_sync(func: Callable = None, *, description: str = None) -> Callable:
    """Wrap a synchronous function so each call is recorded as a trace frame.

    Parameters
    ----------
    func : Callable
        The function to wrap
    description : str, optional
        Human-readable description stored on the trace frame

    Returns
    -------
    Callable
        The wrapped function
    """
    description = description or ""

    @wraps(func)
    def wrapper(*args, **kwargs):
        name, signature = _name(func, args)
        Trace.start(name)
        Trace.add("signature", signature)
        if description and description != "":
            Trace.add("description", description)

        inputs = _inputs(func, args, kwargs)
        Trace.add("inputs", inputs)

        # BUG FIX: always close the frame, even when func raises, so the
        # tracer stack does not leak an open frame into subsequent traces
        try:
            result = func(*args, **kwargs)
            Trace.add("result", _results(result))
        finally:
            Trace.end()

        return result

    return wrapper
144
+
145
def _trace_async(func: Callable = None, *, description: str = None) -> Callable:
    """Wrap a coroutine function so each awaited call is recorded as a trace frame.

    Parameters
    ----------
    func : Callable
        The coroutine function to wrap
    description : str, optional
        Human-readable description stored on the trace frame

    Returns
    -------
    Callable
        The wrapped coroutine function
    """
    description = description or ""

    @wraps(func)
    async def wrapper(*args, **kwargs):
        name, signature = _name(func, args)
        Trace.start(name)
        Trace.add("signature", signature)
        if description and description != "":
            Trace.add("description", description)

        inputs = _inputs(func, args, kwargs)
        Trace.add("inputs", inputs)

        # BUG FIX: always close the frame, even when func raises, so the
        # tracer stack does not leak an open frame into subsequent traces
        try:
            result = await func(*args, **kwargs)
            Trace.add("result", _results(result))
        finally:
            Trace.end()

        return result

    return wrapper
167
+
168
def trace(func: Callable = None, *, description: str = None) -> Callable:
    """Tracing decorator, usable bare (``@trace``) or with ``@trace(description=...)``.

    Dispatches to the async wrapper for coroutine functions and the sync
    wrapper otherwise.
    """
    # called as @trace(description=...): return a decorator awaiting the func
    if func is None:
        return partial(trace, description=description)

    if inspect.iscoroutinefunction(func):
        return _trace_async(func, description=description)
    return _trace_sync(func, description=description)
177
+
178
+
179
class PromptyTracer(Tracer):
    """Tracer that accumulates nested frames and writes each completed root
    frame to a timestamped ``.ptrace`` JSON file.

    Parameters
    ----------
    output_dir : str, optional
        Directory for trace files; defaults to ``./.runs`` under the CWD
        (created if missing).
    """

    def __init__(self, output_dir: str = None) -> None:
        super().__init__()
        # BUG FIX: _stack/_name were mutable class attributes, shared across
        # every PromptyTracer instance; keep them per-instance instead
        self._stack: List[Dict[str, Any]] = []
        self._name: str = None

        if output_dir:
            self.root = Path(output_dir).resolve().absolute()
        else:
            self.root = Path(Path(os.getcwd()) / ".runs").resolve().absolute()

        if not self.root.exists():
            self.root.mkdir(parents=True, exist_ok=True)

    def start(self, name: str) -> None:
        """Push a new frame; the first frame's name becomes the trace file name."""
        self._stack.append({"name": name})
        # first entry frame
        if self._name is None:
            self._name = name

    def add(self, name: str, value: Any) -> None:
        """Record *value* under *name* on the current frame.

        Repeated keys accumulate into a list instead of overwriting.
        """
        frame = self._stack[-1]
        if name not in frame:
            frame[name] = value
        # multiple values creates list
        elif isinstance(frame[name], list):
            frame[name].append(value)
        else:
            frame[name] = [frame[name], value]

    def end(self) -> None:
        """Pop the current frame; flush to disk when the root frame closes,
        otherwise nest it under the parent frame's ``__frames`` list."""
        frame = self._stack.pop()

        # if stack is empty, dump the frame
        if len(self._stack) == 0:
            self.flush(frame)
        # otherwise, append the frame to the parent
        else:
            parent = self._stack[-1]
            if "__frames" not in parent:
                parent["__frames"] = []
            parent["__frames"].append(frame)

    def flush(self, frame: Dict[str, Any]) -> None:
        """Write *frame* as pretty-printed JSON to a timestamped ``.ptrace`` file."""
        trace_file = (
            self.root / f"{self._name}.{datetime.now().strftime('%Y%m%d.%H%M%S')}.ptrace"
        )

        with open(trace_file, "w") as f:
            json.dump(frame, f, indent=4)
@@ -0,0 +1,136 @@
1
+ Metadata-Version: 2.1
2
+ Name: prompty
3
+ Version: 0.1.8
4
+ Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
5
+ Author-Email: Seth Juarez <seth.juarez@microsoft.com>
6
+ License: MIT
7
+ Requires-Python: >=3.9
8
+ Requires-Dist: pyyaml>=6.0.1
9
+ Requires-Dist: pydantic>=2.8.2
10
+ Requires-Dist: jinja2>=3.1.4
11
+ Requires-Dist: openai>=1.35.10
12
+ Requires-Dist: azure-identity>=1.17.1
13
+ Requires-Dist: python-dotenv>=1.0.1
14
+ Requires-Dist: click>=8.1.7
15
+ Description-Content-Type: text/markdown
16
+
17
+
18
 + Prompty is an asset class and format for LLM prompts designed to enhance observability, understandability, and portability for developers. The primary goal is to accelerate the developer inner loop of prompt engineering and prompt source management in a cross-language and cross-platform implementation.
19
+
20
+ The file format has a supporting toolchain with a VS Code extension and runtimes in multiple programming languages to simplify and accelerate your AI application development.
21
+
22
 + The tooling comes together in three ways: the *prompty file asset*, the *VS Code extension tool*, and *runtimes* in multiple programming languages.
23
+
24
+ ## The Prompty File Format
25
+ Prompty is a language agnostic prompt asset for creating prompts and engineering the responses. Learn more about the format [here](https://prompty.ai/docs/prompty-file-spec).
26
+
27
 + Example prompty file:
28
+ ```markdown
29
+ ---
30
+ name: Basic Prompt
31
+ description: A basic prompt that uses the GPT-3 chat API to answer questions
32
+ authors:
33
+ - sethjuarez
34
+ - jietong
35
+ model:
36
+ api: chat
37
+ configuration:
38
+ azure_deployment: gpt-35-turbo
39
+ sample:
40
+ firstName: Jane
41
+ lastName: Doe
42
+ question: What is the meaning of life?
43
+ ---
44
+ system:
45
+ You are an AI assistant who helps people find information.
46
+ As the assistant, you answer questions briefly, succinctly,
47
+ and in a personable manner using markdown and even add some personal flair with appropriate emojis.
48
+
49
+ # Customer
50
+ You are helping {{firstName}} {{lastName}} to find answers to their questions.
51
+ Use their name to address them in your responses.
52
+
53
+ user:
54
+ {{question}}
55
+ ```
56
+
57
+
58
+ ## The Prompty VS Code Extension
59
+ Run Prompty files directly in VS Code. This Visual Studio Code extension offers an intuitive prompt playground within VS Code to streamline the prompt engineering process. You can find the Prompty extension in the Visual Studio Code Marketplace.
60
+
61
+ Download the [VS Code extension here](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.prompty).
62
+
63
+
64
+ ## Using this Prompty Runtime
65
+ The Python runtime is a simple way to run your prompts in Python. The runtime is available as a Python package and can be installed using pip.
66
+
67
+ ```bash
68
+ pip install prompty
69
+ ```
70
+
71
+ Simple usage example:
72
+
73
+ ```python
74
+ import prompty
75
+
76
+ # execute the prompt
77
+ response = prompty.execute("path/to/prompty/file")
78
+
79
+ print(response)
80
+ ```
81
+
82
+ ## Using Tracing in Prompty
83
+ Prompty supports tracing to help you understand the execution of your prompts. The built-in tracing dumps the execution of the prompt to a file.
84
+
85
+ ```python
86
+ import prompty
87
+ from prompty.tracer import Trace, PromptyTracer
88
+
89
+ # add default tracer
90
 + Trace.add_tracer("prompty", PromptyTracer("path/to/trace/dir"))
91
+
92
+ # execute the prompt
93
+ response = prompty.execute("path/to/prompty/file")
94
+
95
+ print(response)
96
+ ```
97
+
98
+ You can also bring your own tracer by creating a `Tracer` class.
99
+ Simple example:
100
+
101
+ ```python
102
+ import prompty
103
 + from prompty.tracer import Trace, Tracer
104
+
105
+ class MyTracer(Tracer):
106
+
107
+ def start(self, name: str) -> None:
108
+ print(f"Starting {name}")
109
+
110
+ def add(self, key: str, value: Any) -> None:
111
+ print(f"Adding {key} with value {value}")
112
+
113
+ def end(self) -> None:
114
+ print("Ending")
115
+
116
+ # add your tracer
117
+ Trace.add_tracer("my_tracer", MyTracer())
118
+
119
+ # execute the prompt
120
+ response = prompty.execute("path/to/prompty/file")
121
+
122
+ ```
123
+
124
 + To define your own tracer, you can subclass the `Tracer` class and implement the `start`, `add`, and `end` methods and then add it to the `Trace` instance. You can add as many tracers as you like - all of them will be called in order.
125
+
126
+ ## CLI
127
+ The Prompty runtime also comes with a CLI tool that allows you to run prompts from the command line. The CLI tool is installed with the Python package.
128
+
129
+ ```bash
130
+ prompty -s path/to/prompty/file
131
+ ```
132
+
133
+ This will execute the prompt and print the response to the console. It also has default tracing enabled.
134
+
135
+ ## Contributing
136
 + We welcome contributions to the Prompty project! This community-led project is open to all contributors. The project can be found on [GitHub](https://github.com/Microsoft/prompty).
@@ -0,0 +1,12 @@
1
+ prompty-0.1.8.dist-info/METADATA,sha256=1sVPpxf3pjHAhCIJoXa-v02zF6P5w4aCdBcQZV3kEm4,4665
2
+ prompty-0.1.8.dist-info/WHEEL,sha256=rSwsxJWe3vzyR5HCwjWXQruDgschpei4h_giTm0dJVE,90
3
+ prompty-0.1.8.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
4
+ prompty/__init__.py,sha256=Msp8eiKdrDq0wyl6G5DFDH8r5BxM2_E60uzzL7_MJ5w,11183
5
+ prompty/cli.py,sha256=_bx_l5v7OGhtAn4d_73b8tyfEw7OOkjCqGMQPu0YP5A,2489
6
+ prompty/core.py,sha256=WYSvognjMUl08FT0_mkcqZfymb_guKcp3sK8_RO4Kq0,13528
7
+ prompty/executors.py,sha256=TankDTAEBTZkvnPfNUw2KNb1TnNuWhyY8TkWOogUXKs,3185
8
+ prompty/parsers.py,sha256=4mmIn4SVNs8B0R1BufanqUJk8v4r0OEEo8yx6UOxQpA,4670
9
+ prompty/processors.py,sha256=GmReygLx2XW1UuanlX71HG3rTZL86y0yAGyNdbGWkcg,2366
10
+ prompty/renderers.py,sha256=RSHFQFx7AtKLUfsMLCXR0a56Mb7DL1NJNgjUqgg3IqU,776
11
+ prompty/tracer.py,sha256=XMS4aJD_Tp76wm2UFB8amtXn7ioGmPBUy11LmklSUFQ,6490
12
+ prompty-0.1.8.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: pdm-backend (2.3.2)
2
+ Generator: pdm-backend (2.3.3)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
@@ -0,0 +1,7 @@
1
+ Copyright (c) 2024 Microsoft
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4
+
5
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
+
7
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -1,15 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: prompty
3
- Version: 0.1.1
4
- Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
5
- Author-Email: Seth Juarez <seth.juarez@microsoft.com>
6
- License: MIT
7
- Requires-Python: >=3.9
8
- Requires-Dist: pyyaml>=6.0.1
9
- Requires-Dist: pydantic>=2.8.2
10
- Requires-Dist: jinja2>=3.1.4
11
- Requires-Dist: openai>=1.35.10
12
- Requires-Dist: azure-identity>=1.17.1
13
- Description-Content-Type: text/markdown
14
-
15
- # prompty
@@ -1,9 +0,0 @@
1
- prompty-0.1.1.dist-info/METADATA,sha256=fkJx_0VrHNxhOcuQHhgaQSuyevJdErISkPbzA5A2noM,581
2
- prompty-0.1.1.dist-info/WHEEL,sha256=mbxFTmdEUhG7evcdMkR3aBt9SWcoFBJ4CDwnfguNegA,90
3
- prompty/__init__.py,sha256=PP7fVje52try-QcjVnSc44MkF8DVYXeCTfbyRlltqZI,7625
4
- prompty/core.py,sha256=AIQCA-aN9i9tckcObGoRMMS4rjRZzSTSKxM-o6hXuDw,10085
5
- prompty/executors.py,sha256=KlvlDXxpwNCXNpkvigG_jNcaBTz8eP3pFiVjPPnyjk0,2448
6
- prompty/parsers.py,sha256=e7Wf4hnDzrcau_4WsaQT9Jeqcuf1gYvL4KERCnWnVXQ,3993
7
- prompty/processors.py,sha256=x3LCtXhElsaa6bJ82-x_QFNpc7ddgDcyimcNOtni-ow,1856
8
- prompty/renderers.py,sha256=-NetGbPujfRLgofsbU8ty7Ap-y4oFb47bqE21I-vx8o,745
9
- prompty-0.1.1.dist-info/RECORD,,