prompty 0.1.12__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prompty/core.py ADDED
@@ -0,0 +1,539 @@
+ from __future__ import annotations
+
+ import os
+ import re
+ import yaml
+ import json
+ import abc
+ from pathlib import Path
+ from .tracer import Tracer, trace, to_dict
+ from pydantic import BaseModel, Field, FilePath
+ from typing import Any, AsyncIterator, Iterator, List, Literal, Dict, Callable, Set
+
+
+ class ToolCall(BaseModel):
+     id: str
+     name: str
+     arguments: str
+
+
+ class PropertySettings(BaseModel):
+     """PropertySettings class to define the properties of the model
+
+     Attributes
+     ----------
+     type : str
+         The type of the property
+     default : any
+         The default value of the property
+     description : str
+         The description of the property
+     """
+
+     type: Literal["string", "number", "array", "object", "boolean"]
+     default: str | int | float | List | dict | bool | None = Field(default=None)
+     description: str = Field(default="")
+
+
+ class ModelSettings(BaseModel):
+     """ModelSettings class to define the model of the prompty
+
+     Attributes
+     ----------
+     api : str
+         The api of the model
+     configuration : dict
+         The configuration of the model
+     parameters : dict
+         The parameters of the model
+     response : dict
+         The response of the model
+     """
+
+     api: str = Field(default="")
+     configuration: dict = Field(default={})
+     parameters: dict = Field(default={})
+     response: dict = Field(default={})
+
+     def model_dump(
+         self,
+         *,
+         mode: str = "python",
+         include: (
+             Set[int] | Set[str] | Dict[int, Any] | Dict[str, Any] | None
+         ) = None,
+         exclude: (
+             Set[int] | Set[str] | Dict[int, Any] | Dict[str, Any] | None
+         ) = None,
+         context: Any | None = None,
+         by_alias: bool = False,
+         exclude_unset: bool = False,
+         exclude_defaults: bool = False,
+         exclude_none: bool = False,
+         round_trip: bool = False,
+         warnings: bool | Literal["none"] | Literal["warn"] | Literal["error"] = True,
+         serialize_as_any: bool = False,
+     ) -> Dict[str, Any]:
+         """Method to dump the model in a safe way"""
+         d = super().model_dump(
+             mode=mode,
+             include=include,
+             exclude=exclude,
+             context=context,
+             by_alias=by_alias,
+             exclude_unset=exclude_unset,
+             exclude_defaults=exclude_defaults,
+             exclude_none=exclude_none,
+             round_trip=round_trip,
+             warnings=warnings,
+             serialize_as_any=serialize_as_any,
+         )
+
+         d["configuration"] = {
+             k: "*" * len(v) if "key" in k.lower() or "secret" in k.lower() else v
+             for k, v in d["configuration"].items()
+         }
+         return d
+
+
+
+ class TemplateSettings(BaseModel):
+     """TemplateSettings class to define the template of the prompty
+
+     Attributes
+     ----------
+     type : str
+         The type of the template
+     parser : str
+         The parser of the template
+     """
+
+     type: str = Field(default="jinja2")
+     parser: str = Field(default="")
+
+
+ class Prompty(BaseModel):
+     """Prompty class to define the prompty
+
+     Attributes
+     ----------
+     name : str
+         The name of the prompty
+     description : str
+         The description of the prompty
+     authors : List[str]
+         The authors of the prompty
+     tags : List[str]
+         The tags of the prompty
+     version : str
+         The version of the prompty
+     base : str
+         The base of the prompty
+     basePrompty : Prompty
+         The base prompty
+     model : ModelSettings
+         The model of the prompty
+     sample : dict
+         The sample of the prompty
+     inputs : Dict[str, PropertySettings]
+         The inputs of the prompty
+     outputs : Dict[str, PropertySettings]
+         The outputs of the prompty
+     template : TemplateSettings
+         The template of the prompty
+     file : FilePath
+         The file of the prompty
+     content : str | List[str] | dict
+         The content of the prompty
+     """
+
+     # metadata
+     name: str = Field(default="")
+     description: str = Field(default="")
+     authors: List[str] = Field(default=[])
+     tags: List[str] = Field(default=[])
+     version: str = Field(default="")
+     base: str = Field(default="")
+     basePrompty: Prompty | None = Field(default=None)
+     # model
+     model: ModelSettings = Field(default_factory=ModelSettings)
+
+     # sample
+     sample: dict = Field(default={})
+
+     # input / output
+     inputs: Dict[str, PropertySettings] = Field(default={})
+     outputs: Dict[str, PropertySettings] = Field(default={})
+
+     # template
+     template: TemplateSettings
+
+     file: FilePath = Field(default="")
+     content: str | List[str] | dict = Field(default="")
+
+     def to_safe_dict(self) -> Dict[str, Any]:
+         d = {}
+         for k, v in self:
+             if v != "" and v != {} and v != [] and v is not None:
+                 if k == "model":
+                     d[k] = v.model_dump()
+                 elif k == "template":
+                     d[k] = v.model_dump()
+                 elif k == "inputs" or k == "outputs":
+                     d[k] = {name: prop.model_dump() for name, prop in v.items()}
+                 elif k == "file":
+                     d[k] = (
+                         str(self.file.as_posix())
+                         if isinstance(self.file, Path)
+                         else self.file
+                     )
+                 elif k == "basePrompty":
+                     # no need to serialize basePrompty
+                     continue
+
+                 else:
+                     d[k] = v
+         return d
+
+     @staticmethod
+     def _process_file(file: str, parent: Path) -> Any:
+         file = Path(parent / Path(file)).resolve().absolute()
+         if file.exists():
+             with open(str(file), "r") as f:
+                 items = json.load(f)
+                 if isinstance(items, list):
+                     return [Prompty.normalize(value, parent) for value in items]
+                 elif isinstance(items, dict):
+                     return {
+                         key: Prompty.normalize(value, parent)
+                         for key, value in items.items()
+                     }
+                 else:
+                     return items
+         else:
+             raise FileNotFoundError(f"File {file} not found")
+
+     @staticmethod
+     def _process_env(variable: str, env_error=True, default: str | None = None) -> Any:
+         if variable in os.environ:
+             return os.environ[variable]
+         else:
+             if default:
+                 return default
+             if env_error:
+                 raise ValueError(f"Variable {variable} not found in environment")
+
+             return ""
+
+     @staticmethod
+     def normalize(attribute: Any, parent: Path, env_error=True) -> Any:
+         if isinstance(attribute, str):
+             attribute = attribute.strip()
+             if attribute.startswith("${") and attribute.endswith("}"):
+                 # check if env or file
+                 variable = attribute[2:-1].split(":")
+                 if variable[0] == "env" and len(variable) > 1:
+                     return Prompty._process_env(
+                         variable[1],
+                         env_error,
+                         variable[2] if len(variable) > 2 else None,
+                     )
+                 elif variable[0] == "file" and len(variable) > 1:
+                     return Prompty._process_file(variable[1], parent)
+                 else:
+                     # old way of doing things for back compatibility
+                     v = Prompty._process_env(variable[0], False)
+                     if len(v) == 0:
+                         if len(variable) > 1:
+                             return variable[1]
+                         else:
+                             if env_error:
+                                 raise ValueError(
+                                     f"Variable {variable[0]} not found in environment"
+                                 )
+                             else:
+                                 return v
+                     else:
+                         return v
+             elif (
+                 attribute.startswith("file:")
+                 and Path(parent / attribute.split(":")[1]).exists()
+             ):
+                 # old way of doing things for back compatibility
+                 return Prompty._process_file(attribute.split(":")[1], parent)
+             else:
+                 return attribute
+         elif isinstance(attribute, list):
+             return [Prompty.normalize(value, parent) for value in attribute]
+         elif isinstance(attribute, dict):
+             return {
+                 key: Prompty.normalize(value, parent)
+                 for key, value in attribute.items()
+             }
+         else:
+             return attribute
+
+
+ def param_hoisting(
+     top: Dict[str, Any], bottom: Dict[str, Any], top_key: str | None = None
+ ) -> Dict[str, Any]:
+     if top_key:
+         new_dict = {**top[top_key]} if top_key in top else {}
+     else:
+         new_dict = {**top}
+     for key, value in bottom.items():
+         if key not in new_dict:
+             new_dict[key] = value
+     return new_dict
+
+
+ class Invoker(abc.ABC):
+     """Abstract class for Invoker
+
+     Attributes
+     ----------
+     prompty : Prompty
+         The prompty object
+     name : str
+         The name of the invoker
+
+     """
+
+     def __init__(self, prompty: Prompty) -> None:
+         self.prompty = prompty
+         self.name = self.__class__.__name__
+
+     @abc.abstractmethod
+     def invoke(self, data: Any) -> Any:
+         """Abstract method to invoke the invoker
+
+         Parameters
+         ----------
+         data : any
+             The data to be invoked
+
+         Returns
+         -------
+         any
+             The invoked result
+         """
+         pass
+
+     @trace
+     def __call__(self, data: Any) -> Any:
+         """Method to call the invoker
+
+         Parameters
+         ----------
+         data : any
+             The data to be invoked
+
+         Returns
+         -------
+         any
+             The invoked result
+         """
+         return self.invoke(data)
+
+
+ class InvokerFactory:
+     """Factory class for Invoker"""
+
+     _renderers: Dict[str, type[Invoker]] = {}
+     _parsers: Dict[str, type[Invoker]] = {}
+     _executors: Dict[str, type[Invoker]] = {}
+     _processors: Dict[str, type[Invoker]] = {}
+
+     @classmethod
+     def add_renderer(cls, name: str, invoker: type[Invoker]) -> None:
+         cls._renderers[name] = invoker
+
+     @classmethod
+     def add_parser(cls, name: str, invoker: type[Invoker]) -> None:
+         cls._parsers[name] = invoker
+
+     @classmethod
+     def add_executor(cls, name: str, invoker: type[Invoker]) -> None:
+         cls._executors[name] = invoker
+
+     @classmethod
+     def add_processor(cls, name: str, invoker: type[Invoker]) -> None:
+         cls._processors[name] = invoker
+
+     @classmethod
+     def register_renderer(cls, name: str) -> Callable:
+         def inner_wrapper(wrapped_class: type[Invoker]) -> type[Invoker]:
+             cls._renderers[name] = wrapped_class
+             return wrapped_class
+
+         return inner_wrapper
+
+     @classmethod
+     def register_parser(cls, name: str) -> Callable:
+         def inner_wrapper(wrapped_class: type[Invoker]) -> type[Invoker]:
+             cls._parsers[name] = wrapped_class
+             return wrapped_class
+
+         return inner_wrapper
+
+     @classmethod
+     def register_executor(cls, name: str) -> Callable:
+         def inner_wrapper(wrapped_class: type[Invoker]) -> type[Invoker]:
+             cls._executors[name] = wrapped_class
+             return wrapped_class
+
+         return inner_wrapper
+
+     @classmethod
+     def register_processor(cls, name: str) -> Callable:
+         def inner_wrapper(wrapped_class: type[Invoker]) -> type[Invoker]:
+             cls._processors[name] = wrapped_class
+             return wrapped_class
+
+         return inner_wrapper
+
+     @classmethod
+     def create_renderer(cls, name: str, prompty: Prompty) -> Invoker:
+         if name not in cls._renderers:
+             raise ValueError(f"Renderer {name} not found")
+         return cls._renderers[name](prompty)
+
+     @classmethod
+     def create_parser(cls, name: str, prompty: Prompty) -> Invoker:
+         if name not in cls._parsers:
+             raise ValueError(f"Parser {name} not found")
+         return cls._parsers[name](prompty)
+
+     @classmethod
+     def create_executor(cls, name: str, prompty: Prompty) -> Invoker:
+         if name not in cls._executors:
+             raise ValueError(f"Executor {name} not found")
+         return cls._executors[name](prompty)
+
+     @classmethod
+     def create_processor(cls, name: str, prompty: Prompty) -> Invoker:
+         if name not in cls._processors:
+             raise ValueError(f"Processor {name} not found")
+         return cls._processors[name](prompty)
+
+
+ @InvokerFactory.register_renderer("NOOP")
+ @InvokerFactory.register_parser("NOOP")
+ @InvokerFactory.register_executor("NOOP")
+ @InvokerFactory.register_processor("NOOP")
+ @InvokerFactory.register_parser("prompty.embedding")
+ @InvokerFactory.register_parser("prompty.image")
+ @InvokerFactory.register_parser("prompty.completion")
+ class NoOp(Invoker):
+     def invoke(self, data: Any) -> Any:
+         return data
+
+
+ class Frontmatter:
+     """Frontmatter class to extract frontmatter from string."""
+
+     _yaml_delim = r"(?:---|\+\+\+)"
+     _yaml = r"(.*?)"
+     _content = r"\s*(.+)$"
+     _re_pattern = r"^\s*" + _yaml_delim + _yaml + _yaml_delim + _content
+     _regex = re.compile(_re_pattern, re.S | re.M)
+
+     @classmethod
+     def read_file(cls, path):
+         """Returns dict with separated frontmatter from file.
+
+         Parameters
+         ----------
+         path : str
+             The path to the file
+         """
+         with open(path, encoding="utf-8") as file:
+             file_contents = file.read()
+             return cls.read(file_contents)
+
+     @classmethod
+     def read(cls, string):
+         """Returns dict with separated frontmatter from string.
+
+         Parameters
+         ----------
+         string : str
+             The string to extract frontmatter from
+
+
+         Returns
+         -------
+         dict
+             The separated frontmatter
+         """
+         fmatter = ""
+         body = ""
+         result = cls._regex.search(string)
+
+         if result:
+             fmatter = result.group(1)
+             body = result.group(2)
+         return {
+             "attributes": yaml.load(fmatter, Loader=yaml.FullLoader),
+             "body": body,
+             "frontmatter": fmatter,
+         }
+
+
+ class PromptyStream(Iterator):
+     """PromptyStream class to iterate over LLM stream.
+     Necessary for Prompty to handle streaming data when tracing."""
+
+     def __init__(self, name: str, iterator: Iterator):
+         self.name = name
+         self.iterator = iterator
+         self.items: List[Any] = []
+         self.__name__ = "PromptyStream"
+
+     def __iter__(self):
+         return self
+
+     def __next__(self):
+         try:
+             # enumerate but add to list
+             o = self.iterator.__next__()
+             self.items.append(o)
+             return o
+
+         except StopIteration:
+             # StopIteration is raised
+             # contents are exhausted
+             if len(self.items) > 0:
+                 with Tracer.start(f"{self.name}.PromptyStream") as trace:
+                     trace("result", [to_dict(s) for s in self.items])
+
+             raise StopIteration
+
+
+ class AsyncPromptyStream(AsyncIterator):
+     """AsyncPromptyStream class to iterate over LLM stream.
+     Necessary for Prompty to handle streaming data when tracing."""
+
+     def __init__(self, name: str, iterator: AsyncIterator):
+         self.name = name
+         self.iterator = iterator
+         self.items: List[Any] = []
+         self.__name__ = "AsyncPromptyStream"
+
+     def __aiter__(self):
+         return self
+
+     async def __anext__(self):
+         try:
+             # enumerate but add to list
+             o = await self.iterator.__anext__()
+             self.items.append(o)
+             return o
+
+         except StopAsyncIteration:
+             # StopAsyncIteration is raised
+             # contents are exhausted
+             if len(self.items) > 0:
+                 with Tracer.start(f"{self.name}.AsyncPromptyStream") as trace:
+                     trace("result", [to_dict(s) for s in self.items])
+
+             raise StopAsyncIteration
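For orientation, here is a minimal sketch of how the ${env:...} resolution in Prompty.normalize behaves. The variable names and values are hypothetical, and it assumes the installed package exposes this module as prompty.core:

import os
from pathlib import Path
from prompty.core import Prompty

# hypothetical variable, set here only for the example
os.environ["EXAMPLE_API_KEY"] = "sk-not-a-real-key"

# ${env:VAR} resolves from the environment
Prompty.normalize("${env:EXAMPLE_API_KEY}", Path.cwd())       # -> "sk-not-a-real-key"

# ${env:VAR:default} falls back to the default when VAR is unset
Prompty.normalize("${env:MISSING_VAR:fallback}", Path.cwd())  # -> "fallback"

# lists and dicts are walked recursively
Prompty.normalize({"api_key": "${env:EXAMPLE_API_KEY}"}, Path.cwd())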
@@ -0,0 +1,3 @@
+ # __init__.py
+ from .executor import AzureOpenAIExecutor
+ from .processor import AzureOpenAIProcessor
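These imports exist for their side effects: importing the module runs the @InvokerFactory.register_executor("openai") and @InvokerFactory.register_processor("openai") class decorators defined below, which is what makes the "openai" invokers creatable by name. A small sketch of the effect, assuming this subpackage has already been imported:

from prompty.core import InvokerFactory

# the registration decorators have populated the factory registries
assert "openai" in InvokerFactory._executors
assert "openai" in InvokerFactory._processors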
@@ -0,0 +1,74 @@
+ import importlib.metadata
+ from openai import OpenAI
+ from typing import Any, Iterator
+ from ..core import Invoker, InvokerFactory, Prompty, PromptyStream
+
+ VERSION = importlib.metadata.version("prompty")
+
+
+ @InvokerFactory.register_executor("openai")
+ class AzureOpenAIExecutor(Invoker):
+     """OpenAI Executor"""
+
+     def __init__(self, prompty: Prompty) -> None:
+         super().__init__(prompty)
+         kwargs = {
+             key: value
+             for key, value in self.prompty.model.configuration.items()
+             if key != "type" and key != "azure_deployment"  # OpenAI() rejects unknown kwargs
+         }
+
+         self.client = OpenAI(
+             default_headers={
+                 "User-Agent": f"prompty/{VERSION}",
+                 "x-ms-useragent": f"prompty/{VERSION}",
+             },
+             **kwargs,
+         )
+
+         self.api = self.prompty.model.api
+         self.deployment = self.prompty.model.configuration["azure_deployment"]
+         self.parameters = self.prompty.model.parameters
+
+     def invoke(self, data: Any) -> Any:
+         """Invoke the OpenAI API
+
+         Parameters
+         ----------
+         data : any
+             The data to send to the OpenAI API
+
+         Returns
+         -------
+         any
+             The response from the OpenAI API
+         """
+         if self.api == "chat":
+             response = self.client.chat.completions.create(
+                 model=self.deployment,
+                 messages=data if isinstance(data, list) else [data],
+                 **self.parameters,
+             )
+
+         elif self.api == "completion":
+             response = self.client.completions.create(
+                 prompt=data.item,
+                 model=self.deployment,
+                 **self.parameters,
+             )
+
+         elif self.api == "embedding":
+             response = self.client.embeddings.create(
+                 input=data if isinstance(data, list) else [data],
+                 model=self.deployment,
+                 **self.parameters,
+             )
+
+         elif self.api == "image":
+             raise NotImplementedError("OpenAI Image API is not implemented yet")
+
+         # stream response
+         if isinstance(response, Iterator):
+             return PromptyStream("OpenAIExecutor", response)
+         else:
+             return response
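As a usage illustration (not part of the package), the executor can be driven through the factory once this module has been imported and its registration decorator has run. The configuration keys mirror those read in __init__ above; the key and deployment values are placeholders, and the call is left commented out because it would hit the network:

from prompty.core import InvokerFactory, ModelSettings, Prompty, TemplateSettings

p = Prompty(
    model=ModelSettings(
        api="chat",
        configuration={"api_key": "sk-placeholder", "azure_deployment": "gpt-4"},
        parameters={"max_tokens": 256},
    ),
    template=TemplateSettings(),
)

executor = InvokerFactory.create_executor("openai", p)
# result = executor({"role": "user", "content": "Hello"})  # live API call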
@@ -0,0 +1,65 @@
+ from typing import Any, Iterator
+ from openai.types.completion import Completion
+ from openai.types.chat.chat_completion import ChatCompletion
+ from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
+ from openai.types.create_embedding_response import CreateEmbeddingResponse
+
+
+ @InvokerFactory.register_processor("openai")
+ class AzureOpenAIProcessor(Invoker):
+     """OpenAI Processor"""
+
+     def __init__(self, prompty: Prompty) -> None:
+         super().__init__(prompty)
+
+     def invoke(self, data: Any) -> Any:
+         """Process a response from the OpenAI API
+
+         Parameters
+         ----------
+         data : any
+             The response data from the OpenAI API
+
+         Returns
+         -------
+         any
+             The processed result
+         """
+         if isinstance(data, ChatCompletion):
+             response = data.choices[0].message
+             # tool calls available in response
+             if response.tool_calls:
+                 return [
+                     ToolCall(
+                         id=tool_call.id,
+                         name=tool_call.function.name,
+                         arguments=tool_call.function.arguments,
+                     )
+                     for tool_call in response.tool_calls
+                 ]
+             else:
+                 return response.content
+
+         elif isinstance(data, Completion):
+             return data.choices[0].text
+         elif isinstance(data, CreateEmbeddingResponse):
+             if len(data.data) == 0:
+                 raise ValueError("Invalid data")
+             elif len(data.data) == 1:
+                 return data.data[0].embedding
+             else:
+                 return [item.embedding for item in data.data]
+         elif isinstance(data, Iterator):
+
+             def generator():
+                 for chunk in data:
+                     if (
+                         len(chunk.choices) == 1
+                         and chunk.choices[0].delta.content is not None
+                     ):
+                         content = chunk.choices[0].delta.content
+                         yield content
+
+             return PromptyStream("OpenAIProcessor", generator())
+         else:
+             return data
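To illustrate the ChatCompletion branch above, a self-contained sketch applying the processor to a hand-built response object (all literal values are made up; in practice the object comes from the executor):

from openai.types.chat.chat_completion import ChatCompletion
from prompty.core import InvokerFactory, Prompty, TemplateSettings

completion = ChatCompletion.model_validate({
    "id": "chatcmpl-example",
    "object": "chat.completion",
    "created": 0,
    "model": "gpt-4",
    "choices": [{
        "index": 0,
        "finish_reason": "stop",
        "message": {"role": "assistant", "content": "Hello!"},
    }],
})

processor = InvokerFactory.create_processor("openai", Prompty(template=TemplateSettings()))
print(processor(completion))  # -> "Hello!" (no tool calls, so plain content is returned)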