prompty-0.1.12-py2.py3-none-any.whl

prompty/__init__.py ADDED
@@ -0,0 +1,391 @@
+ import json
+ import traceback
+ from pathlib import Path
+ from typing import Any, Dict, List, Union
+
+ from prompty.tracer import trace
+ from prompty.core import (
+     Frontmatter,
+     InvokerFactory,
+     ModelSettings,
+     Prompty,
+     PropertySettings,
+     TemplateSettings,
+     param_hoisting,
+ )
+
+ from .renderers import *
+ from .parsers import *
+
+
+ def load_global_config(
+     prompty_path: Path = Path.cwd(), configuration: str = "default"
+ ) -> Dict[str, Any]:
+     # is there a prompty.json lying around?
+     prompty_config = list(Path.cwd().glob("**/prompty.json"))
+
+     # if there is one, load it
+     if len(prompty_config) > 0:
+         # pick the nearest prompty.json
+         config = sorted(
+             [
+                 c
+                 for c in prompty_config
+                 if len(c.parent.parts) <= len(prompty_path.parts)
+             ],
+             key=lambda p: len(p.parts),
+         )[-1]
+
+         with open(config, "r") as f:
+             c = json.load(f)
+             if configuration in c:
+                 return c[configuration]
+             else:
+                 raise ValueError(f'Item "{configuration}" not found in "{config}"')
+
+     return {}
+
+
+ @trace(description="Create a headless prompty object for programmatic use.")
+ def headless(
+     api: str,
+     content: str | List[str] | dict,
+     configuration: Dict[str, Any] = {},
+     parameters: Dict[str, Any] = {},
+     connection: str = "default",
+ ) -> Prompty:
+     """Create a headless prompty object for programmatic use.
+
+     Parameters
+     ----------
+     api : str
+         The API to use for the model
+     content : str | List[str] | dict
+         The content to process
+     configuration : Dict[str, Any], optional
+         The configuration to use, by default {}
+     parameters : Dict[str, Any], optional
+         The parameters to use, by default {}
+     connection : str, optional
+         The connection to use, by default "default"
+
+     Returns
+     -------
+     Prompty
+         The headless prompty object
+
+     Example
+     -------
+     >>> import prompty
+     >>> p = prompty.headless(
+     ...     api="embedding",
+     ...     configuration={"type": "azure", "azure_deployment": "text-embedding-ada-002"},
+     ...     content="hello world",
+     ... )
+     >>> emb = prompty.execute(p)
+
+     """
+
+     # get the caller's path (to resolve a relative path for prompty.json)
+     caller = Path(traceback.extract_stack()[-2].filename)
+     templateSettings = TemplateSettings(type="NOOP", parser="NOOP")
+     modelSettings = ModelSettings(
+         api=api,
+         configuration=Prompty.normalize(
+             param_hoisting(
+                 configuration, load_global_config(caller.parent, connection)
+             ),
+             caller.parent,
+         ),
+         parameters=parameters,
+     )
+
+     return Prompty(model=modelSettings, template=templateSettings, content=content)
+
+
+ @trace(description="Load a prompty file.")
+ def load(prompty_file: str, configuration: str = "default") -> Prompty:
+     """Load a prompty file.
+
+     Parameters
+     ----------
+     prompty_file : str
+         The path to the prompty file
+     configuration : str, optional
+         The configuration to use, by default "default"
+
+     Returns
+     -------
+     Prompty
+         The loaded prompty object
+
+     Example
+     -------
+     >>> import prompty
+     >>> p = prompty.load("prompts/basic.prompty")
+     >>> print(p)
+     """
+
+     p = Path(prompty_file)
+     if not p.is_absolute():
+         # get the caller's path (taking the trace frame into account)
+         caller = Path(traceback.extract_stack()[-3].filename)
+         p = Path(caller.parent / p).resolve().absolute()
+
+     # load dictionary from prompty file
+     matter = Frontmatter.read_file(p)
+     attributes = matter["attributes"]
+     content = matter["body"]
+
+     # normalize attribute dictionary (resolve keys and files)
+     attributes = Prompty.normalize(attributes, p.parent)
+
+     # load global configuration
+     global_config = Prompty.normalize(
+         load_global_config(p.parent, configuration), p.parent
+     )
+     if "model" not in attributes:
+         attributes["model"] = {}
+
+     if "configuration" not in attributes["model"]:
+         attributes["model"]["configuration"] = global_config
+     else:
+         attributes["model"]["configuration"] = param_hoisting(
+             attributes["model"]["configuration"],
+             global_config,
+         )
+
+     # pull model settings out of attributes
+     try:
+         model = ModelSettings(**attributes.pop("model"))
+     except Exception as e:
+         raise ValueError(f"Error in model settings: {e}")
+
+     # pull template settings
+     try:
+         if "template" in attributes:
+             t = attributes.pop("template")
+             if isinstance(t, dict):
+                 template = TemplateSettings(**t)
+             # otherwise it has to be a string denoting the type
+             else:
+                 template = TemplateSettings(type=t, parser="prompty")
+         else:
+             template = TemplateSettings(type="jinja2", parser="prompty")
+     except Exception as e:
+         raise ValueError(f"Error in template loader: {e}")
+
+     # formalize inputs and outputs
+     if "inputs" in attributes:
+         try:
+             inputs = {
+                 k: PropertySettings(**v) for (k, v) in attributes.pop("inputs").items()
+             }
+         except Exception as e:
+             raise ValueError(f"Error in inputs: {e}")
+     else:
+         inputs = {}
+     if "outputs" in attributes:
+         try:
+             outputs = {
+                 k: PropertySettings(**v) for (k, v) in attributes.pop("outputs").items()
+             }
+         except Exception as e:
+             raise ValueError(f"Error in outputs: {e}")
+     else:
+         outputs = {}
+
+     # recursive loading of base prompty
+     if "base" in attributes:
+         # load the base prompty from the same directory as the current prompty
+         base = load(p.parent / attributes["base"])
+         # hoist the base prompty's attributes to the current prompty
+         model.api = base.model.api if model.api == "" else model.api
+         model.configuration = param_hoisting(
+             model.configuration, base.model.configuration
+         )
+         model.parameters = param_hoisting(model.parameters, base.model.parameters)
+         model.response = param_hoisting(model.response, base.model.response)
+         attributes["sample"] = param_hoisting(attributes, base.sample, "sample")
+
+         p = Prompty(
+             **attributes,
+             model=model,
+             inputs=inputs,
+             outputs=outputs,
+             template=template,
+             content=content,
+             file=p,
+             basePrompty=base,
+         )
+     else:
+         p = Prompty(
+             **attributes,
+             model=model,
+             inputs=inputs,
+             outputs=outputs,
+             template=template,
+             content=content,
+             file=p,
+         )
+     return p
+
+ @trace(description="Prepare the inputs for the prompt.")
+ def prepare(
+     prompt: Prompty,
+     inputs: Dict[str, Any] = {},
+ ):
+     """Prepare the inputs for the prompt.
+
+     Parameters
+     ----------
+     prompt : Prompty
+         The prompty object
+     inputs : Dict[str, Any], optional
+         The inputs to the prompt, by default {}
+
+     Returns
+     -------
+     dict
+         The prepared and hydrated template shaped for the LLM model
+
+     Example
+     -------
+     >>> import prompty
+     >>> p = prompty.load("prompts/basic.prompty")
+     >>> inputs = {"name": "John Doe"}
+     >>> content = prompty.prepare(p, inputs)
+     """
+     inputs = param_hoisting(inputs, prompt.sample)
+
+     if prompt.template.type == "NOOP":
+         render = prompt.content
+     else:
+         # render
+         renderer = InvokerFactory.create_renderer(prompt.template.type, prompt)
+         render = renderer(inputs)
+
+     if prompt.template.parser == "NOOP":
+         result = render
+     else:
+         # parse as [parser].[api]
+         parser = InvokerFactory.create_parser(
+             f"{prompt.template.parser}.{prompt.model.api}", prompt
+         )
+         result = parser(render)
+
+     return result
+
+ @trace(description="Run the prepared Prompty content against the model.")
+ def run(
+     prompt: Prompty,
+     content: dict | list | str,
+     configuration: Dict[str, Any] = {},
+     parameters: Dict[str, Any] = {},
+     raw: bool = False,
+ ):
+     """Run the prepared Prompty content.
+
+     Parameters
+     ----------
+     prompt : Prompty
+         The prompty object
+     content : dict | list | str
+         The content to process
+     configuration : Dict[str, Any], optional
+         The configuration to use, by default {}
+     parameters : Dict[str, Any], optional
+         The parameters to use, by default {}
+     raw : bool, optional
+         Whether to skip processing, by default False
+
+     Returns
+     -------
+     Any
+         The result of the prompt
+
+     Example
+     -------
+     >>> import prompty
+     >>> p = prompty.load("prompts/basic.prompty")
+     >>> inputs = {"name": "John Doe"}
+     >>> content = prompty.prepare(p, inputs)
+     >>> result = prompty.run(p, content)
+     """
+
+     if configuration != {}:
+         prompt.model.configuration = param_hoisting(
+             configuration, prompt.model.configuration
+         )
+
+     if parameters != {}:
+         prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)
+
+     # execute
+     executor = InvokerFactory.create_executor(
+         prompt.model.configuration["type"], prompt
+     )
+     result = executor(content)
+
+     # skip post-processing if raw output was requested
+     if not raw:
+         # process
+         processor = InvokerFactory.create_processor(
+             prompt.model.configuration["type"], prompt
+         )
+         result = processor(result)
+
+     return result
+
+ @trace(description="Execute a prompty")
+ def execute(
+     prompt: Union[str, Prompty],
+     configuration: Dict[str, Any] = {},
+     parameters: Dict[str, Any] = {},
+     inputs: Dict[str, Any] = {},
+     raw: bool = False,
+     connection: str = "default",
+ ):
+     """Execute a prompty.
+
+     Parameters
+     ----------
+     prompt : Union[str, Prompty]
+         The prompty object or path to the prompty file
+     configuration : Dict[str, Any], optional
+         The configuration to use, by default {}
+     parameters : Dict[str, Any], optional
+         The parameters to use, by default {}
+     inputs : Dict[str, Any], optional
+         The inputs to the prompt, by default {}
+     raw : bool, optional
+         Whether to skip processing, by default False
+     connection : str, optional
+         The connection to use, by default "default"
+
+     Returns
+     -------
+     Any
+         The result of the prompt
+
+     Example
+     -------
+     >>> import prompty
+     >>> inputs = {"name": "John Doe"}
+     >>> result = prompty.execute("prompts/basic.prompty", inputs=inputs)
+     """
+     if isinstance(prompt, str):
+         path = Path(prompt)
+         if not path.is_absolute():
+             # get the caller's path (taking the trace frame into account)
+             caller = Path(traceback.extract_stack()[-3].filename)
+             path = Path(caller.parent / path).resolve().absolute()
+         prompt = load(path, connection)
+
+     # prepare content
+     content = prepare(prompt, inputs)
+
+     # run LLM model
+     result = run(prompt, content, configuration, parameters, raw)
+
+     return result
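
Taken together, `load`, `prepare`, `run`, and `execute` form a small pipeline, with `load_global_config` layering a nearby `prompty.json` under whatever configuration the caller supplies. A minimal usage sketch, grounded in the docstring examples above (the `prompts/basic.prompty` path and the `name` input are illustrative):

    import prompty

    # one-shot: load, render/parse, execute, and post-process in a single call
    result = prompty.execute("prompts/basic.prompty", inputs={"name": "John Doe"})

    # explicit pipeline: useful for inspecting the rendered prompt before sending it
    p = prompty.load("prompts/basic.prompty")
    content = prompty.prepare(p, {"name": "John Doe"})
    result = prompty.run(p, content)

And a hypothetical `prompty.json` that `load_global_config` would resolve, shown as the dict it parses to (all values are placeholders; only the top-level connection name and the `type` key are implied by the code above):

    config = {
        "default": {                             # the `configuration` / `connection` name
            "type": "azure",                     # selects executor/processor via InvokerFactory
            "azure_deployment": "gpt-35-turbo",  # placeholder deployment name
        }
    }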
prompty/azure/__init__.py ADDED
@@ -0,0 +1,3 @@
+ # __init__.py
+ from .executor import AzureOpenAIExecutor
+ from .processor import AzureOpenAIProcessor
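
Importing this subpackage is what makes the `azure` and `azure_openai` invokers available: the two imports above run the `@InvokerFactory.register_executor` / `@InvokerFactory.register_processor` decorators in `executor.py` and `processor.py` as a side effect. A minimal sketch of how `dynamic_import` in `prompty/cli.py` (below) triggers this:

    import importlib

    # "azure" contains no dot, so dynamic_import expands it to "prompty.azure";
    # importing the package executes the registration decorators
    importlib.import_module("prompty.azure")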
prompty/azure/executor.py ADDED
@@ -0,0 +1,95 @@
+ import azure.identity
+ import importlib.metadata
+ from typing import Any, Iterator
+ from openai import AzureOpenAI
+ from ..core import Invoker, InvokerFactory, Prompty, PromptyStream
+
+ VERSION = importlib.metadata.version("prompty")
+
+
+ @InvokerFactory.register_executor("azure")
+ @InvokerFactory.register_executor("azure_openai")
+ class AzureOpenAIExecutor(Invoker):
+     """Azure OpenAI Executor"""
+
+     def __init__(self, prompty: Prompty) -> None:
+         super().__init__(prompty)
+         kwargs = {
+             key: value
+             for key, value in self.prompty.model.configuration.items()
+             if key != "type"
+         }
+
+         # no key, use default credentials
+         if "api_key" not in kwargs:
+             # managed identity if a client id is given
+             if "client_id" in kwargs:
+                 default_credential = azure.identity.ManagedIdentityCredential(
+                     client_id=kwargs.pop("client_id"),
+                 )
+             # otherwise the default credential chain
+             else:
+                 default_credential = azure.identity.DefaultAzureCredential(
+                     exclude_shared_token_cache_credential=True
+                 )
+
+             kwargs["azure_ad_token_provider"] = (
+                 azure.identity.get_bearer_token_provider(
+                     default_credential, "https://cognitiveservices.azure.com/.default"
+                 )
+             )
+
+         self.client = AzureOpenAI(
+             default_headers={
+                 "User-Agent": f"prompty/{VERSION}",
+                 "x-ms-useragent": f"prompty/{VERSION}",
+             },
+             **kwargs,
+         )
+
+         self.api = self.prompty.model.api
+         self.deployment = self.prompty.model.configuration["azure_deployment"]
+         self.parameters = self.prompty.model.parameters
+
+     def invoke(self, data: Any) -> Any:
+         """Invoke the Azure OpenAI API
+
+         Parameters
+         ----------
+         data : Any
+             The data to send to the Azure OpenAI API
+
+         Returns
+         -------
+         Any
+             The response from the Azure OpenAI API
+         """
+         if self.api == "chat":
+             response = self.client.chat.completions.create(
+                 model=self.deployment,
+                 messages=data if isinstance(data, list) else [data],
+                 **self.parameters,
+             )
+
+         elif self.api == "completion":
+             response = self.client.completions.create(
+                 prompt=data.item,
+                 model=self.deployment,
+                 **self.parameters,
+             )
+
+         elif self.api == "embedding":
+             response = self.client.embeddings.create(
+                 input=data if isinstance(data, list) else [data],
+                 model=self.deployment,
+                 **self.parameters,
+             )
+
+         elif self.api == "image":
+             raise NotImplementedError("Azure OpenAI Image API is not implemented yet")
+
+         # hand streaming responses back as a PromptyStream
+         if isinstance(response, Iterator):
+             return PromptyStream("AzureOpenAIExecutor", response)
+         else:
+             return response
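
Everything in `model.configuration` except `type` is forwarded verbatim to the `AzureOpenAI` client constructor, so a working configuration is whatever that client accepts plus the `azure_deployment` the executor reads back. A minimal sketch, assuming the standard `azure_endpoint` and `api_version` client parameters (all values are placeholders):

    configuration = {
        "type": "azure_openai",                                   # routes to this executor
        "azure_endpoint": "https://<resource>.openai.azure.com",  # passed to AzureOpenAI(...)
        "api_version": "2024-02-01",                              # passed to AzureOpenAI(...)
        "azure_deployment": "gpt-4",                              # also used as model=
        # omit "api_key" to fall back to managed identity / DefaultAzureCredential
    }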
prompty/azure/processor.py ADDED
@@ -0,0 +1,66 @@
+ from typing import Any, Iterator
+ from openai.types.completion import Completion
+ from openai.types.chat.chat_completion import ChatCompletion
+ from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
+ from openai.types.create_embedding_response import CreateEmbeddingResponse
+
+
+ @InvokerFactory.register_processor("azure")
+ @InvokerFactory.register_processor("azure_openai")
+ class AzureOpenAIProcessor(Invoker):
+     """Azure OpenAI Processor"""
+
+     def __init__(self, prompty: Prompty) -> None:
+         super().__init__(prompty)
+
+     def invoke(self, data: Any) -> Any:
+         """Invoke the OpenAI/Azure API
+
+         Parameters
+         ----------
+         data : Any
+             The data to send to the OpenAI/Azure API
+
+         Returns
+         -------
+         Any
+             The response from the OpenAI/Azure API
+         """
+         if isinstance(data, ChatCompletion):
+             response = data.choices[0].message
+             # tool calls available in response
+             if response.tool_calls:
+                 return [
+                     ToolCall(
+                         id=tool_call.id,
+                         name=tool_call.function.name,
+                         arguments=tool_call.function.arguments,
+                     )
+                     for tool_call in response.tool_calls
+                 ]
+             else:
+                 return response.content
+
+         elif isinstance(data, Completion):
+             return data.choices[0].text
+         elif isinstance(data, CreateEmbeddingResponse):
+             if len(data.data) == 0:
+                 raise ValueError("Invalid data")
+             elif len(data.data) == 1:
+                 return data.data[0].embedding
+             else:
+                 return [item.embedding for item in data.data]
+         elif isinstance(data, Iterator):
+
+             def generator():
+                 for chunk in data:
+                     if (
+                         len(chunk.choices) == 1
+                         and chunk.choices[0].delta.content is not None
+                     ):
+                         content = chunk.choices[0].delta.content
+                         yield content
+
+             return PromptyStream("AzureOpenAIProcessor", generator())
+         else:
+             return data
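
Depending on the API and on whether the executor streamed, the processor returns a plain string (chat/completion), a list (`ToolCall`s or embeddings), or a `PromptyStream` of text chunks. A minimal consumption sketch, assuming `result` came from `prompty.execute(...)` with `raw=False`:

    import prompty

    result = prompty.execute("prompts/basic.prompty", inputs={"name": "John Doe"})

    if isinstance(result, str):
        print(result)              # chat/completion text
    elif isinstance(result, list):
        print(result)              # ToolCall objects or embedding vectors
    else:
        for chunk in result:       # PromptyStream yields delta content strings
            print(chunk, end="")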
prompty/cli.py ADDED
@@ -0,0 +1,117 @@
+ import os
+ import json
+ import click
+ import importlib
+
+ from pathlib import Path
+ from pydantic import BaseModel
+
+ import prompty
+ from prompty.tracer import trace, PromptyTracer, console_tracer, Tracer
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+
+ def normalize_path(p, create_dir=False) -> Path:
+     path = Path(p)
+     if not path.is_absolute():
+         path = Path(os.getcwd()).joinpath(path).absolute().resolve()
+     else:
+         path = path.absolute().resolve()
+
+     if create_dir:
+         if not path.exists():
+             print(f"Creating directory {str(path)}")
+             os.makedirs(str(path))
+
+     return path
+
+ def dynamic_import(module: str):
+     t = module if "." in module else f"prompty.{module}"
+     print(f"Loading invokers from {t}")
+     importlib.import_module(t)
+
+
+ @trace
+ def chat_mode(prompt_path: str):
+     W = "\033[0m"  # white (normal)
+     R = "\033[31m"  # red
+     G = "\033[32m"  # green
+     O = "\033[33m"  # orange
+     B = "\033[34m"  # blue
+     P = "\033[35m"  # purple
+     print(f"Executing {str(prompt_path)} in chat mode...")
+     p = prompty.load(str(prompt_path))
+     if "chat_history" not in p.sample:
+         print(
+             f"{R}{str(prompt_path)} needs to have a chat_history input to work in chat mode{W}"
+         )
+         return
+     else:
+
+         try:
+             # load executor / processor types
+             dynamic_import(p.model.configuration["type"])
+             chat_history = p.sample["chat_history"]
+             while True:
+                 user_input = input(f"\n{B}User:{W} ")
+                 if user_input == "exit":
+                     break
+                 # re-execute the (reloadable) prompty file on every turn
+                 chat_history.append({"role": "user", "content": user_input})
+                 result = prompty.execute(prompt_path, inputs={"chat_history": chat_history})
+                 print(f"\n{G}Assistant:{W} {result}")
+                 chat_history.append({"role": "assistant", "content": result})
+         except Exception as e:
+             print(f"{type(e).__qualname__}: {e}")
+
+     print(f"\n{R}Goodbye!{W}\n")
+
+
+ @trace
+ def execute(prompt_path: str, raw=False):
+     p = prompty.load(prompt_path)
+
+     try:
+         # load executor / processor types
+         dynamic_import(p.model.configuration["type"])
+
+         result = prompty.execute(p, raw=raw)
+         if issubclass(type(result), BaseModel):
+             print("\n", json.dumps(result.model_dump(), indent=4), "\n")
+         elif isinstance(result, list):
+             print(
+                 "\n", json.dumps([item.model_dump() for item in result], indent=4), "\n"
+             )
+         else:
+             print("\n", result, "\n")
+     except Exception as e:
+         print(f"{type(e).__qualname__}: {e}", "\n")
+
+
+ @click.command()
+ @click.option("--source", "-s", required=True)
+ @click.option("--verbose", "-v", is_flag=True)
+ @click.option("--chat", "-c", is_flag=True)
+ @click.version_option()
+ def run(source, verbose, chat):
+     prompt_path = normalize_path(source)
+     if not prompt_path.exists():
+         print(f"{str(prompt_path)} does not exist")
+         return
+
+     if verbose:
+         Tracer.add("console", console_tracer)
+
+     ptrace = PromptyTracer()
+     Tracer.add("prompty", ptrace.tracer)
+
+     if chat:
+         chat_mode(str(prompt_path))
+     else:
+         execute(str(prompt_path), raw=verbose)
+
+
+ if __name__ == "__main__":
+     chat_mode(prompt_path="./tests/prompts/basic.prompt")
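
The console-script name bound to this `run` command is not visible in this diff, but the command can be exercised directly with click's test runner. A minimal sketch (the prompty path is illustrative):

    from click.testing import CliRunner

    from prompty.cli import run

    # equivalent to: <cli> --source prompts/basic.prompty --verbose
    result = CliRunner().invoke(run, ["--source", "prompts/basic.prompty", "--verbose"])
    print(result.output)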