prompty-0.1.12-py2.py3-none-any.whl
- prompty/__init__.py +391 -0
- prompty/azure/__init__.py +3 -0
- prompty/azure/executor.py +95 -0
- prompty/azure/processor.py +66 -0
- prompty/cli.py +117 -0
- prompty/core.py +539 -0
- prompty/openai/__init__.py +3 -0
- prompty/openai/executor.py +74 -0
- prompty/openai/processor.py +65 -0
- prompty/parsers.py +139 -0
- prompty/renderers.py +23 -0
- prompty/serverless/__init__.py +3 -0
- prompty/serverless/executor.py +82 -0
- prompty/serverless/processor.py +62 -0
- prompty/tracer.py +260 -0
- prompty-0.1.12.dist-info/METADATA +17 -0
- prompty-0.1.12.dist-info/RECORD +19 -0
- prompty-0.1.12.dist-info/WHEEL +4 -0
- prompty-0.1.12.dist-info/licenses/LICENSE +7 -0
prompty/parsers.py
ADDED
@@ -0,0 +1,139 @@
import re
import base64
from .core import Invoker, InvokerFactory, Prompty


@InvokerFactory.register_parser("prompty.chat")
class PromptyChatParser(Invoker):
    """ Prompty Chat Parser """
    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)
        self.roles = ["assistant", "function", "system", "user"]
        self.path = self.prompty.file.parent

    def inline_image(self, image_item: str) -> str:
        """ Inline Image

        Parameters
        ----------
        image_item : str
            The image item to inline

        Returns
        -------
        str
            The inlined image
        """
        # pass through if it's a url or base64 encoded
        if image_item.startswith("http") or image_item.startswith("data"):
            return image_item
        # otherwise, it's a local file - need to base64 encode it
        else:
            image_path = self.path / image_item
            with open(image_path, "rb") as f:
                base64_image = base64.b64encode(f.read()).decode("utf-8")

            if image_path.suffix == ".png":
                return f"data:image/png;base64,{base64_image}"
            elif image_path.suffix == ".jpg":
                return f"data:image/jpeg;base64,{base64_image}"
            elif image_path.suffix == ".jpeg":
                return f"data:image/jpeg;base64,{base64_image}"
            else:
                raise ValueError(
                    f"Invalid image format {image_path.suffix} - currently only .png and .jpg / .jpeg are supported."
                )

    def parse_content(self, content: str):
        """ for parsing inline images

        Parameters
        ----------
        content : str
            The content to parse

        Returns
        -------
        any
            The parsed content
        """
        # regular expression to parse markdown images
        image = r"(?P<alt>!\[[^\]]*\])\((?P<filename>.*?)(?=\"|\))\)"
        matches = re.findall(image, content, flags=re.MULTILINE)
        if len(matches) > 0:
            content_items = []
            content_chunks = re.split(image, content, flags=re.MULTILINE)
            current_chunk = 0
            for i in range(len(content_chunks)):
                # image entry
                if (
                    current_chunk < len(matches)
                    and content_chunks[i] == matches[current_chunk][0]
                ):
                    content_items.append(
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": self.inline_image(
                                    matches[current_chunk][1].split(" ")[0].strip()
                                )
                            },
                        }
                    )
                # second part of image entry
                elif (
                    current_chunk < len(matches)
                    and content_chunks[i] == matches[current_chunk][1]
                ):
                    current_chunk += 1
                # text entry
                else:
                    if len(content_chunks[i].strip()) > 0:
                        content_items.append(
                            {"type": "text", "text": content_chunks[i].strip()}
                        )
            return content_items
        else:
            return content

    def invoke(self, data: str) -> str:
        """ Invoke the Prompty Chat Parser

        Parameters
        ----------
        data : str
            The data to parse

        Returns
        -------
        str
            The parsed data
        """
        messages = []
        separator = r"(?i)^\s*#?\s*(" + "|".join(self.roles) + r")\s*:\s*\n"

        # get valid chunks - remove empty items
        chunks = [
            item
            for item in re.split(separator, data, flags=re.MULTILINE)
            if len(item.strip()) > 0
        ]

        # if no starter role, then inject system role
        if chunks[0].strip().lower() not in self.roles:
            chunks.insert(0, "system")

        # if the last chunk is a role entry with no content, remove it
        if chunks[-1].strip().lower() in self.roles:
            chunks.pop()

        if len(chunks) % 2 != 0:
            raise ValueError("Invalid prompt format")

        # create messages
        for i in range(0, len(chunks), 2):
            role = chunks[i].strip().lower()
            content = chunks[i + 1].strip()
            messages.append({"role": role, "content": self.parse_content(content)})

        return messages
prompty/renderers.py
ADDED
@@ -0,0 +1,23 @@
from jinja2 import DictLoader, Environment
from .core import Invoker, InvokerFactory, Prompty


@InvokerFactory.register_renderer("jinja2")
class Jinja2Renderer(Invoker):
    """ Jinja2 Renderer """
    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)
        self.templates = {}
        # generate template dictionary
        cur_prompt = self.prompty
        while cur_prompt:
            self.templates[cur_prompt.file.name] = cur_prompt.content
            cur_prompt = cur_prompt.basePrompty

        self.name = self.prompty.file.name

    def invoke(self, data: any) -> any:
        env = Environment(loader=DictLoader(self.templates))
        t = env.get_template(self.name)
        generated = t.render(**data)
        return generated
prompty/serverless/executor.py
ADDED
@@ -0,0 +1,82 @@
import importlib.metadata
from typing import Iterator
from azure.core.credentials import AzureKeyCredential
from azure.ai.inference import (
    ChatCompletionsClient,
    EmbeddingsClient,
)
from azure.ai.inference.models import (
    StreamingChatCompletions,
    AsyncStreamingChatCompletions,
)
from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, AsyncPromptyStream

VERSION = importlib.metadata.version("prompty")


@InvokerFactory.register_executor("serverless")
class ServerlessExecutor(Invoker):
    """Serverless Executor"""

    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)

        # serverless configuration
        self.endpoint = self.prompty.model.configuration["endpoint"]
        self.model = self.prompty.model.configuration["model"]
        self.key = self.prompty.model.configuration["key"]

        # api type
        self.api = self.prompty.model.api

    def invoke(self, data: any) -> any:
        """Invoke the Serverless SDK

        Parameters
        ----------
        data : any
            The data to send to the Serverless SDK

        Returns
        -------
        any
            The response from the Serverless SDK
        """
        if self.api == "chat":
            response = ChatCompletionsClient(
                endpoint=self.endpoint,
                credential=AzureKeyCredential(self.key),
            ).complete(
                model=self.model,
                messages=data if isinstance(data, list) else [data],
                **self.prompty.model.parameters,
            )

        elif self.api == "completion":
            raise NotImplementedError(
                "Serverless Completions API is not implemented yet"
            )

        elif self.api == "embedding":
            response = EmbeddingsClient(
                endpoint=self.endpoint,
                credential=AzureKeyCredential(self.key),
            ).embed(
                model=self.model,
                input=data if isinstance(data, list) else [data],
                **self.prompty.model.parameters,
            )

        elif self.api == "image":
            raise NotImplementedError("Serverless Image API is not implemented yet")

        # stream response
        if isinstance(response, Iterator):
            if isinstance(response, StreamingChatCompletions):
                return PromptyStream("ServerlessExecutor", response)
            elif isinstance(response, AsyncStreamingChatCompletions):
                return AsyncPromptyStream("ServerlessExecutor", response)
            return PromptyStream("ServerlessExecutor", response)
        else:
            return response
prompty/serverless/processor.py
ADDED
@@ -0,0 +1,62 @@
from typing import Iterator
from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall

from azure.ai.inference.models import ChatCompletions, EmbeddingsResult


@InvokerFactory.register_processor("serverless")
class ServerlessProcessor(Invoker):
    """Serverless Processor"""

    def __init__(self, prompty: Prompty) -> None:
        super().__init__(prompty)

    def invoke(self, data: any) -> any:
        """Invoke the Serverless response processor

        Parameters
        ----------
        data : any
            The response data from the Serverless SDK

        Returns
        -------
        any
            The processed response
        """
        if isinstance(data, ChatCompletions):
            response = data.choices[0].message
            # tool calls available in response
            if response.tool_calls:
                return [
                    ToolCall(
                        id=tool_call.id,
                        name=tool_call.function.name,
                        arguments=tool_call.function.arguments,
                    )
                    for tool_call in response.tool_calls
                ]
            else:
                return response.content

        elif isinstance(data, EmbeddingsResult):
            if len(data.data) == 0:
                raise ValueError("Invalid data")
            elif len(data.data) == 1:
                return data.data[0].embedding
            else:
                return [item.embedding for item in data.data]
        elif isinstance(data, Iterator):

            def generator():
                for chunk in data:
                    if (
                        len(chunk.choices) == 1
                        and chunk.choices[0].delta.content is not None
                    ):
                        content = chunk.choices[0].delta.content
                        yield content

            return PromptyStream("ServerlessProcessor", generator())
        else:
            return data
prompty/tracer.py
ADDED
@@ -0,0 +1,260 @@
import os
import json
import inspect
import importlib.metadata
import contextlib
from pathlib import Path
from numbers import Number
from datetime import datetime
from pydantic import BaseModel
from functools import wraps, partial
from typing import Any, Callable, Dict, Iterator, List


class Tracer:
    _tracers: Dict[str, Callable[[str], Iterator[Callable[[str, Any], None]]]] = {}

    @classmethod
    def add(
        cls, name: str, tracer: Callable[[str], Iterator[Callable[[str, Any], None]]]
    ) -> None:
        cls._tracers[name] = tracer

    @classmethod
    def clear(cls) -> None:
        cls._tracers = {}

    @classmethod
    @contextlib.contextmanager
    def start(cls, name: str) -> Iterator[Callable[[str, Any], None]]:
        with contextlib.ExitStack() as stack:
            traces = [
                stack.enter_context(tracer(name)) for tracer in cls._tracers.values()
            ]
            yield lambda key, value: [trace(key, value) for trace in traces]


def to_dict(obj: Any) -> Dict[str, Any]:
    # simple json types
    if isinstance(obj, str) or isinstance(obj, Number) or isinstance(obj, bool):
        return obj
    # datetime
    elif isinstance(obj, datetime):
        return obj.isoformat()
    # safe Prompty obj serialization
    elif type(obj).__name__ == "Prompty":
        return obj.to_safe_dict()
    # safe PromptyStream obj serialization
    elif type(obj).__name__ == "PromptyStream":
        return "PromptyStream"
    elif type(obj).__name__ == "AsyncPromptyStream":
        return "AsyncPromptyStream"
    # pydantic models have their own json serialization
    elif isinstance(obj, BaseModel):
        return obj.model_dump()
    # recursive list and dict
    elif isinstance(obj, list):
        return [to_dict(item) for item in obj]
    elif isinstance(obj, dict):
        return {k: v if isinstance(v, str) else to_dict(v) for k, v in obj.items()}
    elif isinstance(obj, Path):
        return str(obj)
    # cast to string otherwise...
    else:
        return str(obj)


def _name(func: Callable, args):
    if hasattr(func, "__qualname__"):
        signature = f"{func.__module__}.{func.__qualname__}"
    else:
        signature = f"{func.__module__}.{func.__name__}"

    # core invoker gets special treatment
    core_invoker = signature == "prompty.core.Invoker.__call__"
    if core_invoker:
        name = type(args[0]).__name__
        signature = f"{args[0].__module__}.{args[0].__class__.__name__}.invoke"
    else:
        name = func.__name__

    return name, signature


def _inputs(func: Callable, args, kwargs) -> dict:
    ba = inspect.signature(func).bind(*args, **kwargs)
    ba.apply_defaults()

    inputs = {k: to_dict(v) for k, v in ba.arguments.items() if k != "self"}

    return inputs


def _results(result: Any) -> dict:
    return to_dict(result) if result is not None else "None"


def _trace_sync(func: Callable = None, *, description: str = None) -> Callable:
    description = description or ""

    @wraps(func)
    def wrapper(*args, **kwargs):
        name, signature = _name(func, args)
        with Tracer.start(name) as trace:
            trace("signature", signature)
            if description and description != "":
                trace("description", description)

            inputs = _inputs(func, args, kwargs)
            trace("inputs", inputs)

            result = func(*args, **kwargs)
            trace("result", _results(result))

            return result

    return wrapper


def _trace_async(func: Callable = None, *, description: str = None) -> Callable:
    description = description or ""

    @wraps(func)
    async def wrapper(*args, **kwargs):
        name, signature = _name(func, args)
        with Tracer.start(name) as trace:
            trace("signature", signature)
            if description and description != "":
                trace("description", description)

            inputs = _inputs(func, args, kwargs)
            trace("inputs", inputs)

            result = await func(*args, **kwargs)
            trace("result", _results(result))

            return result

    return wrapper


def trace(func: Callable = None, *, description: str = None) -> Callable:
    if func is None:
        return partial(trace, description=description)

    wrapped_method = _trace_async if inspect.iscoroutinefunction(func) else _trace_sync

    return wrapped_method(func, description=description)


class PromptyTracer:
    def __init__(self, output_dir: str = None) -> None:
        if output_dir:
            self.output = Path(output_dir).resolve().absolute()
        else:
            self.output = Path(Path(os.getcwd()) / ".runs").resolve().absolute()

        if not self.output.exists():
            self.output.mkdir(parents=True, exist_ok=True)

        self.stack: List[Dict[str, Any]] = []

    @contextlib.contextmanager
    def tracer(self, name: str) -> Iterator[Callable[[str, Any], None]]:
        try:
            self.stack.append({"name": name})
            frame = self.stack[-1]
            frame["__time"] = {
                "start": datetime.now(),
            }

            def add(key: str, value: Any) -> None:
                if key not in frame:
                    frame[key] = value
                # multiple values creates list
                else:
                    if isinstance(frame[key], list):
                        frame[key].append(value)
                    else:
                        frame[key] = [frame[key], value]

            yield add
        finally:
            frame = self.stack.pop()
            start: datetime = frame["__time"]["start"]
            end: datetime = datetime.now()

            # add duration to frame
            frame["__time"] = {
                "start": start.strftime("%Y-%m-%dT%H:%M:%S.%f"),
                "end": end.strftime("%Y-%m-%dT%H:%M:%S.%f"),
                "duration": int((end - start).total_seconds() * 1000),
            }

            # hoist usage to parent frame
            if "result" in frame and isinstance(frame["result"], dict):
                if "usage" in frame["result"]:
                    if "__usage" in frame:
                        for key, value in frame["result"]["usage"].items():
                            frame["__usage"][key] += value
                    else:
                        frame["__usage"] = frame["result"]["usage"]

            # streamed results may have usage as well
            if "result" in frame and isinstance(frame["result"], list):
                for result in frame["result"]:
                    if (
                        isinstance(result, dict)
                        and "usage" in result
                        and isinstance(result["usage"], dict)
                    ):
                        if "__usage" not in frame:
                            frame["__usage"] = {}
                        for key, value in result["usage"].items():
                            if key not in frame["__usage"]:
                                frame["__usage"][key] = value
                            else:
                                frame["__usage"][key] += value

            # add any usage frames from below
            if "__frames" in frame:
                for child in frame["__frames"]:
                    if "__usage" in child:
                        if "__usage" not in frame:
                            frame["__usage"] = {}
                        for key, value in child["__usage"].items():
                            if key not in frame["__usage"]:
                                frame["__usage"][key] = value
                            else:
                                frame["__usage"][key] += value

            # if stack is empty, dump the frame
            if len(self.stack) == 0:
                trace_file = (
                    self.output
                    / f"{frame['name']}.{datetime.now().strftime('%Y%m%d.%H%M%S')}.ptrace"
                )

                v = importlib.metadata.version("prompty")
                enriched_frame = {
                    "runtime": "python",
                    "version": v,
                    "trace": frame,
                }

                with open(trace_file, "w") as f:
                    json.dump(enriched_frame, f, indent=4)
            # otherwise, append the frame to the parent
            else:
                if "__frames" not in self.stack[-1]:
                    self.stack[-1]["__frames"] = []
                self.stack[-1]["__frames"].append(frame)


@contextlib.contextmanager
def console_tracer(name: str) -> Iterator[Callable[[str, Any], None]]:
    try:
        print(f"Starting {name}")
        yield lambda key, value: print(f"{key}:\n{json.dumps(value, indent=4)}")
    finally:
        print(f"Ending {name}")
prompty-0.1.12.dist-info/METADATA
ADDED
@@ -0,0 +1,17 @@
Metadata-Version: 2.1
Name: prompty
Version: 0.1.12
Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
Author-Email: Seth Juarez <seth.juarez@microsoft.com>
Requires-Dist: pyyaml>=6.0.1
Requires-Dist: pydantic>=2.8.2
Requires-Dist: jinja2>=3.1.4
Requires-Dist: python-dotenv>=1.0.1
Requires-Dist: click>=8.1.7
Requires-Dist: azure-identity>=1.17.1; extra == "azure"
Requires-Dist: openai>=1.35.10; extra == "azure"
Requires-Dist: openai>=1.35.10; extra == "openai"
Requires-Dist: azure-ai-inference>=1.0.0b3; extra == "serverless"
Provides-Extra: azure
Provides-Extra: openai
Provides-Extra: serverless
prompty-0.1.12.dist-info/RECORD
ADDED
@@ -0,0 +1,19 @@
prompty-0.1.12.dist-info/METADATA,sha256=4uJHCJDPTuM6Sp1ikmtS68WepC329WCDMAdfr9zBanQ,783
prompty-0.1.12.dist-info/WHEEL,sha256=CuZGaXTwoRLAOVv0AcE3bCTxO5ejVuBEJkUBe9C-kvk,94
prompty-0.1.12.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
prompty/__init__.py,sha256=5t_hxoRVxsbc7gfMyH9EVO2j259dM6uac3GmspQ-MjE,11146
prompty/azure/__init__.py,sha256=6duJ79CDPG3w-cLk3vt8YfELDMOtSsnI2ClNLMFP_Og,100
prompty/azure/executor.py,sha256=x2ng2EbYUxbingjy8w27TFGWezs4QC0LHh_S0F0-E1U,3082
prompty/azure/processor.py,sha256=e9CcKG665zvCLPeJfS91FM6c_W_6YY0mVENxinCo19A,2253
prompty/cli.py,sha256=oIJ5aPCjwBl4xA5SWkbQ_Xj0KhzuSJhRnqV95DhfORc,3382
prompty/core.py,sha256=Zy6BFYUAMTeaAFeHdcRkm4lrxlyBEKHwc96XQJH3M2U,16120
prompty/openai/__init__.py,sha256=6duJ79CDPG3w-cLk3vt8YfELDMOtSsnI2ClNLMFP_Og,100
prompty/openai/executor.py,sha256=hlze8dXG_jPurPBN7vPC-HJC1dYXSSZeElhE_X_BJhk,2217
prompty/openai/processor.py,sha256=PacKjMmGO-fd5KhOs98JyjsIf0Kl_J2SX5VroA8lVbI,2174
prompty/parsers.py,sha256=4mmIn4SVNs8B0R1BufanqUJk8v4r0OEEo8yx6UOxQpA,4670
prompty/renderers.py,sha256=RSHFQFx7AtKLUfsMLCXR0a56Mb7DL1NJNgjUqgg3IqU,776
prompty/serverless/__init__.py,sha256=KgsiNr-IhPiIuZoChvDf6xbbvFF467MCUKspJHo56yc,98
prompty/serverless/executor.py,sha256=2XVzFX9SMX33sQTW-AZObiZ5NtVl3xVahb79ejMrlz8,2684
prompty/serverless/processor.py,sha256=pft1XGbPzo0MzQMbAt1VxsLsvRrjQO3B8MXEE2PfSA0,1982
prompty/tracer.py,sha256=r-HC__xLtFPb2pr-xGUuhdlAaMA_PmsBB_NG79jKRO4,8810
prompty-0.1.12.dist-info/RECORD,,
prompty-0.1.12.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,7 @@
Copyright (c) 2024 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.