pixie-prompts 0.1.1__tar.gz → 0.1.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/PKG-INFO +2 -1
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/pixie/prompts/graphql.py +87 -1
- pixie_prompts-0.1.2/pixie/prompts/utils.py +433 -0
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/pyproject.toml +3 -2
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/LICENSE +0 -0
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/README.md +0 -0
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/pixie/prompts/__init__.py +0 -0
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/pixie/prompts/file_watcher.py +0 -0
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/pixie/prompts/prompt.py +0 -0
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/pixie/prompts/prompt_management.py +0 -0
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/pixie/prompts/server.py +0 -0
- {pixie_prompts-0.1.1 → pixie_prompts-0.1.2}/pixie/prompts/storage.py +0 -0
PKG-INFO:

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pixie-prompts
-Version: 0.1.1
+Version: 0.1.2
 Summary: Code-first, type-safe prompt management
 License: MIT
 License-File: LICENSE
@@ -21,6 +21,7 @@ Requires-Dist: fastapi (>=0.128.0) ; extra == "server"
 Requires-Dist: jinja2 (>=3.1.6,<4.0.0)
 Requires-Dist: jsonsubschema (>=0.0.7,<0.0.8)
 Requires-Dist: pydantic (>=2.12.5,<3.0.0)
+Requires-Dist: pydantic-ai-slim (>=1.39.0) ; extra == "server"
 Requires-Dist: strawberry-graphql (>=0.288.1) ; extra == "server"
 Requires-Dist: uvicorn (>=0.40.0) ; extra == "server"
 Requires-Dist: watchdog (>=6.0.0) ; extra == "server"
```
pixie/prompts/graphql.py:

```diff
@@ -1,14 +1,21 @@
 """GraphQL schema for SDK server."""
 
+from datetime import datetime
 import logging
-from typing import Optional
+from typing import Any, Optional, cast
 
 from graphql import GraphQLError
+from pydantic_ai import ModelSettings
+from pydantic_ai.direct import model_request
 import strawberry
 from strawberry.scalars import JSON
 
 from pixie.prompts.prompt import variables_definition_to_schema
 from pixie.prompts.prompt_management import get_prompt, list_prompts
+from pixie.prompts.utils import (
+    assemble_model_request_parameters,
+    openai_messages_to_pydantic_ai_messages,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -62,6 +69,25 @@ class Prompt:
     module: Optional[str] = None
 
 
+@strawberry.type
+class ToolCall:
+    """Tool call information."""
+
+    name: str
+    args: JSON
+    tool_call_id: strawberry.ID
+
+
+@strawberry.type
+class LlmCallResult:
+
+    output: JSON | None
+    tool_calls: list[ToolCall] | None
+    usage: JSON
+    cost: float
+    timestamp: datetime
+
+
 @strawberry.type
 class Query:
     """GraphQL queries."""
@@ -147,6 +173,66 @@ class Query:
 class Mutation:
     """GraphQL mutations."""
 
+    @strawberry.mutation
+    async def call_llm(
+        self,
+        model: str,
+        input_messages: list[JSON],
+        output_schema: Optional[JSON] = None,
+        tools: Optional[list[JSON]] = None,
+        model_parameters: Optional[JSON] = None,
+    ) -> LlmCallResult:
+        """Call LLM with the given inputs.
+
+        Args:
+            model: The model name to use (e.g., "openai:gpt-4").
+            input_messages: List of messages as JSON objects in OpenAI format.
+            output_schema: Optional output schema.
+            tools: Optional tools configuration (not yet implemented).
+            model_parameters: Optional model parameters.
+
+        Returns:
+            LLM call result.
+
+        Raises:
+            GraphQLError: If the LLM call fails.
+        """
+        try:
+            response = await model_request(
+                model=model,
+                messages=openai_messages_to_pydantic_ai_messages(
+                    cast(list[dict[str, Any]], input_messages)
+                ),
+                model_settings=cast(ModelSettings | None, model_parameters),
+                model_request_parameters=assemble_model_request_parameters(
+                    cast(dict[str, Any] | None, output_schema),
+                    cast(list[dict[str, Any]] | None, tools),
+                    strict=True,
+                    allow_text_output=False,
+                ),
+            )
+            return LlmCallResult(
+                output=JSON(response.text),
+                tool_calls=(
+                    [
+                        ToolCall(
+                            name=tc.tool_name,
+                            args=JSON(tc.args_as_dict()),
+                            tool_call_id=strawberry.ID(tc.tool_call_id),
+                        )
+                        for tc in response.tool_calls
+                    ]
+                    if response.tool_calls
+                    else None
+                ),
+                usage=JSON(response.usage.details),
+                cost=float(response.cost().total_price),
+                timestamp=response.timestamp,
+            )
+        except Exception as e:
+            logger.error("Error running LLM: %s", str(e))
+            raise GraphQLError(f"Failed to run LLM: {str(e)}") from e
+
     @strawberry.mutation
     async def add_prompt_version(
         self,
```
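A client-side sketch of invoking the new mutation follows; the endpoint URL is an assumption (not part of this diff), and the camelCase field names assume Strawberry's default snake_case-to-camelCase name conversion:

```python
# Sketch: calling the new callLlm mutation from a client.
# Assumptions: the SDK server listens on localhost:8000 and serves GraphQL
# at /graphql; Strawberry's default camelCase naming applies.
import requests

MUTATION = """
mutation CallLlm($model: String!, $inputMessages: [JSON!]!) {
  callLlm(model: $model, inputMessages: $inputMessages) {
    output
    toolCalls { name args toolCallId }
    usage
    cost
    timestamp
  }
}
"""

resp = requests.post(
    "http://localhost:8000/graphql",
    json={
        "query": MUTATION,
        "variables": {
            "model": "openai:gpt-4",
            "inputMessages": [{"role": "user", "content": "Say hi"}],
        },
    },
    timeout=60,
)
print(resp.json()["data"]["callLlm"])
```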
pixie/prompts/utils.py is a brand-new module (433 lines, all additions); it is reproduced below as plain source for readability, split at function boundaries.

```python
"""Utilities for converting between different message formats."""

from __future__ import annotations

import json
from typing import Any, Literal

from pydantic_ai.messages import (
    ModelMessage,
    ModelRequest,
    ModelResponse,
    SystemPromptPart,
    UserPromptPart,
    TextPart,
    ToolCallPart,
    ToolReturnPart,
    RetryPromptPart,
    ThinkingPart,
    ImageUrl,
    AudioUrl,
    VideoUrl,
    DocumentUrl,
    BinaryContent,
)
from pydantic_ai.models import ModelRequestParameters
from pydantic_ai.tools import ToolDefinition
from pydantic_ai.output import OutputObjectDefinition


def openai_messages_to_pydantic_ai_messages(
    messages: list[dict[str, Any]],
) -> list[ModelMessage]:
    """Convert OpenAI chat completion message format to Pydantic AI messages.

    This function converts the OpenAI message format (used in the chat
    completions API) to the Pydantic AI message format.

    Supported message roles:
    - system/developer: Converted to ModelRequest with SystemPromptPart
    - user: Converted to ModelRequest with UserPromptPart
    - assistant: Converted to ModelResponse with TextPart and/or ToolCallPart
    - tool/function: Converted to ModelRequest with ToolReturnPart

    Args:
        messages: List of OpenAI format messages

    Returns:
        List of Pydantic AI ModelMessage objects

    Raises:
        NotImplementedError: If multimedia content (images, audio, etc.) is encountered
        ValueError: If an unknown message role is encountered
    """
    result: list[ModelMessage] = []

    for msg in messages:
        role = msg.get("role")
        content = msg.get("content")

        if role in ("system", "developer"):
            # System/developer messages become ModelRequest with SystemPromptPart
            result.append(ModelRequest(parts=[SystemPromptPart(content=content or "")]))

        elif role == "user":
            # Check for multimedia content
            if isinstance(content, list):
                # A content array indicates multimodal content
                raise NotImplementedError(
                    "Multimedia content (images, audio, etc.) is not supported. "
                    "Only text content is currently supported."
                )
            result.append(ModelRequest(parts=[UserPromptPart(content=content or "")]))

        elif role == "assistant":
            parts: list[TextPart | ToolCallPart] = []

            # Handle text content if present
            if content:
                parts.append(TextPart(content=content))

            # Handle tool_calls (modern format)
            tool_calls = msg.get("tool_calls", [])
            for tool_call in tool_calls:
                if tool_call.get("type") == "function":
                    func = tool_call.get("function", {})
                    parts.append(
                        ToolCallPart(
                            tool_name=func.get("name", ""),
                            tool_call_id=tool_call.get("id", ""),
                            args=func.get("arguments", "{}"),
                        )
                    )

            # Handle the deprecated function_call format
            function_call = msg.get("function_call")
            if function_call:
                parts.append(
                    ToolCallPart(
                        tool_name=function_call.get("name", ""),
                        args=function_call.get("arguments", "{}"),
                    )
                )

            # If no parts were created but we have an assistant message,
            # create an empty text part
            if not parts:
                parts.append(TextPart(content=content or ""))

            result.append(ModelResponse(parts=parts))

        elif role == "tool":
            # Tool response message
            tool_call_id = msg.get("tool_call_id", "")
            tool_name = msg.get("name", "")  # Optional in OpenAI format
            tool_content = msg.get("content", "")

            result.append(
                ModelRequest(
                    parts=[
                        ToolReturnPart(
                            tool_name=tool_name,
                            tool_call_id=tool_call_id,
                            content=tool_content,
                        )
                    ]
                )
            )

        elif role == "function":
            # Deprecated function role message
            func_name = msg.get("name", "")
            func_content = msg.get("content", "")

            result.append(
                ModelRequest(
                    parts=[
                        ToolReturnPart(
                            tool_name=func_name,
                            content=func_content,
                        )
                    ]
                )
            )

        else:
            raise ValueError(f"Unknown message role: {role}")

    return result
```
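A hedged usage sketch (not part of the package): converting a short OpenAI-style conversation with this function.

```python
# Illustrative only: the messages are invented for this example.
from pixie.prompts.utils import openai_messages_to_pydantic_ai_messages

openai_msgs = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What's the weather in Paris?"},
]

pai_msgs = openai_messages_to_pydantic_ai_messages(openai_msgs)
# pai_msgs == [ModelRequest(parts=[SystemPromptPart(...)]),
#              ModelRequest(parts=[UserPromptPart(...)])]
```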
The module continues with the reverse conversion:

```python
def pydantic_ai_messages_to_openai_messages(
    messages: list[ModelMessage],
) -> list[dict[str, Any]]:
    """Convert Pydantic AI messages to OpenAI chat completion message format.

    This function converts Pydantic AI messages to the OpenAI message format
    that can be used with the chat completions API.

    Supported Pydantic AI parts:
    - SystemPromptPart: Converted to system role message
    - UserPromptPart: Converted to user role message
    - TextPart: Part of assistant role message
    - ToolCallPart: Part of assistant role message with tool_calls
    - ToolReturnPart: Converted to tool role message
    - RetryPromptPart: Converted to tool or user role message
    - ThinkingPart: Excluded from output (internal to the model)

    Args:
        messages: List of Pydantic AI ModelMessage objects

    Returns:
        List of OpenAI format messages

    Raises:
        NotImplementedError: If multimedia content is encountered in UserPromptPart
    """
    result: list[dict[str, Any]] = []

    for msg in messages:
        if isinstance(msg, ModelRequest):
            # Process each part of the request
            for part in msg.parts:
                if isinstance(part, SystemPromptPart):
                    result.append({"role": "system", "content": part.content})

                elif isinstance(part, UserPromptPart):
                    # Check for multimedia content
                    if not isinstance(part.content, str):
                        # Content is a sequence; check for non-text content
                        for item in part.content:
                            if isinstance(
                                item,
                                (
                                    ImageUrl,
                                    AudioUrl,
                                    VideoUrl,
                                    DocumentUrl,
                                    BinaryContent,
                                ),
                            ):
                                raise NotImplementedError(
                                    "Multimedia content is not supported. "
                                    "Only text content is currently supported."
                                )
                        # If we get here, all items should be strings - join them
                        text_content = " ".join(
                            item if isinstance(item, str) else str(item)
                            for item in part.content
                        )
                        result.append({"role": "user", "content": text_content})
                    else:
                        result.append({"role": "user", "content": part.content})

                elif isinstance(part, ToolReturnPart):
                    # Serialize content if it's not a string
                    content = part.content
                    if not isinstance(content, str):
                        content = json.dumps(content)

                    result.append(
                        {
                            "role": "tool",
                            "tool_call_id": part.tool_call_id,
                            "content": content,
                        }
                    )

                elif isinstance(part, RetryPromptPart):
                    # Convert retry prompt based on whether it has a tool name
                    if part.tool_name:
                        # Retry for a tool call - send as tool message
                        content = (
                            part.content
                            if isinstance(part.content, str)
                            else json.dumps(part.content)
                        )
                        result.append(
                            {
                                "role": "tool",
                                "tool_call_id": part.tool_call_id,
                                "content": part.model_response(),
                            }
                        )
                    else:
                        # General retry - send as user message
                        result.append(
                            {"role": "user", "content": part.model_response()}
                        )

        elif isinstance(msg, ModelResponse):
            # Collect text parts and tool call parts
            text_parts: list[str] = []
            tool_calls: list[dict[str, Any]] = []

            for part in msg.parts:
                if isinstance(part, TextPart):
                    text_parts.append(part.content)
                elif isinstance(part, ToolCallPart):
                    # Convert args to a string if it's a dict
                    args = part.args
                    if isinstance(args, dict):
                        args = json.dumps(args)
                    elif args is None:
                        args = "{}"

                    tool_calls.append(
                        {
                            "id": part.tool_call_id,
                            "type": "function",
                            "function": {
                                "name": part.tool_name,
                                "arguments": args,
                            },
                        }
                    )
                elif isinstance(part, ThinkingPart):
                    # ThinkingPart is internal, skip it
                    pass
                # Other part types (BuiltinToolCallPart, BuiltinToolReturnPart, FilePart)
                # are not directly mappable to OpenAI format

            # Build the assistant message
            assistant_msg: dict[str, Any] = {"role": "assistant"}

            if text_parts:
                assistant_msg["content"] = "\n\n".join(text_parts)
            else:
                assistant_msg["content"] = None

            if tool_calls:
                assistant_msg["tool_calls"] = tool_calls

            result.append(assistant_msg)

    return result
```
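Round-tripping a plain text conversation through both converters should be lossless; a minimal sketch (again illustrative, not from the package):

```python
from pixie.prompts.utils import (
    openai_messages_to_pydantic_ai_messages,
    pydantic_ai_messages_to_openai_messages,
)

msgs = [{"role": "user", "content": "ping"}]
assert pydantic_ai_messages_to_openai_messages(
    openai_messages_to_pydantic_ai_messages(msgs)
) == msgs
```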
The module closes with a helper that builds Pydantic AI request parameters from OpenAI-format tool and output-schema definitions:

```python
def assemble_model_request_parameters(
    output_schema: dict[str, Any] | None,
    tools: list[dict[str, Any]] | None,
    *,
    output_mode: Literal["text", "tool", "native", "prompted"] | None = None,
    strict: bool | None = None,
    allow_text_output: bool = True,
) -> ModelRequestParameters:
    """Assemble Pydantic AI ModelRequestParameters from OpenAI format tools and output schema.

    This function converts OpenAI format tool definitions and a JSON schema output
    specification to the Pydantic AI ModelRequestParameters format that can be used
    with model requests.

    Args:
        output_schema: A JSON schema defining the expected structured output format.
            If provided, creates an OutputObjectDefinition. The schema can include
            'title' and 'description' fields, which will be extracted.
        tools: List of OpenAI format tool definitions. Each tool should have the format:
            {
                "type": "function",
                "function": {
                    "name": "tool_name",
                    "description": "Tool description",
                    "parameters": { ... JSON schema ... },
                    "strict": true/false  # optional
                }
            }
        output_mode: The output mode for structured output. Defaults to "native" when
            output_schema is provided, otherwise "text".
        strict: Whether to enforce strict JSON schema validation for output.
            Only applies when output_schema is provided.
        allow_text_output: Whether plain text output is allowed alongside structured
            output. Defaults to True.

    Returns:
        ModelRequestParameters configured with function_tools and/or output_object.

    Raises:
        ValueError: If a tool has an unsupported type (not "function"),
            if a tool is missing the 'function' key, or if a tool is missing a 'name'.

    Example:
        >>> tools = [
        ...     {
        ...         "type": "function",
        ...         "function": {
        ...             "name": "get_weather",
        ...             "description": "Get weather for a location",
        ...             "parameters": {
        ...                 "type": "object",
        ...                 "properties": {"location": {"type": "string"}},
        ...                 "required": ["location"]
        ...             }
        ...         }
        ...     }
        ... ]
        >>> output_schema = {
        ...     "type": "object",
        ...     "properties": {"temperature": {"type": "number"}},
        ...     "required": ["temperature"]
        ... }
        >>> params = assemble_model_request_parameters(
        ...     output_schema=output_schema,
        ...     tools=tools
        ... )
    """
    function_tools: list[ToolDefinition] = []
    output_object: OutputObjectDefinition | None = None

    # Convert tools to ToolDefinition objects
    if tools:
        for tool in tools:
            tool_type = tool.get("type")
            if tool_type != "function":
                raise ValueError(
                    f"Unsupported tool type: {tool_type}. Only 'function' type is supported."
                )

            function_def = tool.get("function")
            if function_def is None:
                raise ValueError(
                    "Missing 'function' key in tool definition. "
                    "Expected format: {'type': 'function', 'function': {...}}"
                )

            name = function_def.get("name")
            if not name:
                raise ValueError(
                    "Missing 'name' in function definition. "
                    "Every tool must have a name."
                )

            description = function_def.get("description")
            parameters = function_def.get("parameters")
            tool_strict = function_def.get("strict")

            # Build the parameters schema, defaulting to an empty object if not provided
            parameters_json_schema: dict[str, Any] = (
                parameters
                if parameters is not None
                else {"type": "object", "properties": {}}
            )

            function_tools.append(
                ToolDefinition(
                    name=name,
                    description=description,
                    parameters_json_schema=parameters_json_schema,
                    strict=tool_strict,
                    kind="function",
                )
            )

    # Convert output_schema to an OutputObjectDefinition
    if output_schema is not None:
        # Extract optional title and description from the schema
        schema_name = output_schema.get("title")
        schema_description = output_schema.get("description")

        output_object = OutputObjectDefinition(
            json_schema=output_schema,
            name=schema_name,
            description=schema_description,
            strict=strict,
        )

    # Determine output_mode
    if output_mode is None:
        output_mode = "native" if output_schema is not None else "text"

    return ModelRequestParameters(
        function_tools=function_tools,
        output_mode=output_mode,
        output_object=output_object,
        allow_text_output=allow_text_output,
    )
```
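Outside the GraphQL layer, these helpers compose directly with pydantic_ai.direct.model_request, mirroring how graphql.py wires them together. A sketch, assuming pydantic-ai-slim is installed and model credentials are configured (the model name and schema are illustrative):

```python
# Sketch: a structured-output request built from the new helpers.
import asyncio

from pydantic_ai.direct import model_request

from pixie.prompts.utils import (
    assemble_model_request_parameters,
    openai_messages_to_pydantic_ai_messages,
)


async def main() -> None:
    response = await model_request(
        model="openai:gpt-4",
        messages=openai_messages_to_pydantic_ai_messages(
            [{"role": "user", "content": "What's the temperature in Paris?"}]
        ),
        model_request_parameters=assemble_model_request_parameters(
            {
                "title": "Weather",
                "type": "object",
                "properties": {"temperature": {"type": "number"}},
                "required": ["temperature"],
            },
            None,  # no tools
            strict=True,
            allow_text_output=False,
        ),
    )
    print(response.text)  # JSON text matching the schema


asyncio.run(main())
```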
pyproject.toml:

```diff
@@ -4,7 +4,7 @@ packages = [
     { include = "pixie" },
 ]
 
-version = "0.1.1"
+version = "0.1.2" # Managed by setuptools-scm
 description = "Code-first, type-safe prompt management"
 authors = ["Yiou Li <yol@gopixie.ai>"]
 license = "MIT"
@@ -28,9 +28,10 @@ uvicorn = {version = ">=0.40.0", optional = true}
 colorlog = {version = ">=6.10.1", optional = true}
 dotenv = {version = ">=0.9.9", optional = true}
 watchdog = {version = ">=6.0.0", optional = true}
+pydantic-ai-slim = {version = ">=1.39.0", optional = true}
 
 [tool.poetry.extras]
-server = ["fastapi", "strawberry-graphql", "uvicorn", "colorlog", "dotenv", "watchdog"]
+server = ["fastapi", "strawberry-graphql", "uvicorn", "colorlog", "dotenv", "watchdog", "pydantic-ai-slim"]
 
 [tool.poetry.group.dev.dependencies]
 pytest = ">=7.4,<9.0"
```
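Since pydantic-ai-slim is declared optional and gated behind the server extra, the new dependency is only pulled in by `pip install "pixie-prompts[server]"`; a plain install of pixie-prompts is unaffected.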