pixie-prompts 0.1.1__tar.gz → 0.1.3__tar.gz

This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pixie-prompts
-Version: 0.1.1
+Version: 0.1.3
 Summary: Code-first, type-safe prompt management
 License: MIT
 License-File: LICENSE
@@ -21,6 +21,7 @@ Requires-Dist: fastapi (>=0.128.0) ; extra == "server"
 Requires-Dist: jinja2 (>=3.1.6,<4.0.0)
 Requires-Dist: jsonsubschema (>=0.0.7,<0.0.8)
 Requires-Dist: pydantic (>=2.12.5,<3.0.0)
+Requires-Dist: pydantic-ai-slim (>=1.39.0) ; extra == "server"
 Requires-Dist: strawberry-graphql (>=0.288.1) ; extra == "server"
 Requires-Dist: uvicorn (>=0.40.0) ; extra == "server"
 Requires-Dist: watchdog (>=6.0.0) ; extra == "server"
@@ -10,7 +10,7 @@ from watchdog.observers import Observer
 import asyncio
 import logging
 
-from pixie.prompts.storage import initialize_prompt_storage
+from pixie.prompts.storage import PromptLoadError, initialize_prompt_storage
 
 logger = logging.getLogger(__name__)
 
@@ -311,7 +311,11 @@ async def stop_storage_watcher() -> None:
 def init_prompt_storage():
 
     storage_directory = os.getenv("PIXIE_PROMPT_STORAGE_DIR", ".pixie/prompts")
-    initialize_prompt_storage(storage_directory)
+    try:
+        initialize_prompt_storage(storage_directory)
+    except PromptLoadError as e:
+        for err in e.failures:
+            logger.error("Prompt load error: %s", err)
 
 @asynccontextmanager
 async def lifespan(app: FastAPI):
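
The hunk above catches a PromptLoadError and logs each entry of its failures attribute, so the server can start even when some prompts fail to load. A minimal sketch of the exception shape this implies (names inferred from the diff, not taken from pixie's source):

    # Hypothetical sketch: an aggregate error carrying one entry per prompt
    # that failed to load, matching how the hunk above iterates e.failures.
    class PromptLoadError(Exception):
        def __init__(self, failures: list[Exception]) -> None:
            super().__init__(f"{len(failures)} prompt(s) failed to load")
            self.failures = failures
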
@@ -1,14 +1,25 @@
 """GraphQL schema for SDK server."""
 
+from datetime import datetime
+import json
 import logging
-from typing import Optional
+from typing import Any, Optional, cast, get_args
 
 from graphql import GraphQLError
+import jinja2
+from pydantic_ai import ModelSettings
+from pydantic_ai.direct import model_request
+from pydantic_ai.models import KnownModelName
 import strawberry
 from strawberry.scalars import JSON
 
 from pixie.prompts.prompt import variables_definition_to_schema
 from pixie.prompts.prompt_management import get_prompt, list_prompts
+from pixie.prompts.utils import (
+    assemble_model_request_parameters,
+    openai_messages_to_pydantic_ai_messages,
+    pydantic_ai_messages_to_openai_messages,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -62,10 +73,40 @@ class Prompt:
     module: Optional[str] = None
 
 
+@strawberry.type
+class ToolCall:
+    """Tool call information."""
+
+    name: str
+    args: JSON
+    tool_call_id: strawberry.ID
+
+
+@strawberry.type
+class LlmCallResult:
+
+    input: JSON
+    output: JSON | None
+    tool_calls: list[ToolCall] | None
+    usage: JSON
+    cost: float
+    timestamp: datetime
+    reasoning: str | None
+
+
 @strawberry.type
 class Query:
     """GraphQL queries."""
 
+    @strawberry.field
+    async def possible_models(self) -> list[str]:
+        """List possible model names.
+
+        Returns:
+            A list of model names supported by the server.
+        """
+        return list(get_args(KnownModelName.__value__))
+
     @strawberry.field
    async def health_check(self) -> str:
         """Health check endpoint."""
@@ -147,6 +188,108 @@ class Query:
 class Mutation:
     """GraphQL mutations."""
 
+    @strawberry.mutation
+    async def call_llm(
+        self,
+        model: str,
+        prompt_template: str,
+        variables: Optional[JSON],
+        prompt_placeholder: str,
+        input_messages: list[JSON],
+        output_schema: Optional[JSON] = None,
+        tools: Optional[list[JSON]] = None,
+        model_parameters: Optional[JSON] = None,
+    ) -> LlmCallResult:
+        """Call an LLM with the given inputs.
+
+        Args:
+            model: The model name to use (e.g., "openai:gpt-4").
+            prompt_template: Prompt template string.
+            variables: Variables for the prompt template.
+            prompt_placeholder: Placeholder string in the input messages to be replaced with the rendered prompt.
+            input_messages: List of messages as JSON objects in OpenAI format, containing prompt_placeholder in content.
+            output_schema: Optional output schema.
+            tools: Optional tools configuration (not yet implemented).
+            model_parameters: Optional model parameters.
+
+        Returns:
+            The LLM call result.
+
+        Raises:
+            GraphQLError: If the LLM call fails.
+        """
+        try:
+            template = jinja2.Template(prompt_template)
+            prompt = template.render(**(cast(dict[str, Any], variables) or {}))
+            print(prompt)
+            print(type(prompt))
+            pydantic_messages = openai_messages_to_pydantic_ai_messages(
+                cast(list[dict[str, Any]], input_messages)
+            )
+            # Replace the placeholder in the input messages with the rendered prompt
+            for msg in pydantic_messages:
+                for part in msg.parts:
+                    if part.part_kind == "user-prompt":
+                        if isinstance(part.content, str):
+                            part.content = part.content.replace(
+                                prompt_placeholder,
+                                prompt,
+                            )
+                        else:
+                            part.content = [
+                                p.replace(prompt_placeholder, prompt)
+                                for p in part.content
+                                if isinstance(p, str)
+                            ]
+                    elif part.part_kind == "system-prompt":
+                        part.content = part.content.replace(prompt_placeholder, prompt)
+
+            print(pydantic_messages)
+            response = await model_request(
+                model=model,
+                messages=pydantic_messages,
+                model_settings=cast(ModelSettings | None, model_parameters),
+                model_request_parameters=assemble_model_request_parameters(
+                    cast(dict[str, Any] | None, output_schema),
+                    cast(list[dict[str, Any]] | None, tools),
+                    strict=True,
+                    allow_text_output=False,
+                ),
+            )
+            return LlmCallResult(
+                input=JSON(pydantic_ai_messages_to_openai_messages(pydantic_messages)),
+                output=(
+                    JSON(json.loads(response.text) if output_schema else response.text)
+                    if response.text
+                    else None
+                ),
+                tool_calls=(
+                    [
+                        ToolCall(
+                            name=tc.tool_name,
+                            args=JSON(tc.args_as_dict()),
+                            tool_call_id=strawberry.ID(tc.tool_call_id),
+                        )
+                        for tc in response.tool_calls
+                    ]
+                    if response.tool_calls
+                    else None
+                ),
+                usage=JSON(
+                    {
+                        "input_tokens": response.usage.input_tokens,
+                        "output_tokens": response.usage.output_tokens,
+                        "total_tokens": response.usage.total_tokens,
+                    }
+                ),
+                cost=float(response.cost().total_price),
+                timestamp=response.timestamp,
+                reasoning=response.thinking,
+            )
+        except Exception as e:
+            logger.error("Error running LLM: %s", str(e))
+            raise GraphQLError(f"Failed to run LLM: {str(e)}") from e
+
     @strawberry.mutation
     async def add_prompt_version(
         self,
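
For orientation, a hedged client-side sketch of invoking the new mutation over HTTP. The /graphql path, server address, and camelCased field names are assumptions (Strawberry camelCases snake_case fields by default); the model name and placeholder are illustrative only:

    # Hypothetical client call for the new call_llm mutation.
    import requests

    MUTATION = """
    mutation ($model: String!, $tpl: String!, $vars: JSON, $ph: String!, $msgs: [JSON!]!) {
      callLlm(model: $model, promptTemplate: $tpl, variables: $vars,
              promptPlaceholder: $ph, inputMessages: $msgs) {
        output
        usage
        cost
      }
    }
    """

    resp = requests.post(
        "http://localhost:8000/graphql",  # assumed server address
        json={
            "query": MUTATION,
            "variables": {
                "model": "openai:gpt-4o",
                "tpl": "Summarize in one line: {{ text }}",
                "vars": {"text": "..."},
                "ph": "__PROMPT__",
                "msgs": [{"role": "user", "content": "__PROMPT__"}],
            },
        },
        timeout=60,
    )
    print(resp.json())
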
@@ -0,0 +1,617 @@
+"""Utilities for converting between different message formats."""
+
+from __future__ import annotations
+
+import base64
+import json
+from typing import Any, Literal, Sequence
+
+from pydantic_ai.messages import (
+    ModelMessage,
+    ModelRequest,
+    ModelResponse,
+    SystemPromptPart,
+    UserPromptPart,
+    TextPart,
+    ToolCallPart,
+    ToolReturnPart,
+    RetryPromptPart,
+    ThinkingPart,
+    ImageUrl,
+    AudioUrl,
+    VideoUrl,
+    DocumentUrl,
+    BinaryContent,
+    UserContent,
+)
+from pydantic_ai.models import ModelRequestParameters
+from pydantic_ai.tools import ToolDefinition
+from pydantic_ai.output import OutputObjectDefinition
+
+
+# Mapping from OpenAI audio formats to media types
+AUDIO_FORMAT_TO_MEDIA_TYPE = {
+    "mp3": "audio/mpeg",
+    "wav": "audio/wav",
+    "pcm": "audio/pcm",
+    "flac": "audio/flac",
+    "ogg": "audio/ogg",
+    "aac": "audio/aac",
+    "opus": "audio/opus",
+}
+
+# Mapping from media types to OpenAI audio formats
+MEDIA_TYPE_TO_AUDIO_FORMAT = {
+    "audio/mpeg": "mp3",
+    "audio/mp3": "mp3",
+    "audio/wav": "wav",
+    "audio/x-wav": "wav",
+    "audio/wave": "wav",
+    "audio/pcm": "pcm",
+    "audio/flac": "flac",
+    "audio/ogg": "ogg",
+    "audio/aac": "aac",
+    "audio/opus": "opus",
+}
+
+
+def _convert_openai_content_array_to_pydantic(
+    content_array: list[dict[str, Any]],
+) -> list[UserContent]:
+    """Convert OpenAI content array format to Pydantic AI UserContent list.
+
+    Args:
+        content_array: List of content parts in OpenAI format
+
+    Returns:
+        List of Pydantic AI UserContent items (str, ImageUrl, AudioUrl, BinaryContent, etc.)
+    """
+    user_content: list[UserContent] = []
+
+    for part in content_array:
+        part_type = part.get("type")
+
+        if part_type == "text":
+            user_content.append(part.get("text", ""))
+
+        elif part_type == "image_url":
+            image_data = part.get("image_url", {})
+            url = image_data.get("url", "")
+            detail = image_data.get("detail")
+
+            # Check if it's a data URI (base64 encoded)
+            if url.startswith("data:"):
+                binary_content = BinaryContent.from_data_uri(url)
+                if detail:
+                    binary_content.vendor_metadata = {"detail": detail}
+                user_content.append(binary_content)
+            else:
+                # Regular URL
+                vendor_metadata = {"detail": detail} if detail else None
+                user_content.append(ImageUrl(url=url, vendor_metadata=vendor_metadata))
+
+        elif part_type == "input_audio":
+            audio_data = part.get("input_audio", {})
+            data_b64 = audio_data.get("data", "")
+            audio_format = audio_data.get("format", "mp3")
+
+            # Convert base64 to binary
+            audio_bytes = base64.b64decode(data_b64)
+            media_type = AUDIO_FORMAT_TO_MEDIA_TYPE.get(
+                audio_format, f"audio/{audio_format}"
+            )
+
+            user_content.append(BinaryContent(data=audio_bytes, media_type=media_type))
+
+        elif part_type == "file":
+            # File input - convert to DocumentUrl or appropriate type
+            file_data = part.get("file", {})
+            file_id = file_data.get("file_id", "")
+            # OpenAI file references are URLs to their file storage
+            if file_id:
+                user_content.append(DocumentUrl(url=f"openai://file/{file_id}"))
+
+        else:
+            # Unknown type - treat as text if possible
+            if "text" in part:
+                user_content.append(part["text"])
+
+    return user_content
+
+
+def _convert_pydantic_content_to_openai(
+    content: Sequence[UserContent],
+) -> list[dict[str, Any]]:
+    """Convert Pydantic AI UserContent sequence to OpenAI content array format.
+
+    Args:
+        content: Sequence of Pydantic AI UserContent items
+
+    Returns:
+        List of OpenAI content parts
+    """
+    content_array: list[dict[str, Any]] = []
+
+    for item in content:
+        if isinstance(item, str):
+            content_array.append({"type": "text", "text": item})
+
+        elif isinstance(item, ImageUrl):
+            image_url_data: dict[str, Any] = {"url": item.url}
+            # Include detail if present in vendor_metadata
+            if item.vendor_metadata and "detail" in item.vendor_metadata:
+                image_url_data["detail"] = item.vendor_metadata["detail"]
+            content_array.append({"type": "image_url", "image_url": image_url_data})
+
+        elif isinstance(item, BinaryContent):
+            if item.is_image:
+                # Convert binary image to data URI
+                image_url_data = {"url": item.data_uri}
+                if item.vendor_metadata and "detail" in item.vendor_metadata:
+                    image_url_data["detail"] = item.vendor_metadata["detail"]
+                content_array.append({"type": "image_url", "image_url": image_url_data})
+
+            elif item.is_audio:
+                # Convert to OpenAI input_audio format
+                audio_format = MEDIA_TYPE_TO_AUDIO_FORMAT.get(
+                    item.media_type, item.format
+                )
+                content_array.append(
+                    {
+                        "type": "input_audio",
+                        "input_audio": {
+                            "data": item.base64,
+                            "format": audio_format,
+                        },
+                    }
+                )
+
+            elif item.is_video:
+                # Video as data URI (limited support in OpenAI)
+                content_array.append(
+                    {
+                        "type": "video",
+                        "video": {"url": item.data_uri},
+                    }
+                )
+
+            elif item.is_document:
+                # Document as data URI
+                content_array.append(
+                    {
+                        "type": "file",
+                        "file": {"url": item.data_uri},
+                    }
+                )
+
+            else:
+                # Unknown binary type - try as file
+                content_array.append(
+                    {
+                        "type": "file",
+                        "file": {"url": item.data_uri},
+                    }
+                )
+
+        elif isinstance(item, AudioUrl):
+            # Audio URL - OpenAI prefers input_audio with base64 data,
+            # but we can reference the URL
+            content_array.append(
+                {
+                    "type": "audio_url",
+                    "audio_url": {"url": item.url},
+                }
+            )
+
+        elif isinstance(item, VideoUrl):
+            # Video URL
+            content_array.append(
+                {
+                    "type": "video",
+                    "video": {"url": item.url},
+                }
+            )
+
+        elif isinstance(item, DocumentUrl):
+            # Document URL
+            content_array.append(
+                {
+                    "type": "file",
+                    "file": {"url": item.url},
+                }
+            )
+
+        # Skip CachePoint and other non-content types
+
+    return content_array
+
+
+def openai_messages_to_pydantic_ai_messages(
+    messages: list[dict[str, Any]],
+) -> list[ModelMessage]:
+    """Convert OpenAI chat completion message format to Pydantic AI messages.
+
+    This function converts the OpenAI message format (used in chat completions API)
+    to the Pydantic AI message format.
+
+    Supported message roles:
+    - system/developer: Converted to ModelRequest with SystemPromptPart
+    - user: Converted to ModelRequest with UserPromptPart
+    - assistant: Converted to ModelResponse with TextPart and/or ToolCallPart
+    - tool/function: Converted to ModelRequest with ToolReturnPart
+
+    Args:
+        messages: List of OpenAI format messages
+
+    Returns:
+        List of Pydantic AI ModelMessage objects
+
+    Raises:
+        NotImplementedError: If multimedia content (images, audio, etc.) is encountered
+        ValueError: If an unknown message role is encountered
+    """
+    result: list[ModelMessage] = []
+
+    for msg in messages:
+        role = msg.get("role")
+        content = msg.get("content")
+
+        if role in ("system", "developer"):
+            # System/developer messages become ModelRequest with SystemPromptPart
+            result.append(ModelRequest(parts=[SystemPromptPart(content=content or "")]))
+
+        elif role == "user":
+            # Check for multimodal content (content array)
+            if isinstance(content, list):
+                user_content = _convert_openai_content_array_to_pydantic(content)
+                result.append(
+                    ModelRequest(parts=[UserPromptPart(content=user_content)])
+                )
+            else:
+                result.append(
+                    ModelRequest(parts=[UserPromptPart(content=content or "")])
+                )
+
+        elif role == "assistant":
+            parts: list[TextPart | ToolCallPart] = []
+
+            # Handle text content if present
+            if content:
+                parts.append(TextPart(content=content))
+
+            # Handle tool_calls (modern format)
+            tool_calls = msg.get("tool_calls", [])
+            for tool_call in tool_calls:
+                if tool_call.get("type") == "function":
+                    func = tool_call.get("function", {})
+                    parts.append(
+                        ToolCallPart(
+                            tool_name=func.get("name", ""),
+                            tool_call_id=tool_call.get("id", ""),
+                            args=func.get("arguments", "{}"),
+                        )
+                    )
+
+            # Handle deprecated function_call format
+            function_call = msg.get("function_call")
+            if function_call:
+                parts.append(
+                    ToolCallPart(
+                        tool_name=function_call.get("name", ""),
+                        args=function_call.get("arguments", "{}"),
+                    )
+                )
+
+            # If no parts were created but we have an assistant message,
+            # create an empty text part
+            if not parts:
+                parts.append(TextPart(content=content or ""))
+
+            result.append(ModelResponse(parts=parts))
+
+        elif role == "tool":
+            # Tool response message
+            tool_call_id = msg.get("tool_call_id", "")
+            tool_name = msg.get("name", "")  # Optional in OpenAI format
+            tool_content = msg.get("content", "")
+
+            result.append(
+                ModelRequest(
+                    parts=[
+                        ToolReturnPart(
+                            tool_name=tool_name,
+                            tool_call_id=tool_call_id,
+                            content=tool_content,
+                        )
+                    ]
+                )
+            )
+
+        elif role == "function":
+            # Deprecated function role message
+            func_name = msg.get("name", "")
+            func_content = msg.get("content", "")
+
+            result.append(
+                ModelRequest(
+                    parts=[
+                        ToolReturnPart(
+                            tool_name=func_name,
+                            content=func_content,
+                        )
+                    ]
+                )
+            )
+
+        else:
+            raise ValueError(f"Unknown message role: {role}")
+
+    return result
+
+
+def pydantic_ai_messages_to_openai_messages(
+    messages: list[ModelMessage],
+) -> list[dict[str, Any]]:
+    """Convert Pydantic AI messages to OpenAI chat completion message format.
+
+    This function converts Pydantic AI messages to the OpenAI message format
+    that can be used with the chat completions API.
+
+    Supported Pydantic AI parts:
+    - SystemPromptPart: Converted to system role message
+    - UserPromptPart: Converted to user role message
+    - TextPart: Part of assistant role message
+    - ToolCallPart: Part of assistant role message with tool_calls
+    - ToolReturnPart: Converted to tool role message
+    - RetryPromptPart: Converted to tool or user role message
+    - ThinkingPart: Excluded from output (internal to model)
+
+    Args:
+        messages: List of Pydantic AI ModelMessage objects
+
+    Returns:
+        List of OpenAI format messages
+
+    Raises:
+        NotImplementedError: If multimedia content is encountered in UserPromptPart
+    """
+    result: list[dict[str, Any]] = []
+
+    for msg in messages:
+        if isinstance(msg, ModelRequest):
+            # Process each part of the request
+            for part in msg.parts:
+                if isinstance(part, SystemPromptPart):
+                    result.append({"role": "system", "content": part.content})
+
+                elif isinstance(part, UserPromptPart):
+                    # Check for multimodal content
+                    if not isinstance(part.content, str):
+                        # Content is a sequence - convert to OpenAI content array
+                        content_array = _convert_pydantic_content_to_openai(
+                            part.content
+                        )
+                        result.append({"role": "user", "content": content_array})
+                    else:
+                        result.append({"role": "user", "content": part.content})
+
+                elif isinstance(part, ToolReturnPart):
+                    # Serialize content if it's not a string
+                    content = part.content
+                    if not isinstance(content, str):
+                        content = json.dumps(content)
+
+                    result.append(
+                        {
+                            "role": "tool",
+                            "tool_call_id": part.tool_call_id,
+                            "content": content,
+                        }
+                    )
+
+                elif isinstance(part, RetryPromptPart):
+                    # Convert retry prompt based on whether it has a tool name
+                    if part.tool_name:
+                        # Retry for a tool call - send as tool message
+                        content = (
+                            part.content
+                            if isinstance(part.content, str)
+                            else json.dumps(part.content)
+                        )
+                        result.append(
+                            {
+                                "role": "tool",
+                                "tool_call_id": part.tool_call_id,
+                                "content": part.model_response(),
+                            }
+                        )
+                    else:
+                        # General retry - send as user message
+                        result.append(
+                            {"role": "user", "content": part.model_response()}
+                        )
+
+        elif isinstance(msg, ModelResponse):
+            # Collect text parts and tool call parts
+            text_parts: list[str] = []
+            tool_calls: list[dict[str, Any]] = []
+
+            for part in msg.parts:
+                if isinstance(part, TextPart):
+                    text_parts.append(part.content)
+                elif isinstance(part, ToolCallPart):
+                    # Convert args to string if it's a dict
+                    args = part.args
+                    if isinstance(args, dict):
+                        args = json.dumps(args)
+                    elif args is None:
+                        args = "{}"
+
+                    tool_calls.append(
+                        {
+                            "id": part.tool_call_id,
+                            "type": "function",
+                            "function": {
+                                "name": part.tool_name,
+                                "arguments": args,
+                            },
+                        }
+                    )
+                elif isinstance(part, ThinkingPart):
+                    # ThinkingPart is internal, skip it
+                    pass
+                # Other part types (BuiltinToolCallPart, BuiltinToolReturnPart, FilePart)
+                # are not directly mappable to OpenAI format
+
+            # Build the assistant message
+            assistant_msg: dict[str, Any] = {"role": "assistant"}
+
+            if text_parts:
+                assistant_msg["content"] = "\n\n".join(text_parts)
+            else:
+                assistant_msg["content"] = None
+
+            if tool_calls:
+                assistant_msg["tool_calls"] = tool_calls
+
+            result.append(assistant_msg)
+
+    return result
+
+
+def assemble_model_request_parameters(
+    output_schema: dict[str, Any] | None,
+    tools: list[dict[str, Any]] | None,
+    *,
+    output_mode: Literal["text", "tool", "native", "prompted"] | None = None,
+    strict: bool | None = None,
+    allow_text_output: bool = True,
+) -> ModelRequestParameters:
+    """Assemble Pydantic AI ModelRequestParameters from OpenAI format tools and output schema.
+
+    This function converts OpenAI format tool definitions and a JSON schema output
+    specification to the Pydantic AI ModelRequestParameters format that can be used
+    with model requests.
+
+    Args:
+        output_schema: A JSON schema defining the expected structured output format.
+            If provided, creates an OutputObjectDefinition. The schema can include
+            'title' and 'description' fields which will be extracted.
+        tools: List of OpenAI format tool definitions. Each tool should have the format:
+            {
+                "type": "function",
+                "function": {
+                    "name": "tool_name",
+                    "description": "Tool description",
+                    "parameters": { ... JSON schema ... },
+                    "strict": true/false  # optional
+                }
+            }
+        output_mode: The output mode for structured output. Defaults to "native" when
+            output_schema is provided, otherwise "text".
+        strict: Whether to enforce strict JSON schema validation for output.
+            Only applies when output_schema is provided.
+        allow_text_output: Whether plain text output is allowed alongside structured output.
+            Defaults to True.
+
+    Returns:
+        ModelRequestParameters configured with function_tools and/or output_object.
+
+    Raises:
+        ValueError: If a tool has an unsupported type (not "function"),
+            if a tool is missing the 'function' key, or if a tool is missing a 'name'.
+
+    Example:
+        >>> tools = [
+        ...     {
+        ...         "type": "function",
+        ...         "function": {
+        ...             "name": "get_weather",
+        ...             "description": "Get weather for a location",
+        ...             "parameters": {
+        ...                 "type": "object",
+        ...                 "properties": {"location": {"type": "string"}},
+        ...                 "required": ["location"]
+        ...             }
+        ...         }
+        ...     }
+        ... ]
+        >>> output_schema = {
+        ...     "type": "object",
+        ...     "properties": {"temperature": {"type": "number"}},
+        ...     "required": ["temperature"]
+        ... }
+        >>> params = assemble_model_request_parameters(
+        ...     output_schema=output_schema,
+        ...     tools=tools
+        ... )
+    """
+    function_tools: list[ToolDefinition] = []
+    output_object: OutputObjectDefinition | None = None
+
+    # Convert tools to ToolDefinition objects
+    if tools:
+        for tool in tools:
+            tool_type = tool.get("type")
+            if tool_type != "function":
+                raise ValueError(
+                    f"Unsupported tool type: {tool_type}. Only 'function' type is supported."
+                )
+
+            function_def = tool.get("function")
+            if function_def is None:
+                raise ValueError(
+                    "Missing 'function' key in tool definition. "
+                    "Expected format: {'type': 'function', 'function': {...}}"
+                )
+
+            name = function_def.get("name")
+            if not name:
+                raise ValueError(
+                    "Missing 'name' in function definition. "
+                    "Every tool must have a name."
+                )
+
+            description = function_def.get("description")
+            parameters = function_def.get("parameters")
+            tool_strict = function_def.get("strict")
+
+            # Build parameters schema, defaulting to empty object if not provided
+            parameters_json_schema: dict[str, Any] = (
+                parameters
+                if parameters is not None
+                else {"type": "object", "properties": {}}
+            )
+
+            function_tools.append(
+                ToolDefinition(
+                    name=name,
+                    description=description,
+                    parameters_json_schema=parameters_json_schema,
+                    strict=tool_strict,
+                    kind="function",
+                )
+            )
+
+    # Convert output_schema to OutputObjectDefinition
+    if output_schema is not None:
+        # Extract optional title and description from schema
+        schema_name = output_schema.get("title")
+        schema_description = output_schema.get("description")
+
+        output_object = OutputObjectDefinition(
+            json_schema=output_schema,
+            name=schema_name,
+            description=schema_description,
+            strict=strict,
+        )
+
+    # Determine output_mode
+    if output_mode is None:
+        output_mode = "native" if output_schema is not None else "text"
+
+    return ModelRequestParameters(
+        function_tools=function_tools,
+        output_mode=output_mode,
+        output_object=output_object,
+        allow_text_output=allow_text_output,
+    )
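
A short, hedged round-trip sketch of the helpers defined in this new module (message content is illustrative; only functions from the module itself are used):

    # Convert OpenAI-format messages to Pydantic AI messages and back, then
    # build request parameters from an output schema.
    from pixie.prompts.utils import (
        assemble_model_request_parameters,
        openai_messages_to_pydantic_ai_messages,
        pydantic_ai_messages_to_openai_messages,
    )

    openai_msgs = [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "What's the weather in Paris?"},
    ]
    pydantic_msgs = openai_messages_to_pydantic_ai_messages(openai_msgs)
    # Plain text system/user messages survive the round trip unchanged.
    assert pydantic_ai_messages_to_openai_messages(pydantic_msgs) == openai_msgs

    params = assemble_model_request_parameters(
        output_schema={"type": "object", "properties": {"answer": {"type": "string"}}},
        tools=None,
    )
    print(params.output_mode)  # "native", since an output schema was provided
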
@@ -4,7 +4,7 @@ packages = [
     { include = "pixie" },
 ]
 
-version = "0.1.1" # Managed by setuptools-scm
+version = "0.1.3"
 description = "Code-first, type-safe prompt management"
 authors = ["Yiou Li <yol@gopixie.ai>"]
 license = "MIT"
@@ -28,9 +28,10 @@ uvicorn = {version = ">=0.40.0", optional = true}
 colorlog = {version = ">=6.10.1", optional = true}
 dotenv = {version = ">=0.9.9", optional = true}
 watchdog = {version = ">=6.0.0", optional = true}
+pydantic-ai-slim = {version = ">=1.39.0", optional = true}
 
 [tool.poetry.extras]
-server = ["fastapi", "strawberry-graphql", "uvicorn", "colorlog", "dotenv", "watchdog"]
+server = ["fastapi", "strawberry-graphql", "uvicorn", "colorlog", "dotenv", "watchdog", "pydantic-ai-slim"]
 
 [tool.poetry.group.dev.dependencies]
 pytest = ">=7.4,<9.0"