pixie-prompts 0.1.2__tar.gz → 0.1.3__tar.gz

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pixie-prompts
-Version: 0.1.2
+Version: 0.1.3
 Summary: Code-first, type-safe prompt management
 License: MIT
 License-File: LICENSE
@@ -10,7 +10,7 @@ from watchdog.observers import Observer
 import asyncio
 import logging
 
-from pixie.prompts.storage import initialize_prompt_storage
+from pixie.prompts.storage import PromptLoadError, initialize_prompt_storage
 
 logger = logging.getLogger(__name__)
 
@@ -311,7 +311,11 @@ async def stop_storage_watcher() -> None:
 def init_prompt_storage():
 
     storage_directory = os.getenv("PIXIE_PROMPT_STORAGE_DIR", ".pixie/prompts")
-    initialize_prompt_storage(storage_directory)
+    try:
+        initialize_prompt_storage(storage_directory)
+    except PromptLoadError as e:
+        for err in e.failures:
+            logger.error("Prompt load error: %s", err)
 
 @asynccontextmanager
 async def lifespan(app: FastAPI):
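
For reference, a minimal sketch of the exception shape this handler assumes: an aggregate error exposing a failures list. The real PromptLoadError lives in pixie.prompts.storage and is not shown in this diff, so the details below are illustrative only.

    # Hypothetical sketch; the real PromptLoadError in pixie.prompts.storage
    # may carry richer failure objects than plain strings.
    class PromptLoadError(Exception):
        """Aggregate error raised when one or more prompt files fail to load."""

        def __init__(self, failures: list[str]) -> None:
            super().__init__(f"{len(failures)} prompt(s) failed to load")
            self.failures = failures  # iterated and logged by init_prompt_storage()

With this shape, a bad prompt file no longer aborts startup; each failure is logged individually and the lifespan hook continues.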
@@ -1,12 +1,15 @@
 """GraphQL schema for SDK server."""
 
 from datetime import datetime
+import json
 import logging
-from typing import Any, Optional, cast
+from typing import Any, Optional, cast, get_args
 
 from graphql import GraphQLError
+import jinja2
 from pydantic_ai import ModelSettings
 from pydantic_ai.direct import model_request
+from pydantic_ai.models import KnownModelName
 import strawberry
 from strawberry.scalars import JSON
 
@@ -15,6 +18,7 @@ from pixie.prompts.prompt_management import get_prompt, list_prompts
 from pixie.prompts.utils import (
     assemble_model_request_parameters,
     openai_messages_to_pydantic_ai_messages,
+    pydantic_ai_messages_to_openai_messages,
 )
 
 logger = logging.getLogger(__name__)
@@ -81,17 +85,28 @@ class ToolCall:
 @strawberry.type
 class LlmCallResult:
 
+    input: JSON
     output: JSON | None
     tool_calls: list[ToolCall] | None
     usage: JSON
     cost: float
     timestamp: datetime
+    reasoning: str | None
 
 
 @strawberry.type
 class Query:
     """GraphQL queries."""
 
+    @strawberry.field
+    async def possible_models(self) -> list[str]:
+        """List possible model names.
+
+        Returns:
+            A list of model names supported by the server.
+        """
+        return list(get_args(KnownModelName.__value__))
+
     @strawberry.field
     async def health_check(self) -> str:
         """Health check endpoint."""
@@ -177,6 +192,9 @@ class Mutation:
     async def call_llm(
         self,
         model: str,
+        prompt_template: str,
+        variables: Optional[JSON],
+        prompt_placeholder: str,
         input_messages: list[JSON],
         output_schema: Optional[JSON] = None,
         tools: Optional[list[JSON]] = None,
@@ -186,7 +204,10 @@
 
         Args:
             model: The model name to use (e.g., "openai:gpt-4").
-            input_messages: List of messages as JSON objects in openai format.
+            prompt_template: prompt template string.
+            variables: variables for the prompt template.
+            prompt_placeholder: placeholder string in the prompt template to be replaced.
+            input_messages: List of messages as JSON objects in openai format, containing prompt_placeholder in content.
             output_schema: Optional output schema.
             tools: Optional tools configuration (not yet implemented).
             model_parameters: Optional model parameters.
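
As a usage sketch, a client could drive the extended mutation as below. The field and argument names assume strawberry's default snake_case-to-camelCase conversion, and the /graphql path and port are assumptions about how the FastAPI app mounts the schema:

    import httpx

    # Hypothetical client call against the SDK server's GraphQL endpoint.
    query = """
    mutation CallLlm($model: String!, $tpl: String!, $vars: JSON,
                     $ph: String!, $msgs: [JSON!]!) {
      callLlm(model: $model, promptTemplate: $tpl, variables: $vars,
              promptPlaceholder: $ph, inputMessages: $msgs) {
        input
        output
        reasoning
        cost
        usage
      }
    }
    """
    variables = {
        "model": "openai:gpt-4",
        "tpl": "Summarize {{ topic }} in one sentence.",
        "vars": {"topic": "prompt management"},
        "ph": "__PROMPT__",
        "msgs": [{"role": "user", "content": "__PROMPT__"}],
    }
    resp = httpx.post(
        "http://localhost:8000/graphql",  # assumed mount point and port
        json={"query": query, "variables": variables},
    )
    print(resp.json()["data"]["callLlm"]["output"])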
@@ -198,11 +219,35 @@
             GraphQLError: If the LLM call fails.
         """
         try:
+            template = jinja2.Template(prompt_template)
+            prompt = template.render(**(cast(dict[str, Any], variables) or {}))
+            print(prompt)
+            print(type(prompt))
+            pydantic_messages = openai_messages_to_pydantic_ai_messages(
+                cast(list[dict[str, Any]], input_messages)
+            )
+            for msg in pydantic_messages:
+                for part in msg.parts:
+                    if part.part_kind == "user-prompt":
+                        if isinstance(part.content, str):
+                            part.content = part.content.replace(
+                                prompt_placeholder,
+                                prompt,
+                            )
+                        else:
+                            part.content = [
+                                p.replace(prompt_placeholder, prompt)
+                                for p in part.content
+                                if isinstance(p, str)
+                            ]
+                    elif part.part_kind == "system-prompt":
+                        part.content = part.content.replace(prompt_placeholder, prompt)
+
+            print(pydantic_messages)
+            # Replace the placeholder in input messages
             response = await model_request(
                 model=model,
-                messages=openai_messages_to_pydantic_ai_messages(
-                    cast(list[dict[str, Any]], input_messages)
-                ),
+                messages=pydantic_messages,
                 model_settings=cast(ModelSettings | None, model_parameters),
                 model_request_parameters=assemble_model_request_parameters(
                     cast(dict[str, Any] | None, output_schema),
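
The new preprocessing is plain Jinja2 rendering followed by string substitution: the template is rendered with the supplied variables, then the rendered prompt replaces prompt_placeholder wherever it appears in the user and system parts. A self-contained sketch of the same technique, using illustrative values:

    import jinja2

    # Illustrative inputs; in the mutation these arrive as GraphQL arguments.
    prompt_template = "You are a {{ role }} assistant."
    variables = {"role": "helpful"}
    prompt_placeholder = "__PROMPT__"
    messages = [{"role": "user", "content": "__PROMPT__ What is 2 + 2?"}]

    rendered = jinja2.Template(prompt_template).render(**(variables or {}))
    for message in messages:
        if isinstance(message["content"], str):
            message["content"] = message["content"].replace(
                prompt_placeholder, rendered
            )

    print(messages[0]["content"])
    # -> You are a helpful assistant. What is 2 + 2?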
@@ -212,7 +257,12 @@
                 ),
             )
             return LlmCallResult(
-                output=JSON(response.text),
+                input=JSON(pydantic_ai_messages_to_openai_messages(pydantic_messages)),
+                output=(
+                    JSON(json.loads(response.text) if output_schema else response.text)
+                    if response.text
+                    else None
+                ),
                 tool_calls=(
                     [
                         ToolCall(
@@ -225,9 +275,16 @@
                     if response.tool_calls
                     else None
                 ),
-                usage=JSON(response.usage.details),
+                usage=JSON(
+                    {
+                        "input_tokens": response.usage.input_tokens,
+                        "output_tokens": response.usage.output_tokens,
+                        "total_tokens": response.usage.total_tokens,
+                    }
+                ),
                 cost=float(response.cost().total_price),
                 timestamp=response.timestamp,
+                reasoning=response.thinking,
             )
         except Exception as e:
             logger.error("Error running LLM: %s", str(e))
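
The reworked output field decodes structured responses: when an output_schema was supplied the model is expected to emit JSON text, so it is parsed with json.loads before being wrapped; free-form text passes through, and an empty response becomes None. The same branch in isolation (decode_output is an illustrative name, not part of the package):

    import json

    def decode_output(text, has_schema):
        # Mirrors the output expression above: empty -> None,
        # schema-constrained -> parsed JSON, otherwise raw text.
        if not text:
            return None
        return json.loads(text) if has_schema else text

    assert decode_output('{"answer": 4}', has_schema=True) == {"answer": 4}
    assert decode_output("four", has_schema=False) == "four"
    assert decode_output(None, has_schema=False) is None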
@@ -2,8 +2,9 @@
 
 from __future__ import annotations
 
+import base64
 import json
-from typing import Any, Literal
+from typing import Any, Literal, Sequence
 
 from pydantic_ai.messages import (
     ModelMessage,
@@ -21,12 +22,210 @@ from pydantic_ai.messages import (
     VideoUrl,
     DocumentUrl,
     BinaryContent,
+    UserContent,
 )
 from pydantic_ai.models import ModelRequestParameters
 from pydantic_ai.tools import ToolDefinition
 from pydantic_ai.output import OutputObjectDefinition
 
 
+# Mapping from OpenAI audio formats to media types
+AUDIO_FORMAT_TO_MEDIA_TYPE = {
+    "mp3": "audio/mpeg",
+    "wav": "audio/wav",
+    "pcm": "audio/pcm",
+    "flac": "audio/flac",
+    "ogg": "audio/ogg",
+    "aac": "audio/aac",
+    "opus": "audio/opus",
+}
+
+# Mapping from media types to OpenAI audio formats
+MEDIA_TYPE_TO_AUDIO_FORMAT = {
+    "audio/mpeg": "mp3",
+    "audio/mp3": "mp3",
+    "audio/wav": "wav",
+    "audio/x-wav": "wav",
+    "audio/wave": "wav",
+    "audio/pcm": "pcm",
+    "audio/flac": "flac",
+    "audio/ogg": "ogg",
+    "audio/aac": "aac",
+    "audio/opus": "opus",
+}
+
+
+def _convert_openai_content_array_to_pydantic(
+    content_array: list[dict[str, Any]],
+) -> list[UserContent]:
+    """Convert OpenAI content array format to Pydantic AI UserContent list.
+
+    Args:
+        content_array: List of content parts in OpenAI format
+
+    Returns:
+        List of Pydantic AI UserContent items (str, ImageUrl, AudioUrl, BinaryContent, etc.)
+    """
+    user_content: list[UserContent] = []
+
+    for part in content_array:
+        part_type = part.get("type")
+
+        if part_type == "text":
+            user_content.append(part.get("text", ""))
+
+        elif part_type == "image_url":
+            image_data = part.get("image_url", {})
+            url = image_data.get("url", "")
+            detail = image_data.get("detail")
+
+            # Check if it's a data URI (base64 encoded)
+            if url.startswith("data:"):
+                binary_content = BinaryContent.from_data_uri(url)
+                if detail:
+                    binary_content.vendor_metadata = {"detail": detail}
+                user_content.append(binary_content)
+            else:
+                # Regular URL
+                vendor_metadata = {"detail": detail} if detail else None
+                user_content.append(ImageUrl(url=url, vendor_metadata=vendor_metadata))
+
+        elif part_type == "input_audio":
+            audio_data = part.get("input_audio", {})
+            data_b64 = audio_data.get("data", "")
+            audio_format = audio_data.get("format", "mp3")
+
+            # Convert base64 to binary
+            audio_bytes = base64.b64decode(data_b64)
+            media_type = AUDIO_FORMAT_TO_MEDIA_TYPE.get(
+                audio_format, f"audio/{audio_format}"
+            )
+
+            user_content.append(BinaryContent(data=audio_bytes, media_type=media_type))
+
+        elif part_type == "file":
+            # File input - convert to DocumentUrl or appropriate type
+            file_data = part.get("file", {})
+            file_id = file_data.get("file_id", "")
+            # OpenAI file references are URLs to their file storage
+            if file_id:
+                user_content.append(DocumentUrl(url=f"openai://file/{file_id}"))
+
+        else:
+            # Unknown type - treat as text if possible
+            if "text" in part:
+                user_content.append(part["text"])
+
+    return user_content
+
+
+def _convert_pydantic_content_to_openai(
+    content: Sequence[UserContent],
+) -> list[dict[str, Any]]:
+    """Convert Pydantic AI UserContent sequence to OpenAI content array format.
+
+    Args:
+        content: Sequence of Pydantic AI UserContent items
+
+    Returns:
+        List of OpenAI content parts
+    """
+    content_array: list[dict[str, Any]] = []
+
+    for item in content:
+        if isinstance(item, str):
+            content_array.append({"type": "text", "text": item})
+
+        elif isinstance(item, ImageUrl):
+            image_url_data: dict[str, Any] = {"url": item.url}
+            # Include detail if present in vendor_metadata
+            if item.vendor_metadata and "detail" in item.vendor_metadata:
+                image_url_data["detail"] = item.vendor_metadata["detail"]
+            content_array.append({"type": "image_url", "image_url": image_url_data})
+
+        elif isinstance(item, BinaryContent):
+            if item.is_image:
+                # Convert binary image to data URI
+                image_url_data: dict[str, Any] = {"url": item.data_uri}
+                if item.vendor_metadata and "detail" in item.vendor_metadata:
+                    image_url_data["detail"] = item.vendor_metadata["detail"]
+                content_array.append({"type": "image_url", "image_url": image_url_data})
+
+            elif item.is_audio:
+                # Convert to OpenAI input_audio format
+                audio_format = MEDIA_TYPE_TO_AUDIO_FORMAT.get(
+                    item.media_type, item.format
+                )
+                content_array.append(
+                    {
+                        "type": "input_audio",
+                        "input_audio": {
+                            "data": item.base64,
+                            "format": audio_format,
+                        },
+                    }
+                )
+
+            elif item.is_video:
+                # Video as data URI (limited support in OpenAI)
+                content_array.append(
+                    {
+                        "type": "video",
+                        "video": {"url": item.data_uri},
+                    }
+                )
+
+            elif item.is_document:
+                # Document as data URI
+                content_array.append(
+                    {
+                        "type": "file",
+                        "file": {"url": item.data_uri},
+                    }
+                )
+
+            else:
+                # Unknown binary type - try as file
+                content_array.append(
+                    {
+                        "type": "file",
+                        "file": {"url": item.data_uri},
+                    }
+                )
+
+        elif isinstance(item, AudioUrl):
+            # Audio URL - OpenAI prefers input_audio with base64 data,
+            # but we can reference the URL
+            content_array.append(
+                {
+                    "type": "audio_url",
+                    "audio_url": {"url": item.url},
+                }
+            )
+
+        elif isinstance(item, VideoUrl):
+            # Video URL
+            content_array.append(
+                {
+                    "type": "video",
+                    "video": {"url": item.url},
+                }
+            )
+
+        elif isinstance(item, DocumentUrl):
+            # Document URL
+            content_array.append(
+                {
+                    "type": "file",
+                    "file": {"url": item.url},
+                }
+            )
+
+        # Skip CachePoint and other non-content types
+
+    return content_array
+
+
 def openai_messages_to_pydantic_ai_messages(
     messages: list[dict[str, Any]],
 ) -> list[ModelMessage]:
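
A usage sketch of the new OpenAI-to-Pydantic-AI converter on a mixed text-plus-image content array (the data is illustrative; ImageUrl and vendor_metadata are real pydantic-ai types and fields):

    from pixie.prompts.utils import _convert_openai_content_array_to_pydantic

    # A user turn in OpenAI's content-array form: text plus an image URL.
    content = [
        {"type": "text", "text": "What is in this picture?"},
        {
            "type": "image_url",
            "image_url": {"url": "https://example.com/cat.png", "detail": "low"},
        },
    ]
    parts = _convert_openai_content_array_to_pydantic(content)
    # -> ["What is in this picture?",
    #     ImageUrl(url="https://example.com/cat.png",
    #              vendor_metadata={"detail": "low"})]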
@@ -62,14 +261,16 @@ def openai_messages_to_pydantic_ai_messages(
             result.append(ModelRequest(parts=[SystemPromptPart(content=content or "")]))
 
         elif role == "user":
-            # Check for multimedia content
+            # Check for multimodal content (content array)
             if isinstance(content, list):
-                # Content array indicates multimodal content
-                raise NotImplementedError(
-                    "Multimedia content (images, audio, etc.) is not supported. "
-                    "Only text content is currently supported."
+                user_content = _convert_openai_content_array_to_pydantic(content)
+                result.append(
+                    ModelRequest(parts=[UserPromptPart(content=user_content)])
+                )
+            else:
+                result.append(
+                    ModelRequest(parts=[UserPromptPart(content=content or "")])
                 )
-            result.append(ModelRequest(parts=[UserPromptPart(content=content or "")]))
 
         elif role == "assistant":
             parts: list[TextPart | ToolCallPart] = []
@@ -184,30 +385,13 @@ def pydantic_ai_messages_to_openai_messages(
             result.append({"role": "system", "content": part.content})
 
         elif isinstance(part, UserPromptPart):
-            # Check for multimedia content
+            # Check for multimodal content
             if not isinstance(part.content, str):
-                # Content is a sequence, check for non-text content
-                for item in part.content:
-                    if isinstance(
-                        item,
-                        (
-                            ImageUrl,
-                            AudioUrl,
-                            VideoUrl,
-                            DocumentUrl,
-                            BinaryContent,
-                        ),
-                    ):
-                        raise NotImplementedError(
-                            "Multimedia content is not supported. "
-                            "Only text content is currently supported."
-                        )
-                # If we get here, all items should be strings - join them
-                text_content = " ".join(
-                    item if isinstance(item, str) else str(item)
-                    for item in part.content
+                # Content is a sequence - convert to OpenAI content array
+                content_array = _convert_pydantic_content_to_openai(
+                    part.content
                 )
-                result.append({"role": "user", "content": text_content})
+                result.append({"role": "user", "content": content_array})
             else:
                 result.append({"role": "user", "content": part.content})
 
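And the reverse direction, with the same caveat that the data is illustrative:

    from pydantic_ai.messages import ImageUrl, ModelRequest, UserPromptPart
    from pixie.prompts.utils import pydantic_ai_messages_to_openai_messages

    # A multimodal user turn converts back into an OpenAI content array.
    messages = [
        ModelRequest(
            parts=[
                UserPromptPart(
                    content=[
                        "Describe this image.",
                        ImageUrl(url="https://example.com/cat.png"),
                    ]
                )
            ]
        )
    ]
    print(pydantic_ai_messages_to_openai_messages(messages))
    # -> [{"role": "user", "content": [
    #        {"type": "text", "text": "Describe this image."},
    #        {"type": "image_url",
    #         "image_url": {"url": "https://example.com/cat.png"}}]}]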
@@ -4,7 +4,7 @@ packages = [
     { include = "pixie" },
 ]
 
-version = "0.1.2" # Managed by setuptools-scm
+version = "0.1.3"
 description = "Code-first, type-safe prompt management"
 authors = ["Yiou Li <yol@gopixie.ai>"]
 license = "MIT"