pixie-prompts 0.1.10.dev1__tar.gz → 0.1.12__tar.gz

This diff compares the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pixie-prompts
-Version: 0.1.10.dev1
+Version: 0.1.12
 Summary: Code-first, type-safe prompt management
 License: MIT
 License-File: LICENSE
@@ -95,6 +95,22 @@ class LlmCallResult:
     reasoning: str | None


+@strawberry.input
+class LlmCallInput:
+    """Input for a single LLM call in a batch."""
+
+    id: strawberry.ID
+    """Unique identifier for this call, used to correlate results."""
+    model: str
+    prompt_template: str
+    variables: Optional[JSON] = None
+    prompt_placeholder: str = "{{prompt}}"
+    input_messages: list[JSON] = strawberry.field(default_factory=list)
+    output_schema: Optional[JSON] = None
+    tools: Optional[list[JSON]] = None
+    model_parameters: Optional[JSON] = None
+
+
 def is_demo_mode() -> bool:
     is_demo_mode = os.getenv("IS_DEMO_MODE", "0") in ("1", "true", "True")
     return is_demo_mode
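
The new `LlmCallInput` type bundles everything a single call needs, so a batch of calls can be expressed as a list of inputs whose results are correlated by `id`. A minimal sketch of how a caller might build such a batch (the model string, template, and `docs` list are illustrative, not from the package):

```python
import strawberry

# Hypothetical batch construction; LlmCallInput is the type added above.
docs = ["first document", "second document"]
calls = [
    LlmCallInput(
        id=strawberry.ID(f"call-{i}"),  # correlates result to request
        model="openai:gpt-4o-mini",
        prompt_template="Summarize: {{ text }}",
        variables={"text": doc},
    )
    for i, doc in enumerate(docs)
]
```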
@@ -227,77 +243,20 @@ class Mutation:
             GraphQLError: If the LLM call fails.
         """
         try:
-            if is_demo_mode():
-                model = "openai:gpt-4o-mini"
-            template = jinja2.Template(prompt_template)
-            prompt = template.render(**(cast(dict[str, Any], variables) or {}))
-            print(prompt)
-            print(type(prompt))
-            pydantic_messages = openai_messages_to_pydantic_ai_messages(
-                cast(list[dict[str, Any]], input_messages)
-            )
-            for msg in pydantic_messages:
-                for part in msg.parts:
-                    if part.part_kind == "user-prompt":
-                        if isinstance(part.content, str):
-                            part.content = part.content.replace(
-                                prompt_placeholder,
-                                prompt,
-                            )
-                        else:
-                            part.content = [
-                                p.replace(prompt_placeholder, prompt)
-                                for p in part.content
-                                if isinstance(p, str)
-                            ]
-                    elif part.part_kind == "system-prompt":
-                        part.content = part.content.replace(prompt_placeholder, prompt)
-
-            # Replace the placeholder in input messages
-            response = await model_request(
+            call_input = LlmCallInput(
+                id=strawberry.ID("single-call"),
                 model=model,
-                messages=pydantic_messages,
-                model_settings=cast(ModelSettings | None, model_parameters),
-                model_request_parameters=assemble_model_request_parameters(
-                    cast(dict[str, Any] | None, output_schema),
-                    cast(list[dict[str, Any]] | None, tools),
-                    strict=True,
-                    allow_text_output=False,
-                ),
-            )
-            return LlmCallResult(
-                input=JSON(pydantic_ai_messages_to_openai_messages(pydantic_messages)),
-                output=(
-                    JSON(json.loads(response.text) if output_schema else response.text)
-                    if response.text
-                    else None
-                ),
-                tool_calls=(
-                    [
-                        ToolCall(
-                            name=tc.tool_name,
-                            args=JSON(tc.args_as_dict()),
-                            tool_call_id=strawberry.ID(tc.tool_call_id),
-                        )
-                        for tc in response.tool_calls
-                    ]
-                    if response.tool_calls
-                    else None
-                ),
-                usage=JSON(
-                    {
-                        "input_tokens": response.usage.input_tokens,
-                        "output_tokens": response.usage.output_tokens,
-                        "total_tokens": response.usage.total_tokens,
-                    }
-                ),
-                cost=float(response.cost().total_price),
-                timestamp=response.timestamp,
-                reasoning=response.thinking,
+                prompt_template=prompt_template,
+                variables=variables,
+                prompt_placeholder=prompt_placeholder,
+                input_messages=input_messages,
+                output_schema=output_schema,
+                tools=tools,
+                model_parameters=model_parameters,
             )
+            return await execute_single_llm_call(call_input)
         except Exception as e:
-            logger.error("Error running LLM: %s", str(e))
-            raise GraphQLError(f"Failed to run LLM: {str(e)}") from e
+            raise GraphQLError(f"LLM call failed: {str(e)}") from e

     @strawberry.mutation
     async def add_prompt_version(
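
The refactor shrinks the mutation to building an `LlmCallInput` and awaiting the shared helper, so the only statement left in the `try` block is the delegation itself, and any failure surfaces as a single `GraphQLError`. A sketch of what a caller sees, assuming the mutation is exposed as `runLlm` (strawberry camel-cases Python parameter names by default; the field and argument names here are inferred, not confirmed by the diff):

```python
# Illustrative: executing the mutation through the strawberry schema and
# reading the wrapped error message on failure.
result = await schema.execute(
    """
    mutation {
      runLlm(model: "openai:gpt-4o-mini", promptTemplate: "Hi {{ name }}") {
        output
      }
    }
    """
)
if result.errors:
    print(result.errors[0].message)  # e.g. "LLM call failed: ..."
```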
@@ -364,5 +323,85 @@ class Mutation:
         return "OK"


+async def execute_single_llm_call(
+    call_input: LlmCallInput,
+) -> LlmCallResult:
+    """Execute a single LLM call and return its result as an LlmCallResult."""
+    model = call_input.model
+    template = jinja2.Template(call_input.prompt_template)
+    prompt = template.render(**(cast(dict[str, Any], call_input.variables) or {}))
+
+    pydantic_messages = openai_messages_to_pydantic_ai_messages(
+        cast(list[dict[str, Any]], call_input.input_messages)
+    )
+
+    # Replace the placeholder in messages
+    for msg in pydantic_messages:
+        for part in msg.parts:
+            if part.part_kind == "user-prompt":
+                if isinstance(part.content, str):
+                    part.content = part.content.replace(
+                        call_input.prompt_placeholder,
+                        prompt,
+                    )
+                else:
+                    part.content = [
+                        p.replace(call_input.prompt_placeholder, prompt)
+                        for p in part.content
+                        if isinstance(p, str)
+                    ]
+            elif part.part_kind == "system-prompt":
+                part.content = part.content.replace(
+                    call_input.prompt_placeholder, prompt
+                )
+
+    response = await model_request(
+        model=model,
+        messages=pydantic_messages,
+        model_settings=cast(ModelSettings | None, call_input.model_parameters),
+        model_request_parameters=assemble_model_request_parameters(
+            cast(dict[str, Any] | None, call_input.output_schema),
+            cast(list[dict[str, Any]] | None, call_input.tools),
+            strict=True,
+            allow_text_output=False,
+        ),
+    )
+
+    result = LlmCallResult(
+        input=JSON(pydantic_ai_messages_to_openai_messages(pydantic_messages)),
+        output=(
+            JSON(
+                json.loads(response.text) if call_input.output_schema else response.text
+            )
+            if response.text
+            else None
+        ),
+        tool_calls=(
+            [
+                ToolCall(
+                    name=tc.tool_name,
+                    args=JSON(tc.args_as_dict()),
+                    tool_call_id=strawberry.ID(tc.tool_call_id),
+                )
+                for tc in response.tool_calls
+            ]
+            if response.tool_calls
+            else None
+        ),
+        usage=JSON(
+            {
+                "input_tokens": response.usage.input_tokens,
+                "output_tokens": response.usage.output_tokens,
+                "total_tokens": response.usage.total_tokens,
+            }
+        ),
+        cost=float(response.cost().total_price),
+        timestamp=response.timestamp,
+        reasoning=response.thinking,
+    )
+
+    return result
+
+
 # Create the schema
 schema = strawberry.Schema(query=Query, mutation=Mutation)
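
Extracting the body into a module-level `execute_single_llm_call` makes it reusable beyond the single-call mutation; in particular, a batch endpoint could fan calls out concurrently. A sketch under that assumption (`run_batch` is hypothetical and not part of this release; only `LlmCallInput`, `LlmCallResult`, and the helper come from the diff):

```python
import asyncio

import strawberry

# Hypothetical fan-out over the helper added above; results are matched
# back to their requests via each input's `id`.
async def run_batch(
    call_inputs: list[LlmCallInput],
) -> dict[strawberry.ID, LlmCallResult]:
    results = await asyncio.gather(
        *(execute_single_llm_call(ci) for ci in call_inputs)
    )
    return {ci.id: result for ci, result in zip(call_inputs, results)}
```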
@@ -49,7 +49,7 @@ key is the id() of the compiled string."""
 def _find_matching_prompt(obj):
     if isinstance(obj, str):
         for compiled in _compiled_prompt_registry.values():
-            if compiled.value == obj:
+            if compiled.value in obj:
                 return compiled
         return None
     elif isinstance(obj, dict):
@@ -76,7 +76,7 @@ def get_compiled_prompt(text: str) -> CompiledPrompt | None:
     if direct_match:
         return direct_match
     for compiled in _compiled_prompt_registry.values():
-        if compiled.value == text:
+        if compiled.value in text:
             return compiled
     try:
         obj = json.loads(text)
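
Both registry lookups above switch from equality (`==`) to containment (`in`): a compiled prompt is typically embedded inside a larger rendered message, so an exact-match check would never fire. A toy illustration (the strings are made up):

```python
rendered = "You are a helpful bot.\n\nTranslate 'hello' to French."
compiled_value = "Translate 'hello' to French."

assert not (compiled_value == rendered)  # old check: misses the embedded prompt
assert compiled_value in rendered        # new check: finds it
```

The trade-off is that a short compiled value can now match any string that merely contains it, so registry entries should be distinctive enough to avoid false positives.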
@@ -41,6 +41,10 @@ class PromptLoadError(Exception):
         super().__init__(message)


+class PromptNotFoundError(KeyError):
+    pass
+
+
 class BaseUntypedPromptWithCreationTime(BaseUntypedPrompt):

     def __init__(
@@ -254,7 +258,12 @@ class _FilePromptStorage(PromptStorage):
         return original is None

     def get(self, prompt_id: str) -> BaseUntypedPromptWithCreationTime:
-        return self._prompts[prompt_id]
+        try:
+            return self._prompts[prompt_id]
+        except KeyError:
+            raise PromptNotFoundError(
+                f"Prompt with ID '{prompt_id}' not found in storage."
+            )


 _storage_instance: PromptStorage | None = None
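
Because `PromptNotFoundError` subclasses `KeyError`, existing `except KeyError:` handlers keep working unchanged while new code can catch the more specific type. A minimal sketch of caller-side handling (the storage instance and prompt id are illustrative):

```python
try:
    prompt = storage.get("welcome-email")
except PromptNotFoundError:
    prompt = None  # fall back, log, or surface a not-found error to the caller
```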
@@ -306,9 +315,11 @@ class StorageBackedPrompt(Prompt[TPromptVar]):
         id: str,
         *,
         variables_definition: type[TPromptVar] = NoneType,
+        default: str | None = None,
     ) -> None:
         self._id = id
         self._variables_definition = variables_definition
+        self._default = default
         self._prompt: BasePrompt[TPromptVar] | None = None

     @property
@@ -319,24 +330,42 @@ class StorageBackedPrompt(Prompt[TPromptVar]):
     def variables_definition(self) -> type[TPromptVar]:
         return self._variables_definition

+    @property
+    def default(self) -> str | None:
+        return self._default
+
     def get_variables_schema(self) -> dict[str, Any]:
         return variables_definition_to_schema(self._variables_definition)

     def _get_prompt(self) -> BasePrompt[TPromptVar]:
-        storage = _ensure_storage_initialized()
-        if self._prompt is None:
-            untyped_prompt = storage.get(self.id)
-            self._prompt = BasePrompt.from_untyped(
-                untyped_prompt,
+        if self._prompt is not None:
+            return self._prompt
+        try:
+            storage = _ensure_storage_initialized()
+            if self._prompt is None:
+                untyped_prompt = storage.get(self.id)
+                self._prompt = BasePrompt.from_untyped(
+                    untyped_prompt,
+                    variables_definition=self.variables_definition,
+                )
+                schema_from_storage = untyped_prompt.get_variables_schema()
+                schema_from_definition = self.get_variables_schema()
+                if not isSubschema(schema_from_definition, schema_from_storage):
+                    raise TypeError(
+                        "Schema from definition is not a subschema of the schema from storage."
+                    )
+            return self._prompt
+        except (PromptNotFoundError, PromptLoadError):
+            if self.default is None:
+                raise PromptNotFoundError(
+                    f"Cannot load prompt with id '{self.id}' from storage, and no default is provided."
+                )
+            self._prompt = BasePrompt(
+                id=self.id,
+                versions={"v0": self.default},
                 variables_definition=self.variables_definition,
             )
-            schema_from_storage = untyped_prompt.get_variables_schema()
-            schema_from_definition = self.get_variables_schema()
-            if not isSubschema(schema_from_definition, schema_from_storage):
-                raise TypeError(
-                    "Schema from definition is not a subschema of the schema from storage."
-                )
-        return self._prompt
+            return self._prompt

     def actualize(self) -> Self:
         self._get_prompt()
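
With the new `default` argument, `_get_prompt` falls back to an in-memory `BasePrompt` whose single version `"v0"` is the default text whenever the storage lookup raises `PromptNotFoundError` or `PromptLoadError`. Hypothetical usage (the prompt id and template are illustrative):

```python
greeting = StorageBackedPrompt(
    "greeting",
    default="Hello, {{ name }}!",
)
# Loads from storage if present; otherwise uses the default as version "v0".
greeting.actualize()
```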
@@ -4,7 +4,7 @@ packages = [
     { include = "pixie" },
 ]

-version = "0.1.10.dev1"
+version = "0.1.12"
 description = "Code-first, type-safe prompt management"
 authors = ["Yiou Li <yol@gopixie.ai>"]
 license = "MIT"