pixie-prompts 0.1.11__py3-none-any.whl → 0.1.12__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
pixie/prompts/graphql.py CHANGED
@@ -95,6 +95,22 @@ class LlmCallResult:
     reasoning: str | None


+@strawberry.input
+class LlmCallInput:
+    """Input for a single LLM call in a batch."""
+
+    id: strawberry.ID
+    """Unique identifier for this call, used to correlate results."""
+    model: str
+    prompt_template: str
+    variables: Optional[JSON] = None
+    prompt_placeholder: str = "{{prompt}}"
+    input_messages: list[JSON] = strawberry.field(default_factory=list)
+    output_schema: Optional[JSON] = None
+    tools: Optional[list[JSON]] = None
+    model_parameters: Optional[JSON] = None
+
+
 def is_demo_mode() -> bool:
     is_demo_mode = os.getenv("IS_DEMO_MODE", "0") in ("1", "true", "True")
     return is_demo_mode
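
The new LlmCallInput type gathers every per-call parameter (model, template, variables, schema, tools) into one Strawberry input object. As a rough illustration of how such an input type surfaces in the GraphQL schema, here is a minimal, self-contained sketch; ExampleCallInput, Query, and echo_model are illustrative names, not part of pixie-prompts:

# Minimal sketch of a Strawberry input type in the style of LlmCallInput.
import strawberry


@strawberry.input
class ExampleCallInput:
    id: strawberry.ID
    model: str
    prompt_placeholder: str = "{{prompt}}"
    # default_factory avoids sharing one mutable list across instances
    input_messages: list[str] = strawberry.field(default_factory=list)


@strawberry.type
class Query:
    @strawberry.field
    def echo_model(self, call: ExampleCallInput) -> str:
        return call.model


schema = strawberry.Schema(query=Query)
print(schema)  # SDL includes: input ExampleCallInput { id: ID! model: String! ... }
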
@@ -227,77 +243,20 @@ class Mutation:
             GraphQLError: If the LLM call fails.
         """
         try:
-            if is_demo_mode():
-                model = "openai:gpt-4o-mini"
-            template = jinja2.Template(prompt_template)
-            prompt = template.render(**(cast(dict[str, Any], variables) or {}))
-            print(prompt)
-            print(type(prompt))
-            pydantic_messages = openai_messages_to_pydantic_ai_messages(
-                cast(list[dict[str, Any]], input_messages)
-            )
-            for msg in pydantic_messages:
-                for part in msg.parts:
-                    if part.part_kind == "user-prompt":
-                        if isinstance(part.content, str):
-                            part.content = part.content.replace(
-                                prompt_placeholder,
-                                prompt,
-                            )
-                        else:
-                            part.content = [
-                                p.replace(prompt_placeholder, prompt)
-                                for p in part.content
-                                if isinstance(p, str)
-                            ]
-                    elif part.part_kind == "system-prompt":
-                        part.content = part.content.replace(prompt_placeholder, prompt)
-
-            # Replace the placeholder in input messages
-            response = await model_request(
+            call_input = LlmCallInput(
+                id=strawberry.ID("single-call"),
                 model=model,
-                messages=pydantic_messages,
-                model_settings=cast(ModelSettings | None, model_parameters),
-                model_request_parameters=assemble_model_request_parameters(
-                    cast(dict[str, Any] | None, output_schema),
-                    cast(list[dict[str, Any]] | None, tools),
-                    strict=True,
-                    allow_text_output=False,
-                ),
-            )
-            return LlmCallResult(
-                input=JSON(pydantic_ai_messages_to_openai_messages(pydantic_messages)),
-                output=(
-                    JSON(json.loads(response.text) if output_schema else response.text)
-                    if response.text
-                    else None
-                ),
-                tool_calls=(
-                    [
-                        ToolCall(
-                            name=tc.tool_name,
-                            args=JSON(tc.args_as_dict()),
-                            tool_call_id=strawberry.ID(tc.tool_call_id),
-                        )
-                        for tc in response.tool_calls
-                    ]
-                    if response.tool_calls
-                    else None
-                ),
-                usage=JSON(
-                    {
-                        "input_tokens": response.usage.input_tokens,
-                        "output_tokens": response.usage.output_tokens,
-                        "total_tokens": response.usage.total_tokens,
-                    }
-                ),
-                cost=float(response.cost().total_price),
-                timestamp=response.timestamp,
-                reasoning=response.thinking,
+                prompt_template=prompt_template,
+                variables=variables,
+                prompt_placeholder=prompt_placeholder,
+                input_messages=input_messages,
+                output_schema=output_schema,
+                tools=tools,
+                model_parameters=model_parameters,
             )
+            return await execute_single_llm_call(call_input)
         except Exception as e:
-            logger.error("Error running LLM: %s", str(e))
-            raise GraphQLError(f"Failed to run LLM: {str(e)}") from e
+            raise GraphQLError(f"LLM call failed: {str(e)}") from e


     @strawberry.mutation
     async def add_prompt_version(
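
With this change the mutation body shrinks to packing its arguments into an LlmCallInput (with the fixed id "single-call") and delegating to execute_single_llm_call. Per the class docstring, the id field exists to correlate results when several calls run as a batch. A hedged sketch of that fan-out pattern, with run_one standing in for the real per-call coroutine:

# Illustrative batch fan-out: ids let callers correlate results even if
# completion order differs. run_one and run_batch are not part of
# pixie-prompts.
import asyncio


async def run_one(call_id: str, model: str) -> tuple[str, str]:
    await asyncio.sleep(0)  # stand-in for the real model request
    return call_id, f"result from {model}"


async def run_batch() -> dict[str, str]:
    calls = [("call-1", "openai:gpt-4o-mini"), ("call-2", "openai:gpt-4o")]
    # gather preserves submission order, but the id lets clients correlate
    # results regardless of how they are delivered
    results = await asyncio.gather(*(run_one(cid, m) for cid, m in calls))
    return dict(results)


print(asyncio.run(run_batch()))  # {'call-1': '...', 'call-2': '...'}
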
@@ -364,5 +323,85 @@ class Mutation:
         return "OK"


+async def execute_single_llm_call(
+    call_input: LlmCallInput,
+) -> LlmCallResult:
+    """Execute a single LLM call and return the result as an LlmCallResult."""
+    model = call_input.model
+    template = jinja2.Template(call_input.prompt_template)
+    prompt = template.render(**(cast(dict[str, Any], call_input.variables) or {}))
+
+    pydantic_messages = openai_messages_to_pydantic_ai_messages(
+        cast(list[dict[str, Any]], call_input.input_messages)
+    )
+
+    # Replace the placeholder in messages
+    for msg in pydantic_messages:
+        for part in msg.parts:
+            if part.part_kind == "user-prompt":
+                if isinstance(part.content, str):
+                    part.content = part.content.replace(
+                        call_input.prompt_placeholder,
+                        prompt,
+                    )
+                else:
+                    part.content = [
+                        p.replace(call_input.prompt_placeholder, prompt)
+                        for p in part.content
+                        if isinstance(p, str)
+                    ]
+            elif part.part_kind == "system-prompt":
+                part.content = part.content.replace(
+                    call_input.prompt_placeholder, prompt
+                )
+
+    response = await model_request(
+        model=model,
+        messages=pydantic_messages,
+        model_settings=cast(ModelSettings | None, call_input.model_parameters),
+        model_request_parameters=assemble_model_request_parameters(
+            cast(dict[str, Any] | None, call_input.output_schema),
+            cast(list[dict[str, Any]] | None, call_input.tools),
+            strict=True,
+            allow_text_output=False,
+        ),
+    )
+
+    result = LlmCallResult(
+        input=JSON(pydantic_ai_messages_to_openai_messages(pydantic_messages)),
+        output=(
+            JSON(
+                json.loads(response.text) if call_input.output_schema else response.text
+            )
+            if response.text
+            else None
+        ),
+        tool_calls=(
+            [
+                ToolCall(
+                    name=tc.tool_name,
+                    args=JSON(tc.args_as_dict()),
+                    tool_call_id=strawberry.ID(tc.tool_call_id),
+                )
+                for tc in response.tool_calls
+            ]
+            if response.tool_calls
+            else None
+        ),
+        usage=JSON(
+            {
+                "input_tokens": response.usage.input_tokens,
+                "output_tokens": response.usage.output_tokens,
+                "total_tokens": response.usage.total_tokens,
+            }
+        ),
+        cost=float(response.cost().total_price),
+        timestamp=response.timestamp,
+        reasoning=response.thinking,
+    )
+
+    return result
+
+
 # Create the schema
 schema = strawberry.Schema(query=Query, mutation=Mutation)
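
The helper's core move is to render the Jinja2 prompt template first and only then splice the result into the messages with plain string replacement, so the literal "{{prompt}}" placeholder inside a message is never itself evaluated as a template. A condensed sketch of that technique, using an illustrative render_and_splice helper over OpenAI-style dict messages rather than the package's pydantic-ai message objects:

# Render the template, then str.replace the placeholder into message content.
# render_and_splice is not part of pixie-prompts; it only mirrors the idea.
import jinja2


def render_and_splice(
    prompt_template: str,
    variables: dict | None,
    messages: list[dict],
    placeholder: str = "{{prompt}}",
) -> list[dict]:
    prompt = jinja2.Template(prompt_template).render(**(variables or {}))
    spliced = []
    for msg in messages:
        content = msg.get("content")
        if isinstance(content, str):
            # plain replacement: the placeholder is never re-rendered by Jinja2
            msg = {**msg, "content": content.replace(placeholder, prompt)}
        spliced.append(msg)
    return spliced


messages = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "{{prompt}}"},
]
print(render_and_splice("Summarize: {{ text }}", {"text": "hello"}, messages))
# -> the user message becomes "Summarize: hello"
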
pixie_prompts-0.1.12.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pixie-prompts
-Version: 0.1.11
+Version: 0.1.12
 Summary: Code-first, type-safe prompt management
 License: MIT
 License-File: LICENSE
pixie_prompts-0.1.12.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
 pixie/prompts/__init__.py,sha256=ZueU9cJ7aiVHBQYH4g3MXAFtjQwTfvvpy3d8ZTtBQ2c,396
 pixie/prompts/file_watcher.py,sha256=TAFY8tx5w--qUvpNX_L3ag5heCHJcZ1YO_10t8k9Sc0,14253
-pixie/prompts/graphql.py,sha256=jzTGVedsl_QoONUwV5gIZFgqlvpYBug97UhyAJ0_B3o,12258
+pixie/prompts/graphql.py,sha256=QAKzuFX0fRDcLPZY1zfQ5IOYBD4cMco_xMzRbZKXsCI,12959
 pixie/prompts/prompt.py,sha256=tmjWZZEDxGzcMh6U0AQ0KsW7ae2wQYbswsSLhpuenQ0,12384
 pixie/prompts/prompt_management.py,sha256=gq5Eklqy2_Sq8jATVae4eANNmyFE8s8a9cedxWs2P_Y,2816
 pixie/prompts/server.py,sha256=gaZ4ws78f2381Zvegphy0yit1pAYZvm4kC7SJeGqYrs,7817
 pixie/prompts/storage.py,sha256=oE1YStN1YpoEDVvgmzzbboNvqpSAy_KKrFPR1B1_wjE,15875
 pixie/prompts/utils.py,sha256=ssAb4HdwZX__Fq50i2-DFsYXD5vpsYEliA_XI8GPx3Y,21929
-pixie_prompts-0.1.11.dist-info/METADATA,sha256=zJvWUqc1ACXGDFFX58q4JW0oNVWswR9WsiQrhJgamVM,4719
-pixie_prompts-0.1.11.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
-pixie_prompts-0.1.11.dist-info/entry_points.txt,sha256=SWOSFuUXDxkJMmf28u7E0Go_LcEpofz7NAlV70Cp8Es,48
-pixie_prompts-0.1.11.dist-info/licenses/LICENSE,sha256=nZoehBpdSXe6iTF2ZWzM-fgXdXECUZ0J8LrW_1tBwyk,1064
-pixie_prompts-0.1.11.dist-info/RECORD,,
+pixie_prompts-0.1.12.dist-info/METADATA,sha256=dDewWzSPiUtJmzjlZQNceJqrSlkvtj-yBYgzqcPAkVY,4719
+pixie_prompts-0.1.12.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+pixie_prompts-0.1.12.dist-info/entry_points.txt,sha256=SWOSFuUXDxkJMmf28u7E0Go_LcEpofz7NAlV70Cp8Es,48
+pixie_prompts-0.1.12.dist-info/licenses/LICENSE,sha256=nZoehBpdSXe6iTF2ZWzM-fgXdXECUZ0J8LrW_1tBwyk,1064
+pixie_prompts-0.1.12.dist-info/RECORD,,
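
The RECORD hunk re-points the dist-info entries at the new version and updates digests only for the files that changed (graphql.py and METADATA; WHEEL, entry_points.txt, and LICENSE keep their hashes). Each row has the form path,sha256=<digest>,size, where the digest is the urlsafe-base64 SHA-256 of the file with the trailing "=" padding stripped, per the wheel spec. A sketch that recomputes and checks those hashes against an unpacked wheel; verify_record is illustrative, not part of pixie-prompts:

# Recompute RECORD-style hashes for every file listed in an unpacked wheel.
import base64
import csv
import hashlib
from pathlib import Path


def record_hash(path: Path) -> str:
    digest = hashlib.sha256(path.read_bytes()).digest()
    # urlsafe base64 with padding stripped, as the wheel spec requires
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")


def verify_record(unpacked_wheel: Path) -> None:
    record_file = next(unpacked_wheel.glob("*.dist-info/RECORD"))
    with record_file.open(newline="") as f:
        for name, expected, _size in csv.reader(f):
            if not expected:  # the RECORD file itself carries no hash
                continue
            status = "OK" if record_hash(unpacked_wheel / name) == expected else "MISMATCH"
            print(f"{status}  {name}")
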