synkro 0.4.36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of synkro might be problematic. Click here for more details.

Files changed (81)
  1. synkro/__init__.py +331 -0
  2. synkro/advanced.py +184 -0
  3. synkro/cli.py +156 -0
  4. synkro/core/__init__.py +7 -0
  5. synkro/core/checkpoint.py +250 -0
  6. synkro/core/dataset.py +432 -0
  7. synkro/core/policy.py +337 -0
  8. synkro/errors.py +178 -0
  9. synkro/examples/__init__.py +148 -0
  10. synkro/factory.py +291 -0
  11. synkro/formatters/__init__.py +18 -0
  12. synkro/formatters/chatml.py +121 -0
  13. synkro/formatters/langfuse.py +98 -0
  14. synkro/formatters/langsmith.py +98 -0
  15. synkro/formatters/qa.py +112 -0
  16. synkro/formatters/sft.py +90 -0
  17. synkro/formatters/tool_call.py +127 -0
  18. synkro/generation/__init__.py +9 -0
  19. synkro/generation/follow_ups.py +134 -0
  20. synkro/generation/generator.py +314 -0
  21. synkro/generation/golden_responses.py +269 -0
  22. synkro/generation/golden_scenarios.py +333 -0
  23. synkro/generation/golden_tool_responses.py +791 -0
  24. synkro/generation/logic_extractor.py +126 -0
  25. synkro/generation/multiturn_responses.py +177 -0
  26. synkro/generation/planner.py +131 -0
  27. synkro/generation/responses.py +189 -0
  28. synkro/generation/scenarios.py +90 -0
  29. synkro/generation/tool_responses.py +625 -0
  30. synkro/generation/tool_simulator.py +114 -0
  31. synkro/interactive/__init__.py +16 -0
  32. synkro/interactive/hitl_session.py +205 -0
  33. synkro/interactive/intent_classifier.py +94 -0
  34. synkro/interactive/logic_map_editor.py +176 -0
  35. synkro/interactive/rich_ui.py +459 -0
  36. synkro/interactive/scenario_editor.py +198 -0
  37. synkro/llm/__init__.py +7 -0
  38. synkro/llm/client.py +309 -0
  39. synkro/llm/rate_limits.py +99 -0
  40. synkro/models/__init__.py +50 -0
  41. synkro/models/anthropic.py +26 -0
  42. synkro/models/google.py +19 -0
  43. synkro/models/local.py +104 -0
  44. synkro/models/openai.py +31 -0
  45. synkro/modes/__init__.py +13 -0
  46. synkro/modes/config.py +66 -0
  47. synkro/modes/conversation.py +35 -0
  48. synkro/modes/tool_call.py +18 -0
  49. synkro/parsers.py +442 -0
  50. synkro/pipeline/__init__.py +20 -0
  51. synkro/pipeline/phases.py +592 -0
  52. synkro/pipeline/runner.py +769 -0
  53. synkro/pipelines.py +136 -0
  54. synkro/prompts/__init__.py +57 -0
  55. synkro/prompts/base.py +167 -0
  56. synkro/prompts/golden_templates.py +533 -0
  57. synkro/prompts/interactive_templates.py +198 -0
  58. synkro/prompts/multiturn_templates.py +156 -0
  59. synkro/prompts/templates.py +281 -0
  60. synkro/prompts/tool_templates.py +318 -0
  61. synkro/quality/__init__.py +14 -0
  62. synkro/quality/golden_refiner.py +163 -0
  63. synkro/quality/grader.py +153 -0
  64. synkro/quality/multiturn_grader.py +150 -0
  65. synkro/quality/refiner.py +137 -0
  66. synkro/quality/tool_grader.py +126 -0
  67. synkro/quality/tool_refiner.py +128 -0
  68. synkro/quality/verifier.py +228 -0
  69. synkro/reporting.py +464 -0
  70. synkro/schemas.py +521 -0
  71. synkro/types/__init__.py +43 -0
  72. synkro/types/core.py +153 -0
  73. synkro/types/dataset_type.py +33 -0
  74. synkro/types/logic_map.py +348 -0
  75. synkro/types/tool.py +94 -0
  76. synkro-0.4.36.data/data/examples/__init__.py +148 -0
  77. synkro-0.4.36.dist-info/METADATA +507 -0
  78. synkro-0.4.36.dist-info/RECORD +81 -0
  79. synkro-0.4.36.dist-info/WHEEL +4 -0
  80. synkro-0.4.36.dist-info/entry_points.txt +2 -0
  81. synkro-0.4.36.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,625 @@
1
+ """Tool call response generation with JSON mode for structured outputs."""
2
+
3
+ import json
4
+ import uuid
5
+ from typing import TYPE_CHECKING
6
+
7
+ from pydantic import BaseModel, Field
8
+
9
+ from synkro.llm.client import LLM
10
+ from synkro.models import Model, OpenAI
11
+ from synkro.types.core import Scenario, Trace, Message
12
+ from synkro.types.tool import ToolCall, ToolFunction, ToolDefinition
13
+ from synkro.prompts.tool_templates import (
14
+ MULTI_TURN_TOOL_DECISION_PROMPT,
15
+ MULTI_TURN_TOOL_SYNTHESIS_PROMPT,
16
+ )
17
+
18
+ if TYPE_CHECKING:
19
+ from synkro.generation.tool_simulator import ToolSimulator
20
+ from synkro.generation.follow_ups import FollowUpGenerator
21
+
22
+
23
+ # =============================================================================
24
+ # Pydantic models for structured JSON output
25
+ # =============================================================================
26
+
27
class ToolCallRequest(BaseModel):
    """One tool invocation requested by the model.

    Mirrors the OpenAI function-calling wire format: ``arguments`` is
    carried as a raw JSON string, not a parsed dict, so it can be placed
    directly into a ``ToolFunction`` without re-encoding.
    """

    name: str = Field(description="Name of the tool to call")
    arguments: str = Field(description="Arguments as a JSON string, e.g. '{\"query\": \"test\"}'")

    def get_arguments_dict(self) -> dict:
        """Decode the JSON-encoded ``arguments`` string into a dict.

        Raises:
            json.JSONDecodeError: If ``arguments`` is not valid JSON.
        """
        parsed: dict = json.loads(self.arguments)
        return parsed
36
+
37
+
38
class ToolCallDecision(BaseModel):
    """
    Structured output for the LLM's tool calling decision.

    The LLM outputs this to indicate whether tools are needed
    and which ones to call.

    Exactly one of the two "payload" fields is expected to be meaningful:
    ``tool_calls`` when ``needs_tool`` is True, ``direct_response`` when it
    is False. Neither is enforced by validation — consumers check
    ``needs_tool`` first (see ToolCallResponseGenerator._build_message_sequence).
    """

    # Gate flag consulted before looking at tool_calls / direct_response.
    needs_tool: bool = Field(
        description="Whether a tool call is needed to answer the user's request"
    )
    # Free-text rationale; kept for dataset quality, not used programmatically here.
    reasoning: str = Field(
        description="Brief explanation of why tool is/isn't needed"
    )
    # Defaults to empty so a "no tool" decision needs no explicit list.
    tool_calls: list[ToolCallRequest] = Field(
        default_factory=list,
        description="List of tool calls to make (empty if needs_tool is False)"
    )
    # Optional shortcut answer; generator falls back to a second LLM call
    # if this is None while needs_tool is False.
    direct_response: str | None = Field(
        default=None,
        description="Direct response if no tool is needed"
    )
60
+
61
+
62
class FinalSynthesis(BaseModel):
    """Structured output for synthesizing tool results into a response.

    Single-field wrapper so the LLM's final answer can be requested via
    JSON mode (generate_structured) like every other structured call.
    """

    response: str = Field(
        description="Natural response incorporating the tool results"
    )
68
+
69
+
70
+ # =============================================================================
71
+ # Tool Call Response Generator
72
+ # =============================================================================
73
+
74
class ToolCallResponseGenerator:
    """
    Generates tool call training traces using JSON mode for structured outputs.

    Produces traces in OpenAI function calling format:
    - system message with tool descriptions
    - user message with request
    - assistant message with tool_calls (or direct response)
    - tool response messages
    - final assistant message synthesizing results

    Example:
        >>> gen = ToolCallResponseGenerator(
        ...     tools=[web_search_tool, db_tool],
        ...     llm=LLM(model=OpenAI.GPT_4O),
        ...     simulator=tool_simulator,
        ... )
        >>> trace = await gen.generate_single(policy_text, scenario)
    """

    def __init__(
        self,
        tools: list[ToolDefinition],
        llm: LLM | None = None,
        simulator: "ToolSimulator | None" = None,
        model: Model = OpenAI.GPT_4O_MINI,
    ):
        """
        Initialize the tool call response generator.

        Args:
            tools: List of available tool definitions
            llm: LLM client to use (creates one if not provided)
            simulator: Tool simulator for generating tool responses
            model: Model to use if creating LLM
        """
        self.tools = tools
        # Name -> definition index for O(1) lookup when simulating calls.
        self.tools_by_name = {t.name: t for t in tools}
        self.llm = llm or LLM(model=model)
        self.simulator = simulator
        # Created lazily (see follow_up_generator) to avoid a circular import.
        self._follow_up_gen: "FollowUpGenerator | None" = None

    @property
    def follow_up_generator(self) -> "FollowUpGenerator":
        """Lazy initialization of follow-up generator for multi-turn."""
        if self._follow_up_gen is None:
            # Imported here, not at module top, to break the import cycle
            # with synkro.generation.follow_ups.
            from synkro.generation.follow_ups import FollowUpGenerator
            self._follow_up_gen = FollowUpGenerator(llm=self.llm)
        return self._follow_up_gen

    def _get_tools_description(self) -> str:
        """Get formatted description of all tools for system prompt."""
        descriptions = []
        for tool in self.tools:
            descriptions.append(tool.to_system_prompt())
        return "\n\n".join(descriptions)

    def _get_tools_json_schema(self) -> str:
        """Get JSON schema representation of tools (OpenAI tools array)."""
        tools_json = [tool.to_openai_format() for tool in self.tools]
        return json.dumps(tools_json, indent=2)

    def _generate_call_id(self) -> str:
        """Generate a unique tool call ID (``call_`` + 12 hex chars)."""
        return f"call_{uuid.uuid4().hex[:12]}"

    async def generate_single(
        self,
        policy_text: str,
        scenario: Scenario,
        target_turns: int = 1,
    ) -> Trace:
        """
        Generate a single tool call trace.

        Args:
            policy_text: The policy/guidelines text
            scenario: The scenario to respond to
            target_turns: Number of conversation turns (1 for single-turn,
                         >1 for multi-turn with follow-up questions)

        Returns:
            Trace with proper tool calling format
        """
        if target_turns > 1:
            return await self._generate_multi_turn(policy_text, scenario, target_turns)

        # Single-turn generation
        tools_desc = self._get_tools_description()

        # Step 1: Get LLM decision on tool usage
        decision = await self._get_tool_decision(policy_text, scenario, tools_desc)

        # Step 2: Build the message sequence
        messages = await self._build_message_sequence(
            policy_text, scenario, tools_desc, decision
        )

        return Trace(messages=messages, scenario=scenario)

    async def _get_tool_decision(
        self,
        policy_text: str,
        scenario: Scenario,
        tools_desc: str,
    ) -> ToolCallDecision:
        """
        Get the LLM's decision on whether to use tools.

        Uses JSON mode to force structured output.
        """
        prompt = f"""You are a customer support agent deciding whether to use tools.

AVAILABLE TOOLS:
{tools_desc}

TOOL USAGE GUIDELINES:
{policy_text}

USER REQUEST:
{scenario.description}

CONTEXT:
{scenario.context}

Analyze this request and decide:
1. Does this require calling a tool, or can you answer directly?
2. If tools are needed, which ones and with what arguments?
3. If no tools needed, provide the direct response.

Important rules:
- Only call tools when necessary (don't call for information you already know)
- Use correct tool names and parameter types
- If multiple tools are needed, list them all
- Provide clear reasoning for your decision"""

        return await self.llm.generate_structured(prompt, ToolCallDecision)

    async def _build_message_sequence(
        self,
        policy_text: str,
        scenario: Scenario,
        tools_desc: str,
        decision: ToolCallDecision,
    ) -> list[Message]:
        """Build the full message sequence based on the tool decision.

        Returns the complete conversation for a single turn:
        system, user, then either (assistant tool_calls, tool results,
        final assistant synthesis) or a single direct assistant reply.
        """
        messages = []

        # System message with tool descriptions
        system_content = f"""You are a helpful customer support agent. You have access to the following tools:

{tools_desc}

Follow the tool usage guidelines provided to assist customers effectively."""

        messages.append(Message(role="system", content=system_content))

        # User message
        messages.append(Message(role="user", content=scenario.description))

        if decision.needs_tool and decision.tool_calls:
            # Assistant message with tool_calls
            tool_calls = []
            for tc in decision.tool_calls:
                call_id = self._generate_call_id()
                tool_calls.append(ToolCall(
                    id=call_id,
                    type="function",
                    function=ToolFunction(
                        name=tc.name,
                        arguments=tc.arguments  # Already a JSON string
                    )
                ))

            # content=None per OpenAI format when tool_calls are present.
            messages.append(Message(
                role="assistant",
                content=None,
                tool_calls=tool_calls
            ))

            # Tool response messages, one per call, linked via tool_call_id.
            tool_results = []
            for tc in tool_calls:
                result = await self._simulate_tool_call(tc)
                tool_results.append(result)

                messages.append(Message(
                    role="tool",
                    content=result,
                    tool_call_id=tc.id
                ))

            # Final assistant message synthesizing results
            final_response = await self._synthesize_response(
                scenario.description, tool_calls, tool_results, policy_text
            )
            messages.append(Message(role="assistant", content=final_response))

        else:
            # Direct response without tools; fall back to a dedicated LLM
            # call if the decision did not carry one.
            response = decision.direct_response or await self._generate_direct_response(
                policy_text, scenario, tools_desc
            )
            messages.append(Message(role="assistant", content=response))

        return messages

    async def _simulate_tool_call(self, tool_call: ToolCall) -> str:
        """Simulate a tool response.

        Resolution order: injected simulator, then the tool definition's
        mock_responses, then a generic synthetic success payload.
        """
        if self.simulator:
            return await self.simulator.simulate(tool_call)

        # Fallback: generate a mock response based on tool definition
        tool_name = tool_call.function.name
        if tool_name in self.tools_by_name:
            tool = self.tools_by_name[tool_name]
            if tool.mock_responses:
                # Use a mock response
                import random
                return random.choice(tool.mock_responses)

        # Default mock response. The arguments string comes from an LLM and
        # may be malformed JSON — echo it verbatim rather than crashing the
        # whole trace-generation run.
        try:
            args = json.loads(tool_call.function.arguments)
        except json.JSONDecodeError:
            args = tool_call.function.arguments
        return json.dumps({
            "status": "success",
            "result": f"Simulated response for {tool_name}",
            "query": args
        })

    async def _synthesize_response(
        self,
        user_request: str,
        tool_calls: list[ToolCall],
        tool_results: list[str],
        policy_text: str,
    ) -> str:
        """Synthesize a natural response from tool results."""
        # Build context of tool calls and results
        tools_context = []
        for tc, result in zip(tool_calls, tool_results):
            tools_context.append(f"Tool: {tc.function.name}")
            tools_context.append(f"Arguments: {tc.function.arguments}")
            tools_context.append(f"Result: {result}")
            tools_context.append("")

        # chr(10) is "\n" — backslashes are not allowed inside f-string
        # expressions before Python 3.12.
        prompt = f"""Based on the tool results, provide a helpful response to the user.

USER REQUEST:
{user_request}

TOOL RESULTS:
{chr(10).join(tools_context)}

GUIDELINES:
{policy_text}

Synthesize the tool results into a natural, helpful response.
- Incorporate the information from the tool results
- Don't expose raw JSON or technical details
- Be conversational and helpful
- If a tool returned an error, acknowledge it and offer alternatives"""

        synthesis = await self.llm.generate_structured(prompt, FinalSynthesis)
        return synthesis.response

    async def _generate_direct_response(
        self,
        policy_text: str,
        scenario: Scenario,
        tools_desc: str,
    ) -> str:
        """Generate a direct response when no tools are needed."""
        prompt = f"""Provide a helpful response to the user's request.

USER REQUEST:
{scenario.description}

CONTEXT:
{scenario.context}

GUIDELINES:
{policy_text}

Note: No tools are needed for this request. Provide a direct, helpful response
based on your knowledge and the guidelines."""

        synthesis = await self.llm.generate_structured(prompt, FinalSynthesis)
        return synthesis.response

    # =========================================================================
    # MULTI-TURN TOOL CALLING
    # =========================================================================

    async def _generate_multi_turn(
        self,
        policy_text: str,
        scenario: Scenario,
        target_turns: int,
    ) -> Trace:
        """
        Generate multi-turn tool call trace.

        Each turn can independently decide if new tool calls are needed
        based on the follow-up question and conversation history.

        Args:
            policy_text: The policy/guidelines text
            scenario: The initial scenario to respond to
            target_turns: Number of conversation turns

        Returns:
            Trace with multi-turn tool calling conversation
        """
        tools_desc = self._get_tools_description()

        # Step 1: Generate initial response (Turn 1)
        decision = await self._get_tool_decision(policy_text, scenario, tools_desc)
        messages = await self._build_message_sequence(
            policy_text, scenario, tools_desc, decision
        )

        # Step 2: Generate follow-up turns
        for turn in range(1, target_turns):
            # Generate follow-up question based on conversation so far
            follow_up = await self.follow_up_generator.generate(
                policy_text=policy_text,
                messages=messages,
                turn_index=turn,
            )

            # Add user message with follow-up question
            messages.append(Message(role="user", content=follow_up.question))

            # Get tool decision for this follow-up
            follow_up_decision = await self._get_follow_up_tool_decision(
                policy_text=policy_text,
                messages=messages,
                follow_up_question=follow_up.question,
                tools_desc=tools_desc,
            )

            # Build response for this turn (may include new tool calls)
            turn_messages = await self._build_follow_up_message_sequence(
                policy_text=policy_text,
                messages=messages,
                follow_up_question=follow_up.question,
                tools_desc=tools_desc,
                decision=follow_up_decision,
            )

            # Extend conversation with this turn's messages
            messages.extend(turn_messages)

        return Trace(messages=messages, scenario=scenario)

    def _format_conversation_with_tools(self, messages: list[Message]) -> str:
        """
        Format conversation including tool calls and results.

        This provides context for follow-up tool decisions so the LLM knows:
        - What tools were already called
        - What results were obtained
        - What information is already available
        """
        formatted = []
        for msg in messages:
            role = msg.role.upper()

            if msg.role == "assistant" and msg.tool_calls:
                # Format assistant message with tool calls. tool_calls may
                # hold ToolCall objects or raw dicts depending on the
                # upstream producer, so handle both shapes.
                tool_strs = []
                for tc in msg.tool_calls:
                    if hasattr(tc, "function"):
                        tool_strs.append(
                            f"  - {tc.function.name}({tc.function.arguments})"
                        )
                    elif isinstance(tc, dict) and "function" in tc:
                        func = tc["function"]
                        tool_strs.append(
                            f"  - {func.get('name', 'unknown')}({func.get('arguments', '{}')})"
                        )
                    else:
                        # Last resort: repr of whatever it is.
                        tool_strs.append(f"  - {tc}")
                formatted.append("ASSISTANT: [Tool Calls]\n" + "\n".join(tool_strs))
            elif msg.role == "tool":
                # Format tool response
                formatted.append(f"TOOL RESULT [{msg.tool_call_id}]: {msg.content}")
            else:
                content = msg.content or "[No content]"
                formatted.append(f"{role}: {content}")

        return "\n\n".join(formatted)

    async def _get_follow_up_tool_decision(
        self,
        policy_text: str,
        messages: list[Message],
        follow_up_question: str,
        tools_desc: str,
    ) -> ToolCallDecision:
        """
        Get tool decision for a follow-up question with full conversation context.

        The LLM can see previous tool calls and results to decide if new
        tools are needed or if existing results can answer the follow-up.
        """
        conversation_history = self._format_conversation_with_tools(messages)

        prompt = MULTI_TURN_TOOL_DECISION_PROMPT.format(
            tools_desc=tools_desc,
            policy_text=policy_text,
            conversation_history=conversation_history,
            follow_up_question=follow_up_question,
        )

        return await self.llm.generate_structured(prompt, ToolCallDecision)

    async def _build_follow_up_message_sequence(
        self,
        policy_text: str,
        messages: list[Message],
        follow_up_question: str,
        tools_desc: str,
        decision: ToolCallDecision,
    ) -> list[Message]:
        """
        Build message sequence for a follow-up turn.

        Returns only the new messages for this turn (not the full conversation).
        May include: assistant with tool_calls, tool responses, final assistant.
        Or just: assistant with direct response.
        """
        new_messages = []

        if decision.needs_tool and decision.tool_calls:
            # Assistant message with new tool_calls
            tool_calls = []
            for tc in decision.tool_calls:
                call_id = self._generate_call_id()
                tool_calls.append(
                    ToolCall(
                        id=call_id,
                        type="function",
                        function=ToolFunction(
                            name=tc.name,
                            arguments=tc.arguments,
                        ),
                    )
                )

            new_messages.append(
                Message(role="assistant", content=None, tool_calls=tool_calls)
            )

            # Tool response messages
            tool_results = []
            for tc in tool_calls:
                result = await self._simulate_tool_call(tc)
                tool_results.append(result)
                new_messages.append(
                    Message(role="tool", content=result, tool_call_id=tc.id)
                )

            # Final assistant message synthesizing new results
            final_response = await self._synthesize_follow_up_response(
                policy_text=policy_text,
                messages=messages,
                follow_up_question=follow_up_question,
                tool_calls=tool_calls,
                tool_results=tool_results,
            )
            new_messages.append(Message(role="assistant", content=final_response))

        else:
            # Direct response without new tools
            if decision.direct_response:
                response = decision.direct_response
            else:
                # Generate response using existing context
                response = await self._synthesize_follow_up_response(
                    policy_text=policy_text,
                    messages=messages,
                    follow_up_question=follow_up_question,
                    tool_calls=[],
                    tool_results=[],
                )
            new_messages.append(Message(role="assistant", content=response))

        return new_messages

    async def _synthesize_follow_up_response(
        self,
        policy_text: str,
        messages: list[Message],
        follow_up_question: str,
        tool_calls: list[ToolCall],
        tool_results: list[str],
    ) -> str:
        """Synthesize response for a follow-up turn.

        With empty tool_calls/tool_results this produces an answer from the
        existing conversation context only.
        """
        conversation_history = self._format_conversation_with_tools(messages)

        # Format new tool results if any
        if tool_calls and tool_results:
            new_tool_results = []
            for tc, result in zip(tool_calls, tool_results):
                new_tool_results.append(f"Tool: {tc.function.name}")
                new_tool_results.append(f"Arguments: {tc.function.arguments}")
                new_tool_results.append(f"Result: {result}")
                new_tool_results.append("")
            new_results_str = "\n".join(new_tool_results)
        else:
            new_results_str = "None (using existing information from conversation)"

        prompt = MULTI_TURN_TOOL_SYNTHESIS_PROMPT.format(
            conversation_history=conversation_history,
            follow_up_question=follow_up_question,
            new_tool_results=new_results_str,
            policy_text=policy_text,
        )

        synthesis = await self.llm.generate_structured(prompt, FinalSynthesis)
        return synthesis.response

    async def generate(
        self,
        policy_text: str,
        scenarios: list[Scenario],
    ) -> list[Trace]:
        """
        Generate traces for multiple scenarios.

        Scenarios are processed sequentially (not concurrently) so rate
        limits on the underlying LLM client are respected.

        Args:
            policy_text: The policy/guidelines text
            scenarios: List of scenarios to respond to

        Returns:
            List of traces with tool calling format
        """
        traces = []
        for scenario in scenarios:
            trace = await self.generate_single(policy_text, scenario)
            traces.append(trace)
        return traces
617
+
618
+
619
# Public API of this module.
__all__ = [
    "ToolCallResponseGenerator",
    "ToolCallDecision",
    "ToolCallRequest",
    "FinalSynthesis",
]
625
+