synkro-0.4.5-py3-none-any.whl
This diff shows the content of publicly released versions of this package as they appear in their respective public registries, and is provided for informational purposes only.
- synkro/__init__.py +165 -0
- synkro/cli.py +120 -0
- synkro/core/__init__.py +7 -0
- synkro/core/dataset.py +233 -0
- synkro/core/policy.py +337 -0
- synkro/errors.py +178 -0
- synkro/examples/__init__.py +148 -0
- synkro/factory.py +160 -0
- synkro/formatters/__init__.py +12 -0
- synkro/formatters/qa.py +85 -0
- synkro/formatters/sft.py +90 -0
- synkro/formatters/tool_call.py +127 -0
- synkro/generation/__init__.py +9 -0
- synkro/generation/generator.py +163 -0
- synkro/generation/planner.py +87 -0
- synkro/generation/responses.py +160 -0
- synkro/generation/scenarios.py +90 -0
- synkro/generation/tool_responses.py +370 -0
- synkro/generation/tool_simulator.py +114 -0
- synkro/llm/__init__.py +7 -0
- synkro/llm/client.py +235 -0
- synkro/llm/rate_limits.py +95 -0
- synkro/models/__init__.py +43 -0
- synkro/models/anthropic.py +26 -0
- synkro/models/google.py +19 -0
- synkro/models/openai.py +31 -0
- synkro/modes/__init__.py +15 -0
- synkro/modes/config.py +66 -0
- synkro/modes/qa.py +18 -0
- synkro/modes/sft.py +18 -0
- synkro/modes/tool_call.py +18 -0
- synkro/parsers.py +442 -0
- synkro/pipeline/__init__.py +20 -0
- synkro/pipeline/phases.py +237 -0
- synkro/pipeline/runner.py +198 -0
- synkro/pipelines.py +105 -0
- synkro/prompts/__init__.py +44 -0
- synkro/prompts/base.py +167 -0
- synkro/prompts/qa_templates.py +97 -0
- synkro/prompts/templates.py +281 -0
- synkro/prompts/tool_templates.py +201 -0
- synkro/quality/__init__.py +14 -0
- synkro/quality/grader.py +130 -0
- synkro/quality/refiner.py +137 -0
- synkro/quality/tool_grader.py +126 -0
- synkro/quality/tool_refiner.py +128 -0
- synkro/reporting.py +213 -0
- synkro/schemas.py +325 -0
- synkro/types/__init__.py +41 -0
- synkro/types/core.py +113 -0
- synkro/types/dataset_type.py +30 -0
- synkro/types/tool.py +94 -0
- synkro-0.4.5.data/data/examples/__init__.py +148 -0
- synkro-0.4.5.dist-info/METADATA +221 -0
- synkro-0.4.5.dist-info/RECORD +58 -0
- synkro-0.4.5.dist-info/WHEEL +4 -0
- synkro-0.4.5.dist-info/entry_points.txt +2 -0
- synkro-0.4.5.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,18 @@
"""Tool Call mode configuration."""

from synkro.modes.config import ModeConfig
from synkro.prompts.tool_templates import (
    TOOL_SCENARIO_PROMPT,
    TOOL_RESPONSE_PROMPT,
    TOOL_GRADE_PROMPT,
    TOOL_REFINE_PROMPT,
)

TOOL_CALL_CONFIG = ModeConfig(
    scenario_prompt=TOOL_SCENARIO_PROMPT,
    response_prompt=TOOL_RESPONSE_PROMPT,
    grade_prompt=TOOL_GRADE_PROMPT,
    refine_prompt=TOOL_REFINE_PROMPT,
    output_description="Tool calling: {messages: [system, user, {tool_calls}, {tool}, assistant]}",
)

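The output_description above is the layout contract for a generated tool-calling trace: messages in the order system, user, assistant tool call, tool result, final assistant answer. As a rough illustration only (not part of the package; the exact tool-call field names live in synkro/types/tool.py and are assumed here), such a trace might look like:

# Hypothetical example of the {messages: [system, user, {tool_calls}, {tool}, assistant]} layout.
# Field names inside "tool_calls" are illustrative assumptions, not the package's schema.
example_trace = {
    "messages": [
        {"role": "system", "content": "You are a support agent with an order-lookup tool."},
        {"role": "user", "content": "Where is order #1042?"},
        {"role": "assistant", "tool_calls": [{"name": "lookup_order", "arguments": {"order_id": "1042"}}]},
        {"role": "tool", "content": "{\"status\": \"shipped\", \"eta\": \"2025-06-01\"}"},
        {"role": "assistant", "content": "Order #1042 has shipped and should arrive by June 1."},
    ]
}
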
synkro/parsers.py
ADDED
@@ -0,0 +1,442 @@
"""Response parsing functions for LLM outputs with robust JSON extraction."""

import json
import re
from typing import Any

from synkro.schemas import (
    ScenarioOutput,
    GradeOutput,
    SingleGrade,
    SingleResponse,
    PolicyComplexity,
    PolicyPlan,
    ChatMessage,
)
from synkro.prompts.templates import SYSTEM_PROMPT


def strip_markdown_fences(content: str) -> str:
    """Strip markdown code fences from content."""
    # Remove ```json ... ``` blocks, keeping just the content
    content = re.sub(r'```json\s*', '', content)
    content = re.sub(r'```\s*', '', content)
    return content.strip()


def extract_json(content: str, start_char: str = "[") -> str | None:
    """
    Extract JSON from a string that may contain other text.

    Args:
        content: Raw content that may contain JSON
        start_char: Starting character to look for ('[' for arrays, '{' for objects)

    Returns:
        Extracted JSON string or None if not found
    """
    end_char = "]" if start_char == "[" else "}"
    start = content.find(start_char)
    if start == -1:
        return None

    depth = 0
    in_string = False
    escape = False

    for i in range(start, len(content)):
        char = content[i]

        if escape:
            escape = False
            continue

        if char == "\\" and in_string:
            escape = True
            continue

        if char == '"':
            in_string = not in_string
            continue

        if in_string:
            continue

        if char == start_char:
            depth += 1
        if char == end_char:
            depth -= 1

            if depth == 0:
                return content[start : i + 1]

    return None

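For illustration only (not part of parsers.py): how strip_markdown_fences and extract_json behave on typical LLM output, including brackets that appear inside JSON strings.

from synkro.parsers import strip_markdown_fences, extract_json

raw = 'Here you go:\n```json\n[{"scenario": "Refund after 45 days", "context": "30-day policy"}]\n```'
cleaned = strip_markdown_fences(raw)   # drops the ``` fences
extract_json(cleaned, "[")             # -> '[{"scenario": "Refund after 45 days", "context": "30-day policy"}]'

# The depth counter ignores brackets inside strings:
extract_json('note: {"text": "contains ] and } safely"} trailing', "{")
# -> '{"text": "contains ] and } safely"}'
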
def extract_content(response: Any) -> str:
    """
    Extract text content from various LLM response formats.

    Args:
        response: Raw response from an LLM

    Returns:
        Extracted text content
    """
    try:
        if isinstance(response, str):
            return response

        # Gemini format
        if isinstance(response, dict):
            if "candidates" in response:
                return response["candidates"][0]["content"]["parts"][0]["text"]

            # OpenAI format
            if "choices" in response:
                return response["choices"][0]["message"]["content"]

            # Simple content field
            if "content" in response:
                return response["content"]

            if "text" in response:
                return response["text"]

            if "output" in response:
                return response["output"]

        return json.dumps(response)
    except Exception:
        return str(response)

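Provider shapes understood by extract_content, shown as a quick sketch (not package code):

from synkro.parsers import extract_content

openai_style = {"choices": [{"message": {"content": "hello"}}]}
gemini_style = {"candidates": [{"content": {"parts": [{"text": "hello"}]}}]}

extract_content(openai_style)    # -> "hello"
extract_content(gemini_style)    # -> "hello"
extract_content("plain string")  # -> "plain string"
extract_content({"foo": 1})      # unrecognized dict -> json.dumps fallback -> '{"foo": 1}'
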
def parse_scenarios(response: Any, expected_count: int) -> list[ScenarioOutput]:
    """
    Parse scenario output from LLM response.

    Args:
        response: Raw LLM response
        expected_count: Number of scenarios expected

    Returns:
        List of parsed scenarios
    """
    try:
        content = extract_content(response)
        json_str = extract_json(content, "[")

        if json_str:
            parsed = json.loads(json_str)

            if isinstance(parsed, list):
                scenarios = []
                for s in parsed[:expected_count]:
                    scenarios.append(
                        ScenarioOutput(
                            scenario=s.get("scenario", s.get("description", "")),
                            context=s.get("context", s.get("background", "")),
                        )
                    )
                return scenarios
    except Exception:
        pass  # Fallback handles this

    # Fallback: generate placeholder scenarios
    return [
        ScenarioOutput(
            scenario=f"Policy compliance scenario {i + 1}",
            context="General policy application context",
        )
        for i in range(expected_count)
    ]

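A sketch (illustrative, not from the package) of the JSON array parse_scenarios expects, and its degrade-to-placeholder behavior:

from synkro.parsers import parse_scenarios

llm_output = '[{"scenario": "Customer requests a refund after 45 days", "context": "Policy allows refunds within 30 days"}]'
parse_scenarios(llm_output, expected_count=1)
# -> [ScenarioOutput(scenario="Customer requests a refund after 45 days", ...)]

parse_scenarios("no JSON here", expected_count=2)
# -> two placeholder ScenarioOutput objects ("Policy compliance scenario 1" / "2")
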
def parse_batched_responses(
    response: Any, expected_count: int, scenarios: list[ScenarioOutput]
) -> list[dict]:
    """
    Parse batched response output from LLM.

    Args:
        response: Raw LLM response
        expected_count: Number of responses expected
        scenarios: Original scenarios for fallback

    Returns:
        List of response dicts with 'index' and 'messages'
    """
    try:
        content = extract_content(response)
        json_str = extract_json(content, "[")

        if json_str:
            parsed = json.loads(json_str)

            if isinstance(parsed, list):
                results = []
                for r in parsed:
                    index = r.get("index", 0)

                    if isinstance(r.get("messages"), list) and len(r["messages"]) > 0:
                        results.append(
                            {
                                "index": index,
                                "messages": [
                                    ChatMessage(role=m["role"], content=m.get("content", ""))
                                    for m in r["messages"]
                                ],
                            }
                        )
                    else:
                        # Fallback: construct messages from old format
                        scenario = scenarios[index] if index < len(scenarios) else scenarios[0]
                        results.append(
                            {
                                "index": index,
                                "messages": [
                                    ChatMessage(role="system", content=SYSTEM_PROMPT),
                                    ChatMessage(
                                        role="user",
                                        content=f"Scenario: {scenario.scenario}\n\nContext: {scenario.context}",
                                    ),
                                    ChatMessage(
                                        role="assistant", content=r.get("response", "")
                                    ),
                                ],
                            }
                        )
                return results
    except Exception:
        pass  # Fallback handles this

    # Fallback
    return [
        {
            "index": i,
            "messages": [
                ChatMessage(role="system", content=SYSTEM_PROMPT),
                ChatMessage(
                    role="user",
                    content=f"Scenario: {scenarios[i].scenario}\n\nContext: {scenarios[i].context}",
                ),
                ChatMessage(role="assistant", content="Unable to generate response"),
            ],
        }
        for i in range(min(expected_count, len(scenarios)))
    ]

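Illustrative only: the two input shapes parse_batched_responses accepts — the preferred per-item "messages" form, and the legacy "response" form that gets wrapped with SYSTEM_PROMPT and the original scenario text.

preferred = '[{"index": 0, "messages": [{"role": "system", "content": "..."}, {"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]}]'
legacy = '[{"index": 0, "response": "Final assistant answer only"}]'
# Both parse to [{"index": 0, "messages": [ChatMessage(...), ...]}].
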
def parse_batched_grades(response: Any) -> list[GradeOutput]:
    """
    Parse grading output from LLM response.

    Args:
        response: Raw LLM response

    Returns:
        List of parsed grades
    """
    try:
        content = extract_content(response)
        json_str = extract_json(content, "[")

        if json_str:
            parsed = json.loads(json_str)

            if isinstance(parsed, list):
                grades = []
                for g in parsed:
                    grades.append(
                        GradeOutput(
                            index=g.get("index", 0),
                            passed=g.get("pass", False),
                            policy_violations=g.get("policy_violations", []),
                            missing_citations=g.get("missing_citations", []),
                            incomplete_reasoning=g.get("incomplete_reasoning", []),
                            vague_recommendations=g.get("vague_recommendations", []),
                            feedback=g.get("feedback", ""),
                        )
                    )
                return grades
    except Exception:
        pass  # Return empty list below

    return []

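For reference (not package code), the grade object that parse_batched_grades — and parse_single_grade below — reads; note the LLM-facing key is "pass", mapped to the passed field:

from synkro.parsers import parse_batched_grades

grades_json = '''[{"index": 0, "pass": false,
  "policy_violations": ["Approved a refund outside the 30-day window"],
  "missing_citations": ["Refund policy section 2.1"],
  "incomplete_reasoning": [], "vague_recommendations": [],
  "feedback": "Cite the refund-window clause and decline the request."}]'''

parse_batched_grades(grades_json)
# -> [GradeOutput(index=0, passed=False, policy_violations=[...], ...)]
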
def parse_single_response(response: Any) -> SingleResponse | None:
    """
    Parse a single response from parallel generation.

    Args:
        response: Raw LLM response for a single scenario

    Returns:
        Parsed SingleResponse or None if parsing failed
    """
    try:
        content = extract_content(response)
        # Strip markdown fences first
        content = strip_markdown_fences(content)

        # Try to find and parse valid JSON objects with messages
        remaining = content
        while remaining:
            json_str = extract_json(remaining, "{")
            if not json_str:
                break

            try:
                parsed = json.loads(json_str)

                # Validate it has the expected structure
                if isinstance(parsed.get("messages"), list) and len(parsed["messages"]) >= 1:
                    messages = []
                    valid = True

                    for m in parsed["messages"]:
                        if not isinstance(m, dict) or "role" not in m or "content" not in m:
                            valid = False
                            break

                        msg_content = m.get("content", "")
                        # Reject if content contains refinement prompt leak markers
                        if "GRADER FEEDBACK" in msg_content or "Generate an IMPROVED response" in msg_content:
                            valid = False
                            break

                        messages.append(ChatMessage(role=m["role"], content=msg_content))

                    if valid and len(messages) >= 1:
                        return SingleResponse(messages=messages)

            except json.JSONDecodeError:
                pass

            # Move past this JSON object and try to find another
            end_pos = remaining.find(json_str) + len(json_str)
            remaining = remaining[end_pos:]

    except Exception:
        pass  # Caller handles None with fallback

    return None

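A minimal sketch (not part of the package) of what parse_single_response tolerates and rejects — fenced JSON is cleaned, and responses leaking refinement-prompt text are discarded so the caller can fall back:

from synkro.parsers import parse_single_response

fenced = '```json\n{"messages": [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]}\n```'
parse_single_response(fenced)   # -> SingleResponse with two ChatMessage entries

leaked = '{"messages": [{"role": "assistant", "content": "GRADER FEEDBACK: fix tone"}]}'
parse_single_response(leaked)   # -> None (leak marker detected)
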
def parse_single_grade(response: Any) -> SingleGrade | None:
    """
    Parse a single grade from parallel grading.

    Args:
        response: Raw LLM response for a single grade

    Returns:
        Parsed SingleGrade or None if parsing failed
    """
    try:
        content = extract_content(response)
        json_str = extract_json(content, "{")

        if json_str:
            parsed = json.loads(json_str)
            return SingleGrade(
                passed=parsed.get("pass", False),
                policy_violations=parsed.get("policy_violations", []),
                missing_citations=parsed.get("missing_citations", []),
                incomplete_reasoning=parsed.get("incomplete_reasoning", []),
                vague_recommendations=parsed.get("vague_recommendations", []),
                feedback=parsed.get("feedback", ""),
            )
    except Exception:
        pass  # Caller handles None with fallback

    return None

def parse_policy_complexity(response: Any) -> PolicyComplexity:
    """
    Parse policy complexity analysis from LLM response.

    Args:
        response: Raw LLM response

    Returns:
        Parsed PolicyComplexity with defaults if parsing fails
    """
    try:
        content = extract_content(response)
        json_str = extract_json(content, "{")

        if json_str:
            parsed = json.loads(json_str)
            return PolicyComplexity(
                variable_count=parsed.get("variable_count", 2),
                complexity_level=parsed.get("complexity_level", "conditional"),
                recommended_turns=parsed.get("recommended_turns", 3),
                reasoning=parsed.get("reasoning", "Defaulting to conditional complexity"),
            )
    except Exception:
        pass  # Fallback handles this

    # Default fallback
    return PolicyComplexity(
        variable_count=2,
        complexity_level="conditional",
        recommended_turns=3,
        reasoning="Unable to analyze policy, defaulting to conditional complexity with 3 turns",
    )

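Illustrative shape of the complexity analysis this parser expects (not package code); anything unparseable falls back to the conditional / 3-turn default. The complexity_level vocabulary beyond "conditional" is not visible in this diff, so "conditional" is used here:

from synkro.parsers import parse_policy_complexity

analysis = '{"variable_count": 4, "complexity_level": "conditional", "recommended_turns": 5, "reasoning": "Several interacting eligibility rules"}'
parse_policy_complexity(analysis)
# -> PolicyComplexity(variable_count=4, complexity_level="conditional", recommended_turns=5, ...)
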
def parse_policy_plan(response: Any, target_traces: int) -> PolicyPlan:
    """
    Parse policy planning output from LLM response.

    Args:
        response: Raw LLM response
        target_traces: Target number of traces for fallback

    Returns:
        Parsed PolicyPlan with defaults if parsing fails
    """
    try:
        content = extract_content(response)
        json_str = extract_json(content, "{")

        if json_str:
            parsed = json.loads(json_str)

            categories = []
            for cat in parsed.get("categories", []):
                categories.append(
                    {
                        "name": cat.get("name", "General"),
                        "description": cat.get("description", "General scenarios"),
                        "traces": cat.get("traces", target_traces // 3),
                    }
                )

            if categories:
                return PolicyPlan(
                    categories=categories,
                    reasoning=parsed.get("reasoning", ""),
                )
    except Exception:
        pass  # Fallback handles this

    # Default fallback plan
    third = target_traces // 3
    remainder = target_traces - (third * 3)
    return PolicyPlan(
        categories=[
            {"name": "Happy Path", "description": "Clear success cases", "traces": third},
            {"name": "Edge Cases", "description": "Ambiguous situations", "traces": third},
            {
                "name": "Violations",
                "description": "Clear failure cases",
                "traces": third + remainder,
            },
        ],
        reasoning="Default plan - unable to parse LLM response",
    )

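Worked example (illustrative) of the fallback split in parse_policy_plan: traces are divided into thirds, with the remainder assigned to the Violations category:

from synkro.parsers import parse_policy_plan

plan = parse_policy_plan("unparseable output", target_traces=10)
# third = 10 // 3 = 3, remainder = 10 - 9 = 1
# -> Happy Path: 3, Edge Cases: 3, Violations: 3 + 1 = 4 (total = 10)
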
synkro/pipeline/__init__.py
ADDED
@@ -0,0 +1,20 @@
"""Pipeline module for decomposed generation phases."""

from synkro.pipeline.phases import (
    PlanPhase,
    ScenarioPhase,
    ResponsePhase,
    GradingPhase,
    ToolCallResponsePhase,
)
from synkro.pipeline.runner import GenerationPipeline

__all__ = [
    "PlanPhase",
    "ScenarioPhase",
    "ResponsePhase",
    "GradingPhase",
    "ToolCallResponsePhase",
    "GenerationPipeline",
]

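Usage note (not from the package docs): the re-exports above make the phases and runner importable from the package level; constructor signatures live in phases.py and runner.py, which are not shown in this diff.

from synkro.pipeline import (
    GenerationPipeline,
    PlanPhase,
    ScenarioPhase,
    ResponsePhase,
    GradingPhase,
    ToolCallResponsePhase,
)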