synkro-0.4.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of synkro might be problematic.
- synkro/__init__.py +165 -0
- synkro/cli.py +120 -0
- synkro/core/__init__.py +7 -0
- synkro/core/dataset.py +233 -0
- synkro/core/policy.py +337 -0
- synkro/errors.py +178 -0
- synkro/examples/__init__.py +148 -0
- synkro/factory.py +160 -0
- synkro/formatters/__init__.py +12 -0
- synkro/formatters/qa.py +85 -0
- synkro/formatters/sft.py +90 -0
- synkro/formatters/tool_call.py +127 -0
- synkro/generation/__init__.py +9 -0
- synkro/generation/generator.py +163 -0
- synkro/generation/planner.py +87 -0
- synkro/generation/responses.py +160 -0
- synkro/generation/scenarios.py +90 -0
- synkro/generation/tool_responses.py +370 -0
- synkro/generation/tool_simulator.py +114 -0
- synkro/llm/__init__.py +7 -0
- synkro/llm/client.py +235 -0
- synkro/llm/rate_limits.py +95 -0
- synkro/models/__init__.py +43 -0
- synkro/models/anthropic.py +26 -0
- synkro/models/google.py +19 -0
- synkro/models/openai.py +31 -0
- synkro/modes/__init__.py +15 -0
- synkro/modes/config.py +66 -0
- synkro/modes/qa.py +18 -0
- synkro/modes/sft.py +18 -0
- synkro/modes/tool_call.py +18 -0
- synkro/parsers.py +442 -0
- synkro/pipeline/__init__.py +20 -0
- synkro/pipeline/phases.py +237 -0
- synkro/pipeline/runner.py +198 -0
- synkro/pipelines.py +105 -0
- synkro/prompts/__init__.py +44 -0
- synkro/prompts/base.py +167 -0
- synkro/prompts/qa_templates.py +97 -0
- synkro/prompts/templates.py +281 -0
- synkro/prompts/tool_templates.py +201 -0
- synkro/quality/__init__.py +14 -0
- synkro/quality/grader.py +130 -0
- synkro/quality/refiner.py +137 -0
- synkro/quality/tool_grader.py +126 -0
- synkro/quality/tool_refiner.py +128 -0
- synkro/reporting.py +213 -0
- synkro/schemas.py +325 -0
- synkro/types/__init__.py +41 -0
- synkro/types/core.py +113 -0
- synkro/types/dataset_type.py +30 -0
- synkro/types/tool.py +94 -0
- synkro-0.4.5.data/data/examples/__init__.py +148 -0
- synkro-0.4.5.dist-info/METADATA +221 -0
- synkro-0.4.5.dist-info/RECORD +58 -0
- synkro-0.4.5.dist-info/WHEEL +4 -0
- synkro-0.4.5.dist-info/entry_points.txt +2 -0
- synkro-0.4.5.dist-info/licenses/LICENSE +21 -0
synkro/pipeline/phases.py
ADDED
@@ -0,0 +1,237 @@
"""Pipeline phases for generation.

Each phase is a self-contained, testable unit that handles one step
of the generation pipeline.
"""

import asyncio
from asyncio import Semaphore
from typing import TYPE_CHECKING

from synkro.core.policy import Policy
from synkro.types.core import Plan, Scenario, Trace
from synkro.generation.planner import Planner
from synkro.generation.scenarios import ScenarioGenerator
from synkro.generation.responses import ResponseGenerator
from synkro.quality.grader import Grader
from synkro.quality.refiner import Refiner

if TYPE_CHECKING:
    from synkro.generation.tool_responses import ToolCallResponseGenerator


class PlanPhase:
    """
    Planning phase - analyzes policy and creates category distribution.

    This phase uses a stronger model to understand the policy and
    determine optimal scenario distribution.
    """

    async def execute(self, policy: Policy, traces: int, planner: Planner) -> Plan:
        """
        Execute the planning phase.

        Args:
            policy: The policy to analyze
            traces: Target number of traces
            planner: Planner component to use

        Returns:
            Plan with categories and trace distribution
        """
        return await planner.plan(policy.text, traces)


class ScenarioPhase:
    """
    Scenario generation phase - creates scenarios for each category.

    Runs in parallel across categories for efficiency.
    """

    async def execute(
        self,
        policy: Policy,
        plan: Plan,
        generator: ScenarioGenerator,
        semaphore: Semaphore,
    ) -> list[Scenario]:
        """
        Execute scenario generation.

        Args:
            policy: The policy text
            plan: Plan with categories
            generator: ScenarioGenerator component
            semaphore: Semaphore for rate limiting

        Returns:
            List of all generated scenarios
        """
        async def limited_generate(category):
            async with semaphore:
                return await generator.generate(policy.text, category.count, category=category)

        tasks = [limited_generate(cat) for cat in plan.categories]
        results = await asyncio.gather(*tasks)

        # Flatten results
        return [scenario for batch in results for scenario in batch]


class ResponsePhase:
    """
    Response generation phase - creates responses for each scenario.

    Runs fully parallel with semaphore control.
    """

    async def execute(
        self,
        policy: Policy,
        scenarios: list[Scenario],
        generator: ResponseGenerator,
        semaphore: Semaphore,
    ) -> list[Trace]:
        """
        Execute response generation.

        Args:
            policy: The policy text
            scenarios: List of scenarios to respond to
            generator: ResponseGenerator component
            semaphore: Semaphore for rate limiting

        Returns:
            List of traces with generated responses
        """
        async def limited_generate(scenario):
            async with semaphore:
                return await generator._generate_single(policy.text, scenario)

        tasks = [limited_generate(s) for s in scenarios]
        return await asyncio.gather(*tasks)


class GradingPhase:
    """
    Grading and refinement phase - evaluates and improves responses.

    Includes the refinement loop for failed traces.
    """

    async def execute(
        self,
        policy: Policy,
        traces: list[Trace],
        grader: Grader,
        refiner: Refiner,
        max_iterations: int,
        semaphore: Semaphore,
    ) -> tuple[list[Trace], float]:
        """
        Execute grading and refinement.

        Args:
            policy: The policy text
            traces: List of traces to grade
            grader: Grader component
            refiner: Refiner component
            max_iterations: Maximum refinement iterations
            semaphore: Semaphore for rate limiting

        Returns:
            Tuple of (graded traces, pass rate percentage)
        """
        async def limited_grade(trace):
            async with semaphore:
                return await grader.grade(trace, policy.text)

        async def limited_refine(trace, grade):
            async with semaphore:
                return await refiner.refine(trace, grade, policy.text)

        # Initial grading
        grade_tasks = [limited_grade(t) for t in traces]
        grades = await asyncio.gather(*grade_tasks)

        # Attach grades
        final_traces = list(traces)
        for trace, grade in zip(final_traces, grades):
            trace.grade = grade

        # Refinement loop
        for iteration in range(1, max_iterations):
            failed_indices = [i for i, t in enumerate(final_traces) if not t.grade.passed]

            if not failed_indices:
                break

            # Refine failed traces
            refine_tasks = [
                limited_refine(final_traces[i], final_traces[i].grade)
                for i in failed_indices
            ]
            refined_traces = await asyncio.gather(*refine_tasks)

            # Preserve original scenarios and update traces
            for idx, refined in zip(failed_indices, refined_traces):
                refined.scenario = final_traces[idx].scenario
                final_traces[idx] = refined

            # Re-grade refined traces
            regrade_tasks = [limited_grade(final_traces[i]) for i in failed_indices]
            new_grades = await asyncio.gather(*regrade_tasks)

            for idx, grade in zip(failed_indices, new_grades):
                final_traces[idx].grade = grade

        # Calculate pass rate
        passed_count = sum(1 for t in final_traces if t.grade and t.grade.passed)
        pass_rate = (passed_count / len(final_traces) * 100) if final_traces else 0

        return final_traces, pass_rate


class ToolCallResponsePhase:
    """
    Tool call response generation phase - creates traces with proper tool calling format.

    Uses ToolCallResponseGenerator to produce traces with:
    - System message with tool descriptions
    - User message
    - Assistant message with tool_calls (or direct response)
    - Tool response messages
    - Final assistant message
    """

    async def execute(
        self,
        policy: Policy,
        scenarios: list[Scenario],
        generator: "ToolCallResponseGenerator",
        semaphore: Semaphore,
    ) -> list[Trace]:
        """
        Execute tool call response generation.

        Args:
            policy: The policy/guidelines text
            scenarios: List of scenarios to respond to
            generator: ToolCallResponseGenerator component
            semaphore: Semaphore for rate limiting

        Returns:
            List of traces with proper tool calling format
        """
        async def limited_generate(scenario):
            async with semaphore:
                return await generator.generate_single(policy.text, scenario)

        tasks = [limited_generate(s) for s in scenarios]
        return await asyncio.gather(*tasks)


__all__ = ["PlanPhase", "ScenarioPhase", "ResponsePhase", "GradingPhase", "ToolCallResponsePhase"]
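Note: the phases above can be driven without the GenerationPipeline defined in runner.py below. A minimal sketch of composing the first three phases by hand, assuming an already-configured ComponentFactory and Policy (their construction is not part of this diff); the factory method names match those used in runner.py:

import asyncio
from synkro.pipeline.phases import PlanPhase, ScenarioPhase, ResponsePhase

async def run_first_phases(factory, policy, n_traces=20, workers=8):
    # The semaphore bounds concurrent LLM calls, mirroring GenerationPipeline.run
    semaphore = asyncio.Semaphore(workers)
    plan = await PlanPhase().execute(policy, n_traces, factory.create_planner())
    scenarios = await ScenarioPhase().execute(
        policy, plan, factory.create_scenario_generator(), semaphore
    )
    return await ResponsePhase().execute(
        policy, scenarios, factory.create_response_generator(), semaphore
    )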
synkro/pipeline/runner.py
ADDED
@@ -0,0 +1,198 @@
"""Pipeline runner that orchestrates all phases."""

import asyncio
from datetime import datetime

from synkro.core.policy import Policy
from synkro.core.dataset import Dataset
from synkro.factory import ComponentFactory
from synkro.reporting import ProgressReporter
from synkro.pipeline.phases import (
    PlanPhase,
    ScenarioPhase,
    ResponsePhase,
    GradingPhase,
    ToolCallResponsePhase,
)


class GenerationPipeline:
    """
    Orchestrates the full generation pipeline using decomposed phases.

    This class coordinates the execution of all phases and reports
    progress through the injected reporter.

    Supports both standard SFT/QA generation and TOOL_CALL generation
    with proper OpenAI function calling format.

    Examples:
        >>> pipeline = GenerationPipeline(factory, reporter, workers=10)
        >>> dataset = await pipeline.run(policy, traces=50)
    """

    def __init__(
        self,
        factory: ComponentFactory,
        reporter: ProgressReporter,
        workers: int,
        max_iterations: int = 1,
        skip_grading: bool = False,
    ):
        """
        Initialize the pipeline.

        Args:
            factory: ComponentFactory for creating pipeline components
            reporter: ProgressReporter for reporting progress
            workers: Number of concurrent workers (API calls)
            max_iterations: Maximum refinement iterations
            skip_grading: Whether to skip the grading phase
        """
        self.factory = factory
        self.reporter = reporter
        self.workers = workers
        self.max_iterations = max_iterations
        self.skip_grading = skip_grading

        # Phases
        self.plan_phase = PlanPhase()
        self.scenario_phase = ScenarioPhase()
        self.response_phase = ResponsePhase()
        self.grading_phase = GradingPhase()
        self.tool_call_response_phase = ToolCallResponsePhase()

    async def run(self, policy: Policy, traces: int, model: str, dataset_type: str) -> Dataset:
        """
        Run the full generation pipeline.

        Args:
            policy: The policy to generate from
            traces: Target number of traces
            model: Model name (for reporting)
            dataset_type: Dataset type (for reporting)

        Returns:
            Dataset with generated traces
        """
        start_time = datetime.now()
        semaphore = asyncio.Semaphore(self.workers)

        # Check if this is a tool_call dataset
        is_tool_call = dataset_type == "tool_call"

        # Create components via factory
        planner = self.factory.create_planner()
        scenario_gen = self.factory.create_scenario_generator()
        grader = self.factory.create_grader()
        refiner = self.factory.create_refiner()

        # Create appropriate response generator
        if is_tool_call and self.factory.has_tools:
            tool_call_gen = self.factory.create_tool_call_response_generator()
        else:
            response_gen = self.factory.create_response_generator()

        # Report start
        self.reporter.on_start(traces, model, dataset_type)

        # Phase 1: Planning
        plan = await self.plan_phase.execute(policy, traces, planner)
        self.reporter.on_plan_complete(plan)

        # Phase 2: Scenario generation
        scenarios = await self.scenario_phase.execute(policy, plan, scenario_gen, semaphore)
        self.reporter.on_scenarios_complete(scenarios)

        # Phase 3: Response generation (different for tool_call)
        if is_tool_call and self.factory.has_tools:
            all_traces = await self.tool_call_response_phase.execute(
                policy, scenarios, tool_call_gen, semaphore
            )
        else:
            all_traces = await self.response_phase.execute(
                policy, scenarios, response_gen, semaphore
            )
        self.reporter.on_responses_complete(list(all_traces))

        # Phase 4: Grading (optional)
        # Note: TOOL_CALL datasets now use specialized ToolCallGrader and
        # ToolCallRefiner that preserve the tool_calls format.
        pass_rate: float | None = None

        if self.skip_grading:
            final_traces = list(all_traces)
            self.reporter.on_grading_skipped()
        else:
            final_traces, pass_rate = await self._run_grading_with_reporting(
                policy, list(all_traces), grader, refiner, semaphore
            )
            self.reporter.on_grading_complete(final_traces, pass_rate)

        # Report completion
        elapsed = (datetime.now() - start_time).total_seconds()
        self.reporter.on_complete(len(final_traces), elapsed, pass_rate)

        return Dataset(traces=final_traces)

    async def _run_grading_with_reporting(
        self,
        policy: Policy,
        traces: list,
        grader,
        refiner,
        semaphore,
    ) -> tuple[list, float]:
        """Run grading phase with refinement iteration reporting."""

        async def limited_grade(trace):
            async with semaphore:
                return await grader.grade(trace, policy.text)

        async def limited_refine(trace, grade):
            async with semaphore:
                return await refiner.refine(trace, grade, policy.text)

        # Initial grading
        grade_tasks = [limited_grade(t) for t in traces]
        grades = await asyncio.gather(*grade_tasks)

        final_traces = list(traces)
        for trace, grade in zip(final_traces, grades):
            trace.grade = grade

        # Refinement loop with reporting
        for iteration in range(1, self.max_iterations):
            failed_indices = [i for i, t in enumerate(final_traces) if not t.grade.passed]

            if not failed_indices:
                break

            self.reporter.on_refinement_start(iteration + 1, len(failed_indices))

            # Refine
            refine_tasks = [
                limited_refine(final_traces[i], final_traces[i].grade)
                for i in failed_indices
            ]
            refined_traces = await asyncio.gather(*refine_tasks)

            for idx, refined in zip(failed_indices, refined_traces):
                refined.scenario = final_traces[idx].scenario
                final_traces[idx] = refined

            # Re-grade
            regrade_tasks = [limited_grade(final_traces[i]) for i in failed_indices]
            new_grades = await asyncio.gather(*regrade_tasks)

            for idx, grade in zip(failed_indices, new_grades):
                final_traces[idx].grade = grade

        passed_count = sum(1 for t in final_traces if t.grade and t.grade.passed)
        pass_rate = (passed_count / len(final_traces) * 100) if final_traces else 0

        return final_traces, pass_rate


__all__ = ["GenerationPipeline"]
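Note: a fuller invocation sketch than the class docstring's example, since run() also takes model and dataset_type. It assumes factory, reporter, and policy are constructed elsewhere (not shown in this diff); the model string and dataset_type value are illustrative only.

import asyncio
from synkro.pipeline.runner import GenerationPipeline

async def main(factory, reporter, policy):
    pipeline = GenerationPipeline(factory, reporter, workers=10, max_iterations=3)
    # dataset_type selects the "tool_call" branch vs. the standard ResponsePhase path;
    # model is only used for reporting (both values here are illustrative).
    return await pipeline.run(policy, traces=50, model="gpt-5-mini", dataset_type="sft")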
synkro/pipelines.py
ADDED
@@ -0,0 +1,105 @@
"""Pipeline creation utilities.

Usage:
    from synkro.pipelines import create_pipeline
    from synkro.models.openai import OpenAI
    from synkro.types import DatasetType

    pipeline = create_pipeline(
        model=OpenAI.GPT_5_MINI,
        dataset_type=DatasetType.SFT,
    )
    dataset = pipeline.generate("policy text", traces=50)

    # Tool calling pipeline
    from synkro import ToolDefinition

    web_search = ToolDefinition(
        name="web_search",
        description="Search the web",
        parameters={"type": "object", "properties": {"query": {"type": "string"}}}
    )

    pipeline = create_pipeline(
        dataset_type=DatasetType.TOOL_CALL,
        tools=[web_search],
    )
    dataset = pipeline.generate("Search guidelines", traces=50)
"""

from typing import TYPE_CHECKING

from synkro.generation.generator import Generator
from synkro.types import DatasetType
from synkro.models import Model, OpenAI
from synkro.reporting import ProgressReporter

if TYPE_CHECKING:
    from synkro.types.tool import ToolDefinition


def create_pipeline(
    model: Model = OpenAI.GPT_5_MINI,
    dataset_type: DatasetType = DatasetType.SFT,
    grading_model: Model = OpenAI.GPT_52,
    max_iterations: int = 3,
    skip_grading: bool = False,
    reporter: ProgressReporter | None = None,
    tools: list["ToolDefinition"] | None = None,
) -> Generator:
    """
    Create a pipeline for generating training datasets.

    Args:
        model: Model enum for generation (default: OpenAI.GPT_5_MINI)
        dataset_type: Type of dataset - QA, SFT, or TOOL_CALL (default: SFT)
        grading_model: Model enum for grading (default: OpenAI.GPT_52)
        max_iterations: Max refinement iterations per trace (default: 3)
        skip_grading: Skip grading phase for faster generation (default: False)
        reporter: Progress reporter (default: RichReporter for console output)
        tools: List of ToolDefinition for TOOL_CALL dataset type

    Returns:
        Generator instance ready to use

    Example:
        >>> from synkro.pipelines import create_pipeline
        >>> from synkro.models.openai import OpenAI
        >>> from synkro.types import DatasetType
        >>>
        >>> pipeline = create_pipeline(
        ...     model=OpenAI.GPT_5_MINI,
        ...     dataset_type=DatasetType.SFT,
        ... )
        >>> dataset = pipeline.generate("policy text", traces=50)
        >>> dataset.save("training.jsonl")

        >>> # Silent mode for embedding
        >>> from synkro.reporting import SilentReporter
        >>> pipeline = create_pipeline(reporter=SilentReporter())

        >>> # Tool calling dataset
        >>> from synkro import ToolDefinition
        >>> search_tool = ToolDefinition(
        ...     name="web_search",
        ...     description="Search the web for information",
        ...     parameters={"type": "object", "properties": {"query": {"type": "string"}}}
        ... )
        >>> pipeline = create_pipeline(
        ...     dataset_type=DatasetType.TOOL_CALL,
        ...     tools=[search_tool],
        ... )
        >>> dataset = pipeline.generate("Search guidelines", traces=50)
    """
    return Generator(
        dataset_type=dataset_type,
        generation_model=model,
        grading_model=grading_model,
        max_iterations=max_iterations,
        skip_grading=skip_grading,
        reporter=reporter,
        tools=tools,
    )


__all__ = ["create_pipeline"]
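Note: a short sketch of the skip_grading fast path exposed by create_pipeline, combined with the SilentReporter shown in the docstring. The policy text and output filename are illustrative only.

from synkro.pipelines import create_pipeline
from synkro.reporting import SilentReporter

# Skips the grading/refinement loop entirely (GenerationPipeline's skip_grading branch)
pipeline = create_pipeline(skip_grading=True, reporter=SilentReporter())
dataset = pipeline.generate("Refunds are allowed within 30 days of purchase.", traces=10)
dataset.save("draft.jsonl")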
synkro/prompts/__init__.py
ADDED
@@ -0,0 +1,44 @@
"""Prompt templates and customizable prompt classes for Synkro."""

from synkro.prompts.base import (
    SystemPrompt,
    ScenarioPrompt,
    ResponsePrompt,
    GradePrompt,
    RefinePrompt,
    PlanPrompt,
)
from synkro.prompts.templates import (
    SYSTEM_PROMPT,
    SCENARIO_GENERATOR_PROMPT,
    CATEGORY_SCENARIO_PROMPT,
    POLICY_PLANNING_PROMPT,
    POLICY_COMPLEXITY_PROMPT,
    BATCHED_RESPONSE_PROMPT,
    BATCHED_GRADER_PROMPT,
    BATCHED_REFINER_PROMPT,
    SINGLE_RESPONSE_PROMPT,
    SINGLE_GRADE_PROMPT,
)

__all__ = [
    # Prompt classes
    "SystemPrompt",
    "ScenarioPrompt",
    "ResponsePrompt",
    "GradePrompt",
    "RefinePrompt",
    "PlanPrompt",
    # Raw templates
    "SYSTEM_PROMPT",
    "SCENARIO_GENERATOR_PROMPT",
    "CATEGORY_SCENARIO_PROMPT",
    "POLICY_PLANNING_PROMPT",
    "POLICY_COMPLEXITY_PROMPT",
    "BATCHED_RESPONSE_PROMPT",
    "BATCHED_GRADER_PROMPT",
    "BATCHED_REFINER_PROMPT",
    "SINGLE_RESPONSE_PROMPT",
    "SINGLE_GRADE_PROMPT",
]