synkro-0.4.12-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. synkro/__init__.py +179 -0
  2. synkro/advanced.py +186 -0
  3. synkro/cli.py +128 -0
  4. synkro/core/__init__.py +7 -0
  5. synkro/core/checkpoint.py +250 -0
  6. synkro/core/dataset.py +402 -0
  7. synkro/core/policy.py +337 -0
  8. synkro/errors.py +178 -0
  9. synkro/examples/__init__.py +148 -0
  10. synkro/factory.py +276 -0
  11. synkro/formatters/__init__.py +12 -0
  12. synkro/formatters/qa.py +98 -0
  13. synkro/formatters/sft.py +90 -0
  14. synkro/formatters/tool_call.py +127 -0
  15. synkro/generation/__init__.py +9 -0
  16. synkro/generation/follow_ups.py +134 -0
  17. synkro/generation/generator.py +220 -0
  18. synkro/generation/golden_responses.py +244 -0
  19. synkro/generation/golden_scenarios.py +276 -0
  20. synkro/generation/golden_tool_responses.py +416 -0
  21. synkro/generation/logic_extractor.py +126 -0
  22. synkro/generation/multiturn_responses.py +177 -0
  23. synkro/generation/planner.py +131 -0
  24. synkro/generation/responses.py +189 -0
  25. synkro/generation/scenarios.py +90 -0
  26. synkro/generation/tool_responses.py +376 -0
  27. synkro/generation/tool_simulator.py +114 -0
  28. synkro/interactive/__init__.py +12 -0
  29. synkro/interactive/hitl_session.py +77 -0
  30. synkro/interactive/logic_map_editor.py +173 -0
  31. synkro/interactive/rich_ui.py +205 -0
  32. synkro/llm/__init__.py +7 -0
  33. synkro/llm/client.py +235 -0
  34. synkro/llm/rate_limits.py +95 -0
  35. synkro/models/__init__.py +43 -0
  36. synkro/models/anthropic.py +26 -0
  37. synkro/models/google.py +19 -0
  38. synkro/models/openai.py +31 -0
  39. synkro/modes/__init__.py +15 -0
  40. synkro/modes/config.py +66 -0
  41. synkro/modes/qa.py +18 -0
  42. synkro/modes/sft.py +18 -0
  43. synkro/modes/tool_call.py +18 -0
  44. synkro/parsers.py +442 -0
  45. synkro/pipeline/__init__.py +20 -0
  46. synkro/pipeline/phases.py +592 -0
  47. synkro/pipeline/runner.py +424 -0
  48. synkro/pipelines.py +123 -0
  49. synkro/prompts/__init__.py +57 -0
  50. synkro/prompts/base.py +167 -0
  51. synkro/prompts/golden_templates.py +474 -0
  52. synkro/prompts/interactive_templates.py +65 -0
  53. synkro/prompts/multiturn_templates.py +156 -0
  54. synkro/prompts/qa_templates.py +97 -0
  55. synkro/prompts/templates.py +281 -0
  56. synkro/prompts/tool_templates.py +201 -0
  57. synkro/quality/__init__.py +14 -0
  58. synkro/quality/golden_refiner.py +163 -0
  59. synkro/quality/grader.py +153 -0
  60. synkro/quality/multiturn_grader.py +150 -0
  61. synkro/quality/refiner.py +137 -0
  62. synkro/quality/tool_grader.py +126 -0
  63. synkro/quality/tool_refiner.py +128 -0
  64. synkro/quality/verifier.py +228 -0
  65. synkro/reporting.py +537 -0
  66. synkro/schemas.py +472 -0
  67. synkro/types/__init__.py +41 -0
  68. synkro/types/core.py +126 -0
  69. synkro/types/dataset_type.py +30 -0
  70. synkro/types/logic_map.py +345 -0
  71. synkro/types/tool.py +94 -0
  72. synkro-0.4.12.data/data/examples/__init__.py +148 -0
  73. synkro-0.4.12.dist-info/METADATA +258 -0
  74. synkro-0.4.12.dist-info/RECORD +77 -0
  75. synkro-0.4.12.dist-info/WHEEL +4 -0
  76. synkro-0.4.12.dist-info/entry_points.txt +2 -0
  77. synkro-0.4.12.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,90 @@
+ """Scenario generation from policy documents."""
+
+ from synkro.llm.client import LLM
+ from synkro.models import Model, OpenAI
+ from synkro.types.core import Scenario, Category
+ from synkro.prompts.templates import SCENARIO_GENERATOR_PROMPT, CATEGORY_SCENARIO_PROMPT
+ from synkro.schemas import ScenariosArray
+
+
+ class ScenarioGenerator:
+     """
+     Generates realistic scenarios from policy documents.
+
+     Creates diverse scenarios that test different aspects of policy
+     understanding and compliance.
+
+     Examples:
+         >>> gen = ScenarioGenerator()
+         >>> scenarios = await gen.generate(policy.text, count=50)
+         >>> for s in scenarios:
+         ...     print(s.description)
+     """
+
+     def __init__(self, llm: LLM | None = None, model: Model = OpenAI.GPT_4O_MINI):
+         """
+         Initialize the scenario generator.
+
+         Args:
+             llm: LLM client to use (creates one if not provided)
+             model: Model to use if creating LLM
+         """
+         self.llm = llm or LLM(model=model)
+         self.prompt_template = SCENARIO_GENERATOR_PROMPT
+
+     async def generate(
+         self,
+         policy_text: str,
+         count: int,
+         category: Category | None = None,
+     ) -> list[Scenario]:
+         """
+         Generate scenarios from the policy.
+
+         Args:
+             policy_text: The policy text
+             count: Number of scenarios to generate
+             category: Optional category to focus on
+
+         Returns:
+             List of generated scenarios
+         """
+         if category:
+             prompt = self._build_category_prompt(policy_text, count, category)
+         else:
+             prompt = self._build_general_prompt(policy_text, count)
+
+         # Use structured output for reliable scenario generation
+         parsed = await self.llm.generate_structured(prompt, ScenariosArray)
+         return [
+             Scenario(
+                 description=s.scenario,
+                 context=s.context,
+                 category=category.name if category else None,
+             )
+             for s in parsed.scenarios[:count]
+         ]
+
+     def _build_general_prompt(self, policy_text: str, count: int) -> str:
+         """Build prompt for general scenario generation."""
+         return f"""{self.prompt_template}
+
+ POLICY:
+ {policy_text}
+
+ Generate exactly {count} diverse scenarios."""
+
+     def _build_category_prompt(
+         self, policy_text: str, count: int, category: Category
+     ) -> str:
+         """Build prompt for category-specific scenario generation."""
+         return f"""{CATEGORY_SCENARIO_PROMPT}
+
+ Category: {category.name}
+ Description: {category.description}
+
+ POLICY:
+ {policy_text}
+
+ Generate exactly {count} scenarios for the "{category.name}" category."""
+
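The generator above is a small async API: construct it, then await generate() with the policy text. A minimal usage sketch, assuming a Category constructor that takes the same name/description fields the generator reads, and a placeholder refund_policy.txt for the policy text:

import asyncio

from synkro.generation.scenarios import ScenarioGenerator
from synkro.types.core import Category


async def main() -> None:
    # Placeholder policy document; any guidelines text works here.
    with open("refund_policy.txt") as f:
        policy_text = f.read()

    gen = ScenarioGenerator()  # defaults to OpenAI.GPT_4O_MINI

    # General scenarios drawn from the whole policy
    scenarios = await gen.generate(policy_text, count=10)

    # Scenarios focused on one category (assumed Category constructor)
    edge = await gen.generate(
        policy_text,
        count=5,
        category=Category(name="edge_cases", description="Ambiguous refund requests"),
    )
    for s in scenarios + edge:
        print(s.category, "-", s.description)


asyncio.run(main())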
@@ -0,0 +1,376 @@
+ """Tool call response generation with JSON mode for structured outputs."""
+
+ import json
+ import uuid
+ from typing import TYPE_CHECKING
+
+ from pydantic import BaseModel, Field
+
+ from synkro.llm.client import LLM
+ from synkro.models import Model, OpenAI
+ from synkro.types.core import Scenario, Trace, Message
+ from synkro.types.tool import ToolCall, ToolFunction, ToolDefinition
+
+ if TYPE_CHECKING:
+     from synkro.generation.tool_simulator import ToolSimulator
+
+
+ # =============================================================================
+ # Pydantic models for structured JSON output
+ # =============================================================================
+
+ class ToolCallRequest(BaseModel):
+     """A single tool call request from the LLM."""
+
+     name: str = Field(description="Name of the tool to call")
+     arguments: str = Field(description="Arguments as a JSON string, e.g. '{\"query\": \"test\"}'")
+
+     def get_arguments_dict(self) -> dict:
+         """Parse the arguments JSON string into a dict."""
+         return json.loads(self.arguments)
+
+
+ class ToolCallDecision(BaseModel):
+     """
+     Structured output for the LLM's tool-calling decision.
+
+     The LLM outputs this to indicate whether tools are needed
+     and which ones to call.
+     """
+
+     needs_tool: bool = Field(
+         description="Whether a tool call is needed to answer the user's request"
+     )
+     reasoning: str = Field(
+         description="Brief explanation of why a tool is/isn't needed"
+     )
+     tool_calls: list[ToolCallRequest] = Field(
+         default_factory=list,
+         description="List of tool calls to make (empty if needs_tool is False)"
+     )
+     direct_response: str | None = Field(
+         default=None,
+         description="Direct response if no tool is needed"
+     )
+
+
+ class FinalSynthesis(BaseModel):
+     """Structured output for synthesizing tool results into a response."""
+
+     response: str = Field(
+         description="Natural response incorporating the tool results"
+     )
+
+
+ # =============================================================================
+ # Tool Call Response Generator
+ # =============================================================================
+
+ class ToolCallResponseGenerator:
+     """
+     Generates tool call training traces using JSON mode for structured outputs.
+
+     Produces traces in OpenAI function calling format:
+     - system message with tool descriptions
+     - user message with request
+     - assistant message with tool_calls (or direct response)
+     - tool response messages
+     - final assistant message synthesizing results
+
+     Example:
+         >>> gen = ToolCallResponseGenerator(
+         ...     tools=[web_search_tool, db_tool],
+         ...     llm=LLM(model=OpenAI.GPT_4O),
+         ...     simulator=tool_simulator,
+         ... )
+         >>> trace = await gen.generate_single(policy_text, scenario)
+     """
+
+     def __init__(
+         self,
+         tools: list[ToolDefinition],
+         llm: LLM | None = None,
+         simulator: "ToolSimulator | None" = None,
+         model: Model = OpenAI.GPT_4O_MINI,
+     ):
+         """
+         Initialize the tool call response generator.
+
+         Args:
+             tools: List of available tool definitions
+             llm: LLM client to use (creates one if not provided)
+             simulator: Tool simulator for generating tool responses
+             model: Model to use if creating LLM
+         """
+         self.tools = tools
+         self.tools_by_name = {t.name: t for t in tools}
+         self.llm = llm or LLM(model=model)
+         self.simulator = simulator
+
+     def _get_tools_description(self) -> str:
+         """Get a formatted description of all tools for the system prompt."""
+         descriptions = []
+         for tool in self.tools:
+             descriptions.append(tool.to_system_prompt())
+         return "\n\n".join(descriptions)
+
+     def _get_tools_json_schema(self) -> str:
+         """Get a JSON schema representation of the tools."""
+         tools_json = [tool.to_openai_format() for tool in self.tools]
+         return json.dumps(tools_json, indent=2)
+
+     def _generate_call_id(self) -> str:
+         """Generate a unique tool call ID."""
+         return f"call_{uuid.uuid4().hex[:12]}"
+
+     async def generate_single(
+         self,
+         policy_text: str,
+         scenario: Scenario,
+         target_turns: int = 1,
+     ) -> Trace:
+         """
+         Generate a single tool call trace.
+
+         Args:
+             policy_text: The policy/guidelines text
+             scenario: The scenario to respond to
+             target_turns: Number of conversation turns (1 for single-turn).
+                 Note: Multi-turn tool calling is not yet fully implemented.
+                 For now, target_turns > 1 will still generate a single turn.
+
+         Returns:
+             Trace with proper tool calling format
+         """
+         # TODO: Implement multi-turn tool calling support
+         # For now, we generate single-turn regardless of target_turns
+         tools_desc = self._get_tools_description()
+
+         # Step 1: Get LLM decision on tool usage
+         decision = await self._get_tool_decision(policy_text, scenario, tools_desc)
+
+         # Step 2: Build the message sequence
+         messages = await self._build_message_sequence(
+             policy_text, scenario, tools_desc, decision
+         )
+
+         return Trace(messages=messages, scenario=scenario)
+
+     async def _get_tool_decision(
+         self,
+         policy_text: str,
+         scenario: Scenario,
+         tools_desc: str,
+     ) -> ToolCallDecision:
+         """
+         Get the LLM's decision on whether to use tools.
+
+         Uses JSON mode to force structured output.
+         """
+         prompt = f"""You are a customer support agent deciding whether to use tools.
+
+ AVAILABLE TOOLS:
+ {tools_desc}
+
+ TOOL USAGE GUIDELINES:
+ {policy_text}
+
+ USER REQUEST:
+ {scenario.description}
+
+ CONTEXT:
+ {scenario.context}
+
+ Analyze this request and decide:
+ 1. Does this require calling a tool, or can you answer directly?
+ 2. If tools are needed, which ones and with what arguments?
+ 3. If no tools are needed, provide the direct response.
+
+ Important rules:
+ - Only call tools when necessary (don't call for information you already know)
+ - Use correct tool names and parameter types
+ - If multiple tools are needed, list them all
+ - Provide clear reasoning for your decision"""
+
+         return await self.llm.generate_structured(prompt, ToolCallDecision)
+
+     async def _build_message_sequence(
+         self,
+         policy_text: str,
+         scenario: Scenario,
+         tools_desc: str,
+         decision: ToolCallDecision,
+     ) -> list[Message]:
+         """Build the full message sequence based on the tool decision."""
+         messages = []
+
+         # System message with tool descriptions
+         system_content = f"""You are a helpful customer support agent. You have access to the following tools:
+
+ {tools_desc}
+
+ Follow the tool usage guidelines provided to assist customers effectively."""
+
+         messages.append(Message(role="system", content=system_content))
+
+         # User message
+         messages.append(Message(role="user", content=scenario.description))
+
+         if decision.needs_tool and decision.tool_calls:
+             # Assistant message with tool_calls
+             tool_calls = []
+             for tc in decision.tool_calls:
+                 call_id = self._generate_call_id()
+                 tool_calls.append(ToolCall(
+                     id=call_id,
+                     type="function",
+                     function=ToolFunction(
+                         name=tc.name,
+                         arguments=tc.arguments  # Already a JSON string
+                     )
+                 ))
+
+             messages.append(Message(
+                 role="assistant",
+                 content=None,
+                 tool_calls=tool_calls
+             ))
+
+             # Tool response messages
+             tool_results = []
+             for tc in tool_calls:
+                 result = await self._simulate_tool_call(tc)
+                 tool_results.append(result)
+
+                 messages.append(Message(
+                     role="tool",
+                     content=result,
+                     tool_call_id=tc.id
+                 ))
+
+             # Final assistant message synthesizing results
+             final_response = await self._synthesize_response(
+                 scenario.description, tool_calls, tool_results, policy_text
+             )
+             messages.append(Message(role="assistant", content=final_response))
+
+         else:
+             # Direct response without tools
+             response = decision.direct_response or await self._generate_direct_response(
+                 policy_text, scenario, tools_desc
+             )
+             messages.append(Message(role="assistant", content=response))
+
+         return messages
+
+     async def _simulate_tool_call(self, tool_call: ToolCall) -> str:
+         """Simulate a tool response."""
+         if self.simulator:
+             return await self.simulator.simulate(tool_call)
+
+         # Fallback: generate a mock response based on the tool definition
+         tool_name = tool_call.function.name
+         if tool_name in self.tools_by_name:
+             tool = self.tools_by_name[tool_name]
+             if tool.mock_responses:
+                 # Use a mock response
+                 import random
+                 return random.choice(tool.mock_responses)
+
+         # Default mock response
+         args = json.loads(tool_call.function.arguments)
+         return json.dumps({
+             "status": "success",
+             "result": f"Simulated response for {tool_name}",
+             "query": args
+         })
+
+     async def _synthesize_response(
+         self,
+         user_request: str,
+         tool_calls: list[ToolCall],
+         tool_results: list[str],
+         policy_text: str,
+     ) -> str:
+         """Synthesize a natural response from tool results."""
+         # Build context of tool calls and results
+         tools_context = []
+         for tc, result in zip(tool_calls, tool_results):
+             tools_context.append(f"Tool: {tc.function.name}")
+             tools_context.append(f"Arguments: {tc.function.arguments}")
+             tools_context.append(f"Result: {result}")
+             tools_context.append("")
+
+         prompt = f"""Based on the tool results, provide a helpful response to the user.
+
+ USER REQUEST:
+ {user_request}
+
+ TOOL RESULTS:
+ {chr(10).join(tools_context)}
+
+ GUIDELINES:
+ {policy_text}
+
+ Synthesize the tool results into a natural, helpful response.
+ - Incorporate the information from the tool results
+ - Don't expose raw JSON or technical details
+ - Be conversational and helpful
+ - If a tool returned an error, acknowledge it and offer alternatives"""
+
+         synthesis = await self.llm.generate_structured(prompt, FinalSynthesis)
+         return synthesis.response
+
+     async def _generate_direct_response(
+         self,
+         policy_text: str,
+         scenario: Scenario,
+         tools_desc: str,
+     ) -> str:
+         """Generate a direct response when no tools are needed."""
+         prompt = f"""Provide a helpful response to the user's request.
+
+ USER REQUEST:
+ {scenario.description}
+
+ CONTEXT:
+ {scenario.context}
+
+ GUIDELINES:
+ {policy_text}
+
+ Note: No tools are needed for this request. Provide a direct, helpful response
+ based on your knowledge and the guidelines."""
+
+         synthesis = await self.llm.generate_structured(prompt, FinalSynthesis)
+         return synthesis.response
+
+     async def generate(
+         self,
+         policy_text: str,
+         scenarios: list[Scenario],
+     ) -> list[Trace]:
+         """
+         Generate traces for multiple scenarios.
+
+         Args:
+             policy_text: The policy/guidelines text
+             scenarios: List of scenarios to respond to
+
+         Returns:
+             List of traces with tool calling format
+         """
+         traces = []
+         for scenario in scenarios:
+             trace = await self.generate_single(policy_text, scenario)
+             traces.append(trace)
+         return traces
+
+
+ __all__ = [
+     "ToolCallResponseGenerator",
+     "ToolCallDecision",
+     "ToolCallRequest",
+     "FinalSynthesis",
+ ]
+
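This module drives a three-step flow per scenario: a structured ToolCallDecision, simulated tool results, then a FinalSynthesis. A minimal wiring sketch, assuming ToolDefinition accepts the name, description, parameters, and mock_responses fields read elsewhere in this file, and reusing policy_text/scenarios from the scenario-generation sketch above:

import asyncio

from synkro.generation.tool_responses import ToolCallResponseGenerator
from synkro.generation.tool_simulator import ToolSimulator
from synkro.llm.client import LLM
from synkro.models import OpenAI
from synkro.types.tool import ToolDefinition

# Hypothetical tool; field names mirror the attributes this module reads
# (name, description, parameters, mock_responses).
order_lookup = ToolDefinition(
    name="order_lookup",
    description="Look up an order by its ID",
    parameters={
        "type": "object",
        "properties": {"order_id": {"type": "string"}},
        "required": ["order_id"],
    },
    mock_responses=['{"order_id": "A-1001", "status": "shipped"}'],
)

llm = LLM(model=OpenAI.GPT_4O_MINI)
gen = ToolCallResponseGenerator(
    tools=[order_lookup],
    llm=llm,
    simulator=ToolSimulator(tools=[order_lookup], llm=llm),
)

# policy_text and scenarios as produced in the scenario-generation step above
traces = asyncio.run(gen.generate(policy_text, scenarios))
for msg in traces[0].messages:
    print(msg.role, msg.content or msg.tool_calls)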
@@ -0,0 +1,114 @@
+ """Tool response simulator for training data generation."""
+
+ import json
+ import uuid
+ from typing import TYPE_CHECKING
+
+ from synkro.prompts.tool_templates import TOOL_SIMULATION_PROMPT
+
+ if TYPE_CHECKING:
+     from synkro.llm.client import LLM
+     from synkro.types.tool import ToolDefinition, ToolCall
+
+
+ class ToolSimulator:
+     """
+     Simulates tool responses for training data generation.
+
+     Uses an LLM to generate realistic, contextual tool responses
+     based on tool definitions and call arguments.
+
+     Example:
+         >>> from synkro.types.tool import ToolDefinition, ToolCall, ToolFunction
+         >>> simulator = ToolSimulator(tools=[web_search_tool], llm=llm)
+         >>> call = ToolCall(
+         ...     id="call_1",
+         ...     function=ToolFunction(name="web_search", arguments='{"query": "weather NYC"}')
+         ... )
+         >>> response = await simulator.simulate(call)
+         >>> print(response)
+         "NYC: 72°F, sunny with a high of 75°F expected"
+     """
+
+     def __init__(self, tools: list["ToolDefinition"], llm: "LLM"):
+         """
+         Initialize the simulator.
+
+         Args:
+             tools: List of available tool definitions
+             llm: LLM client for generating responses
+         """
+         self.tools = {t.name: t for t in tools}
+         self.llm = llm
+
+     async def simulate(self, tool_call: "ToolCall") -> str:
+         """
+         Simulate a tool response for the given call.
+
+         Args:
+             tool_call: The tool call to simulate
+
+         Returns:
+             Simulated tool response content
+         """
+         tool_name = tool_call.function.name
+
+         if tool_name not in self.tools:
+             return json.dumps({"error": f"Unknown tool: {tool_name}"})
+
+         tool = self.tools[tool_name]
+
+         # Format mock responses for the prompt
+         mock_responses = "\n".join(
+             f"- {r}" for r in tool.mock_responses
+         ) if tool.mock_responses else "No example responses provided"
+
+         prompt = TOOL_SIMULATION_PROMPT.format(
+             TOOL_NAME=tool.name,
+             TOOL_DESCRIPTION=tool.description,
+             TOOL_PARAMETERS=json.dumps(tool.parameters, indent=2),
+             ARGUMENTS=tool_call.function.arguments,
+             MOCK_RESPONSES=mock_responses,
+         )
+
+         response = await self.llm.generate(prompt)
+         return response.strip()
+
+     async def simulate_batch(self, tool_calls: list["ToolCall"]) -> list[str]:
+         """
+         Simulate responses for multiple tool calls.
+
+         Args:
+             tool_calls: List of tool calls to simulate
+
+         Returns:
+             List of simulated responses in order
+         """
+         import asyncio
+         return await asyncio.gather(*[self.simulate(tc) for tc in tool_calls])
+
+     def generate_call_id(self) -> str:
+         """Generate a unique tool call ID."""
+         return f"call_{uuid.uuid4().hex[:12]}"
+
+     def get_tools_description(self) -> str:
+         """
+         Get a formatted description of all available tools.
+
+         Returns:
+             Formatted string describing all tools
+         """
+         descriptions = []
+         for tool in self.tools.values():
+             descriptions.append(tool.to_system_prompt())
+         return "\n\n".join(descriptions)
+
+     def get_tools_json(self) -> list[dict]:
+         """
+         Get tools in OpenAI function format.
+
+         Returns:
+             List of tool definitions in OpenAI format
+         """
+         return [tool.to_openai_format() for tool in self.tools.values()]
+
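simulate_batch fans the calls out concurrently with asyncio.gather, one LLM round-trip per call, and returns results in call order. A short sketch, reusing the hypothetical order_lookup definition from the previous example:

import asyncio

from synkro.generation.tool_simulator import ToolSimulator
from synkro.llm.client import LLM
from synkro.models import OpenAI
from synkro.types.tool import ToolCall, ToolFunction

llm = LLM(model=OpenAI.GPT_4O_MINI)
simulator = ToolSimulator(tools=[order_lookup], llm=llm)  # order_lookup as defined above

calls = [
    ToolCall(
        id=simulator.generate_call_id(),
        type="function",
        function=ToolFunction(name="order_lookup", arguments='{"order_id": "A-1001"}'),
    ),
    ToolCall(
        id=simulator.generate_call_id(),
        type="function",
        function=ToolFunction(name="order_lookup", arguments='{"order_id": "B-2002"}'),
    ),
]

# Results come back in the same order as the calls.
results = asyncio.run(simulator.simulate_batch(calls))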
@@ -0,0 +1,12 @@
+ """Interactive Human-in-the-Loop components for Logic Map editing."""
+
+ from synkro.interactive.logic_map_editor import LogicMapEditor
+ from synkro.interactive.hitl_session import HITLSession
+ from synkro.interactive.rich_ui import LogicMapDisplay, InteractivePrompt
+
+ __all__ = [
+     "LogicMapEditor",
+     "HITLSession",
+     "LogicMapDisplay",
+     "InteractivePrompt",
+ ]
@@ -0,0 +1,77 @@
+ """Human-in-the-Loop session state management."""
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from synkro.types.logic_map import LogicMap
+
+
+ @dataclass
+ class HITLSession:
+     """
+     Tracks state of an interactive Logic Map editing session.
+
+     Supports undo/reset operations and maintains edit history.
+
+     Example:
+         >>> session = HITLSession(original_logic_map=logic_map)
+         >>> session.apply_change("Added rule R009", new_logic_map)
+         >>> session.undo()  # Reverts to previous state
+         >>> session.reset()  # Reverts to original
+     """
+
+     original_logic_map: "LogicMap"
+     current_logic_map: "LogicMap" = field(init=False)
+     history: list[tuple[str, "LogicMap"]] = field(default_factory=list)
+
+     def __post_init__(self) -> None:
+         """Initialize current_logic_map from original."""
+         self.current_logic_map = self.original_logic_map
+
+     def apply_change(self, feedback: str, new_map: "LogicMap") -> None:
+         """
+         Record a change in history and update current state.
+
+         Args:
+             feedback: The user feedback that triggered this change
+             new_map: The new Logic Map after applying the change
+         """
+         self.history.append((feedback, self.current_logic_map))
+         self.current_logic_map = new_map
+
+     def undo(self) -> "LogicMap | None":
+         """
+         Undo the last change and return the restored Logic Map.
+
+         Returns:
+             The previous Logic Map, or None if no history exists
+         """
+         if self.history:
+             _, previous_map = self.history.pop()
+             self.current_logic_map = previous_map
+             return self.current_logic_map
+         return None
+
+     def reset(self) -> "LogicMap":
+         """
+         Reset to the original Logic Map, clearing all history.
+
+         Returns:
+             The original Logic Map
+         """
+         self.history.clear()
+         self.current_logic_map = self.original_logic_map
+         return self.current_logic_map
+
+     @property
+     def can_undo(self) -> bool:
+         """Check if undo is available."""
+         return len(self.history) > 0
+
+     @property
+     def change_count(self) -> int:
+         """Number of changes made in this session."""
+         return len(self.history)
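A short sketch of the session lifecycle, with logic_map, revised_map_1, and revised_map_2 standing in for real LogicMap instances:

from synkro.interactive import HITLSession

# logic_map, revised_map_1, revised_map_2 are placeholder LogicMap instances
session = HITLSession(original_logic_map=logic_map)

session.apply_change("Merged duplicate rules R003/R007", revised_map_1)
session.apply_change("Added escalation rule R009", revised_map_2)
assert session.change_count == 2

session.undo()              # back to revised_map_1
if session.can_undo:
    session.undo()          # back to the original map
restored = session.reset()  # clears history; returns the original LogicMap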