slimzero 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- slimzero/__init__.py +53 -0
- slimzero/__main__.py +124 -0
- slimzero/agent/__init__.py +0 -0
- slimzero/agent/gsd.py +328 -0
- slimzero/agent/ralph.py +357 -0
- slimzero/core.py +421 -0
- slimzero/dashboard/__init__.py +237 -0
- slimzero/exceptions.py +202 -0
- slimzero/plugins/__init__.py +154 -0
- slimzero/post/__init__.py +0 -0
- slimzero/post/flagger.py +221 -0
- slimzero/post/logger.py +228 -0
- slimzero/post/validator.py +176 -0
- slimzero/schemas.py +232 -0
- slimzero/stages/__init__.py +0 -0
- slimzero/stages/budget.py +290 -0
- slimzero/stages/few_shot.py +238 -0
- slimzero/stages/hallucination.py +226 -0
- slimzero/stages/history.py +210 -0
- slimzero/stages/injector.py +155 -0
- slimzero/stages/intent.py +245 -0
- slimzero/stages/rewriter.py +315 -0
- slimzero/stages/semantic_guard.py +204 -0
- slimzero-0.1.0.dist-info/METADATA +581 -0
- slimzero-0.1.0.dist-info/RECORD +28 -0
- slimzero-0.1.0.dist-info/WHEEL +4 -0
- slimzero-0.1.0.dist-info/entry_points.txt +2 -0
- slimzero-0.1.0.dist-info/licenses/LICENSE +21 -0
slimzero/__init__.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"""
SlimZero - Zero-overhead prompt compression, response minimisation,
hallucination guarding, and autonomous agent orchestration.

SlimZero is a model-agnostic Python middleware library that sits between
any application and any LLM API.
"""

# Package metadata.
__version__ = "0.1.0"
__author__ = "SlimZero Contributors"
__license__ = "MIT"

# Main pipeline entry point.
from slimzero.core import SlimZero

# Data schemas exchanged between pipeline stages and returned to callers.
from slimzero.schemas import (
    IntentSchema,
    StageInput,
    StageOutput,
    SlimZeroResult,
)

# Exception hierarchy re-exported at package level so callers can catch
# SlimZero errors without importing the submodule directly.
from slimzero.exceptions import (
    SlimZeroError,
    SlimZeroInputError,
    SlimZeroRewriteError,
    SlimZeroSemanticRejection,
    SlimZeroBudgetWarning,
    SlimZeroResponseWarning,
    SlimZeroHallucinationFlag,
    SlimZeroAgentError,
    SlimZeroCircuitBreaker,
    SlimZeroDriftHalt,
    SlimZeroToolValidationError,
    SlimZeroHumanEscalation,
)

# Explicit public API of the package.
__all__ = [
    "SlimZero",
    "IntentSchema",
    "StageInput",
    "StageOutput",
    "SlimZeroResult",
    "SlimZeroError",
    "SlimZeroInputError",
    "SlimZeroRewriteError",
    "SlimZeroSemanticRejection",
    "SlimZeroBudgetWarning",
    "SlimZeroResponseWarning",
    "SlimZeroHallucinationFlag",
    "SlimZeroAgentError",
    "SlimZeroCircuitBreaker",
    "SlimZeroDriftHalt",
    "SlimZeroToolValidationError",
    "SlimZeroHumanEscalation",
]
|
slimzero/__main__.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SlimZero CLI Entry Point
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
python -m slimzero "Your prompt here"
|
|
6
|
+
slimzero "Your prompt here"
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import argparse
|
|
10
|
+
import sys
|
|
11
|
+
|
|
12
|
+
from slimzero import SlimZero
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _build_parser() -> argparse.ArgumentParser:
    """Build and return the CLI argument parser (no side effects)."""
    parser = argparse.ArgumentParser(
        description="SlimZero - Zero-overhead prompt compression",
        prog="slimzero",
    )
    parser.add_argument(
        "prompt",
        nargs="?",
        default=None,
        help="The prompt to process",
    )
    parser.add_argument(
        "-m", "--model",
        default="mock",
        help="Model to use (default: mock)",
    )
    parser.add_argument(
        "-t", "--token-budget",
        type=int,
        default=512,
        help="Token budget (default: 512)",
    )
    parser.add_argument(
        "-s", "--system",
        default=None,
        help="System prompt",
    )
    parser.add_argument(
        "--stats",
        action="store_true",
        help="Show savings statistics",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Output as JSON",
    )
    parser.add_argument(
        "--export-json",
        metavar="FILE",
        help="Export stats to JSON file",
    )
    parser.add_argument(
        "--export-md",
        metavar="FILE",
        help="Export stats to Markdown file",
    )
    return parser


def _print_result(result) -> None:
    """Print a single call's response and per-call savings in plain text."""
    print("=" * 60)
    print("RESPONSE:")
    print("=" * 60)
    print(result.response)
    print()
    print("=" * 60)
    print("STATS:")
    print("=" * 60)
    print(f"Original tokens: {result.original_input_tokens}")
    print(f"Sent tokens: {result.sent_input_tokens}")
    print(f"Savings: {result.input_token_savings_percent:.1f}%")
    print(f"Stages: {', '.join(result.stages_applied)}")


def _print_cumulative_stats(stats) -> None:
    """Print the cumulative statistics block returned by SlimZero.get_stats()."""
    print()
    print("=" * 60)
    print("CUMULATIVE STATS:")
    print("=" * 60)
    print(f"Total calls: {stats['total_calls']}")
    print(f"Tokens saved: {stats['cumulative_tokens_saved']}")
    print(f"Cost saved: ${stats['cumulative_estimated_cost_usd']:.6f}")
    print(f"Avg tokens/call: {stats['avg_tokens_saved_per_call']:.1f}")


def main():
    """Main CLI entry point.

    Parses arguments, runs a single SlimZero call, prints the result
    (plain text or JSON), and optionally prints/exports cumulative stats.
    Exits with status 1 on missing prompt or any processing error.
    """
    parser = _build_parser()
    args = parser.parse_args()

    # No prompt given: show usage and a worked example, then fail.
    if args.prompt is None:
        parser.print_help()
        print("\nExample: slimzero 'Explain Python decorators please'")
        sys.exit(1)

    try:
        sz = SlimZero(
            model=args.model,
            token_budget=args.token_budget,
        )

        result = sz.call(
            prompt=args.prompt,
            system_prompt=args.system,
        )

        if args.json:
            import json
            print(json.dumps(result.to_dict(), indent=2))
        else:
            _print_result(result)

        if args.stats:
            _print_cumulative_stats(sz.get_stats())

        if args.export_json:
            sz.export_stats_json(args.export_json)
            print(f"\nExported JSON to: {args.export_json}")

        if args.export_md:
            sz.export_stats_markdown(args.export_md)
            print(f"\nExported Markdown to: {args.export_md}")

    except Exception as e:
        # CLI boundary: surface any failure as a message and a non-zero exit
        # rather than a traceback.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
|
|
File without changes
|
slimzero/agent/gsd.py
ADDED
|
@@ -0,0 +1,328 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SlimZero GSD Task Graph (US-014)
|
|
3
|
+
|
|
4
|
+
Task decomposition layer using networkx DiGraph.
|
|
5
|
+
Breaks goals into checkpointed sub-tasks with dependency management.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import logging
|
|
10
|
+
from datetime import datetime, timezone
|
|
11
|
+
from enum import Enum
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Optional, Dict, Any, List, Set
|
|
14
|
+
|
|
15
|
+
from slimzero.exceptions import SlimZeroError
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
try:
|
|
20
|
+
import networkx as nx
|
|
21
|
+
NETWORKX_AVAILABLE = True
|
|
22
|
+
except ImportError:
|
|
23
|
+
NETWORKX_AVAILABLE = False
|
|
24
|
+
logger.warning("networkx not available. GSD task graph will use fallback.")
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class TaskStatus(Enum):
    """Lifecycle state of a single task node in the GSD graph."""

    PENDING = "pending"      # not started; may be waiting on dependencies
    RUNNING = "running"      # currently executing
    COMPLETED = "completed"  # finished successfully
    FAILED = "failed"        # errored; eligible for retry
    SKIPPED = "skipped"      # deliberately not executed
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class GSDTask:
    """Represents a single task node in the graph.

    Holds the task's identity, dependency list, status, retry counter,
    output/error, and UTC timestamps. Round-trips through JSON via
    to_dict()/from_dict() for checkpointing.
    """

    def __init__(
        self,
        task_id: str,
        description: str,
        dependencies: Optional[List[str]] = None,
    ):
        """
        Args:
            task_id: Stable identifier; used as the graph node key.
            description: Human-readable description of the task.
            dependencies: IDs of tasks that must complete first.
        """
        self.task_id = task_id
        self.description = description
        self.status = TaskStatus.PENDING
        self.dependencies = dependencies or []
        self.retry_count = 0
        self.output: Optional[Any] = None
        self.error: Optional[str] = None
        # ISO-8601 UTC timestamp with a trailing "Z" instead of "+00:00".
        self.created_at = self._utc_now_iso()
        self.completed_at: Optional[str] = None

    @staticmethod
    def _utc_now_iso() -> str:
        """Return the current UTC time as an ISO-8601 string ending in 'Z'."""
        return datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a JSON-serialisable dictionary."""
        return {
            "task_id": self.task_id,
            "description": self.description,
            "status": self.status.value,
            "dependencies": self.dependencies,
            "retry_count": self.retry_count,
            # Bug fix: compare against None rather than truthiness, so
            # falsy outputs (0, "", False) survive serialisation instead
            # of silently becoming null.
            "output": str(self.output) if self.output is not None else None,
            "error": self.error,
            "created_at": self.created_at,
            "completed_at": self.completed_at,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "GSDTask":
        """Create a task from a dictionary produced by to_dict().

        Missing optional fields fall back to defaults; a missing
        created_at falls back to "now" rather than failing the load.
        """
        task = cls(
            task_id=data["task_id"],
            description=data["description"],
            dependencies=data.get("dependencies", []),
        )
        task.status = TaskStatus(data.get("status", "pending"))
        task.retry_count = data.get("retry_count", 0)
        task.output = data.get("output")
        task.error = data.get("error")
        task.created_at = data.get("created_at", cls._utc_now_iso())
        task.completed_at = data.get("completed_at")
        return task
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class GSDTaskGraph:
    """
    Task decomposition graph using networkx DiGraph.

    Each node represents a task with dependencies.
    Tasks run only when all dependencies complete.
    Checkpointed to JSON for resume capability.
    """

    def __init__(
        self,
        goal: str,
        checkpoint_dir: Optional[str] = None,
        max_retries: int = 3,
    ):
        """
        Initialize GSD Task Graph.

        Args:
            goal: The main goal to decompose.
            checkpoint_dir: Directory for checkpoint files (default ".gsd").
            max_retries: Maximum retry attempts per task.
        """
        import hashlib  # local import: only needed for the checkpoint filename

        self.goal = goal
        self.checkpoint_dir = Path(checkpoint_dir) if checkpoint_dir else Path(".gsd")
        self.max_retries = max_retries

        if NETWORKX_AVAILABLE:
            self._graph = nx.DiGraph()
        else:
            # Fallback mode: no graph; dependency checks degrade (see
            # _validate_no_circular_dependencies).
            self._graph = None
            self._fallback_tasks: Dict[str, GSDTask] = {}

        self._tasks: Dict[str, GSDTask] = {}
        # Bug fix: the built-in hash() of a str is randomised per process
        # (PYTHONHASHSEED), so hash(goal) produced a different checkpoint
        # filename on every run and load_checkpoint() could never resume a
        # previous session. A truncated SHA-256 digest is stable across runs.
        goal_digest = hashlib.sha256(goal.encode("utf-8")).hexdigest()[:16]
        self._checkpoint_path = self.checkpoint_dir / f"gsd_{goal_digest}.json"

    def _ensure_checkpoint_dir(self) -> None:
        """Ensure checkpoint directory exists."""
        self.checkpoint_dir.mkdir(parents=True, exist_ok=True)

    def add_task(self, task: GSDTask) -> None:
        """
        Add a task to the graph.

        Args:
            task: GSDTask to add.
        """
        self._tasks[task.task_id] = task

        if self._graph is not None:
            self._graph.add_node(task.task_id)
            # Edges point dependency -> dependent; unknown dependency IDs
            # create implicit nodes in the graph.
            for dep in task.dependencies:
                self._graph.add_edge(dep, task.task_id)
        else:
            self._fallback_tasks[task.task_id] = task

    def _validate_no_circular_dependencies(self) -> bool:
        """Validate that the graph has no circular dependencies.

        Always True in fallback mode (no networkx): cycles cannot be
        detected there, so cyclic tasks would simply never become ready.
        """
        if self._graph is None:
            return True
        # Direct DAG test instead of catching nx.NetworkXNoCycle from
        # nx.find_cycle — same result, no exception-driven control flow.
        return nx.is_directed_acyclic_graph(self._graph)

    def _is_valid_decomposition(self, tasks: List[Dict[str, Any]]) -> bool:
        """Validate task decomposition JSON: a list of dicts, each with
        task_id and description, and no duplicate task_ids."""
        if not isinstance(tasks, list):
            return False

        task_ids = set()
        for task in tasks:
            if not isinstance(task, dict):
                return False
            if "task_id" not in task or "description" not in task:
                return False
            if task["task_id"] in task_ids:
                return False
            task_ids.add(task["task_id"])

        return True

    def decompose(self, decomposition_json: str) -> bool:
        """
        Load task decomposition from JSON.

        Args:
            decomposition_json: JSON string with task decomposition.

        Returns:
            True if valid, False otherwise.
        """
        try:
            data = json.loads(decomposition_json)
            if not self._is_valid_decomposition(data):
                logger.error("Invalid task decomposition format")
                return False

            for task_data in data:
                task = GSDTask(
                    task_id=task_data["task_id"],
                    description=task_data["description"],
                    dependencies=task_data.get("dependencies", []),
                )
                self.add_task(task)

            # Cycle check happens after all tasks are added; note that on
            # failure the partially-built task set is not rolled back
            # (matching original behavior).
            if not self._validate_no_circular_dependencies():
                logger.error("Circular dependencies detected")
                return False

            self._ensure_checkpoint_dir()
            self._save_checkpoint()
            return True

        except json.JSONDecodeError as e:
            logger.error(f"Invalid JSON: {e}")
            return False

    def get_ready_tasks(self) -> List[GSDTask]:
        """Get tasks that are ready to run (all dependencies completed).

        Dependencies that reference unknown task IDs are ignored, so a task
        whose dependency was never added is treated as ready.
        """
        ready = []
        for task in self._tasks.values():
            if task.status != TaskStatus.PENDING:
                continue

            deps_completed = all(
                self._tasks[dep].status == TaskStatus.COMPLETED
                for dep in task.dependencies
                if dep in self._tasks
            )

            if deps_completed:
                ready.append(task)

        return ready

    def update_task_status(
        self,
        task_id: str,
        status: TaskStatus,
        output: Optional[Any] = None,
        error: Optional[str] = None,
    ) -> None:
        """
        Update task status and persist a checkpoint.

        Args:
            task_id: ID of task to update.
            status: New status.
            output: Task output (if completed).
            error: Error message (if failed).

        Unknown task_ids are logged and ignored. Note that output/error are
        always overwritten, even when None is passed.
        """
        if task_id not in self._tasks:
            logger.warning(f"Unknown task: {task_id}")
            return

        task = self._tasks[task_id]
        task.status = status
        task.output = output
        task.error = error

        if status == TaskStatus.COMPLETED:
            task.completed_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")

        if status == TaskStatus.FAILED:
            task.retry_count += 1

        self._save_checkpoint()

    def _save_checkpoint(self) -> None:
        """Save current goal + task state to the checkpoint JSON file."""
        self._ensure_checkpoint_dir()

        checkpoint_data = {
            "goal": self.goal,
            "checkpoint_at": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
            "tasks": {tid: task.to_dict() for tid, task in self._tasks.items()},
        }

        with open(self._checkpoint_path, "w", encoding="utf-8") as f:
            json.dump(checkpoint_data, f, indent=2)

        logger.info(f"Checkpoint saved: {self._checkpoint_path}")

    def load_checkpoint(self) -> bool:
        """
        Load checkpoint from file.

        Returns:
            True if checkpoint loaded, False if none exists or it is
            unreadable.
        """
        if not self._checkpoint_path.exists():
            return False

        try:
            with open(self._checkpoint_path, "r", encoding="utf-8") as f:
                data = json.load(f)

            self._tasks = {
                tid: GSDTask.from_dict(tdata)
                for tid, tdata in data.get("tasks", {}).items()
            }

            # Rebuild the dependency graph from the restored tasks.
            if self._graph is not None:
                self._graph = nx.DiGraph()
                for task in self._tasks.values():
                    self._graph.add_node(task.task_id)
                    for dep in task.dependencies:
                        self._graph.add_edge(dep, task.task_id)

            logger.info(f"Checkpoint loaded: {self._checkpoint_path}")
            return True

        except Exception as e:
            # Best-effort load: a corrupt checkpoint is reported, not raised.
            logger.error(f"Failed to load checkpoint: {e}")
            return False

    def get_pending_count(self) -> int:
        """Get count of pending tasks."""
        return sum(1 for t in self._tasks.values() if t.status == TaskStatus.PENDING)

    def get_completed_count(self) -> int:
        """Get count of completed tasks."""
        return sum(1 for t in self._tasks.values() if t.status == TaskStatus.COMPLETED)

    def is_complete(self) -> bool:
        """Check if all tasks are complete (COMPLETED or SKIPPED).

        An empty graph is not considered complete.
        """
        if not self._tasks:
            return False
        return all(t.status in (TaskStatus.COMPLETED, TaskStatus.SKIPPED) for t in self._tasks.values())

    def to_dict(self) -> Dict[str, Any]:
        """Export graph state as dictionary."""
        return {
            "goal": self.goal,
            "tasks": {tid: task.to_dict() for tid, task in self._tasks.items()},
            "pending_count": self.get_pending_count(),
            "completed_count": self.get_completed_count(),
            "is_complete": self.is_complete(),
        }
|