up-cli 0.1.1__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- up/__init__.py +1 -1
- up/ai_cli.py +229 -0
- up/cli.py +75 -4
- up/commands/agent.py +521 -0
- up/commands/bisect.py +343 -0
- up/commands/branch.py +350 -0
- up/commands/dashboard.py +248 -0
- up/commands/init.py +195 -6
- up/commands/learn.py +1741 -0
- up/commands/memory.py +545 -0
- up/commands/new.py +108 -10
- up/commands/provenance.py +267 -0
- up/commands/review.py +239 -0
- up/commands/start.py +1124 -0
- up/commands/status.py +360 -0
- up/commands/summarize.py +122 -0
- up/commands/sync.py +317 -0
- up/commands/vibe.py +304 -0
- up/context.py +421 -0
- up/core/__init__.py +69 -0
- up/core/checkpoint.py +479 -0
- up/core/provenance.py +364 -0
- up/core/state.py +678 -0
- up/events.py +512 -0
- up/git/__init__.py +37 -0
- up/git/utils.py +270 -0
- up/git/worktree.py +331 -0
- up/learn/__init__.py +155 -0
- up/learn/analyzer.py +227 -0
- up/learn/plan.py +374 -0
- up/learn/research.py +511 -0
- up/learn/utils.py +117 -0
- up/memory.py +1096 -0
- up/parallel.py +551 -0
- up/summarizer.py +407 -0
- up/templates/__init__.py +70 -2
- up/templates/config/__init__.py +502 -20
- up/templates/docs/SKILL.md +28 -0
- up/templates/docs/__init__.py +341 -0
- up/templates/docs/standards/HEADERS.md +24 -0
- up/templates/docs/standards/STRUCTURE.md +18 -0
- up/templates/docs/standards/TEMPLATES.md +19 -0
- up/templates/learn/__init__.py +567 -14
- up/templates/loop/__init__.py +546 -27
- up/templates/mcp/__init__.py +474 -0
- up/templates/projects/__init__.py +786 -0
- up/ui/__init__.py +14 -0
- up/ui/loop_display.py +650 -0
- up/ui/theme.py +137 -0
- up_cli-0.5.0.dist-info/METADATA +519 -0
- up_cli-0.5.0.dist-info/RECORD +55 -0
- up_cli-0.1.1.dist-info/METADATA +0 -186
- up_cli-0.1.1.dist-info/RECORD +0 -14
- {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/WHEEL +0 -0
- {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/entry_points.txt +0 -0
up/context.py
ADDED
|
@@ -0,0 +1,421 @@
|
|
|
1
|
+
"""Context window management for AI sessions.
|
|
2
|
+
|
|
3
|
+
Tracks estimated token usage and provides warnings when approaching limits.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
import re
|
|
8
|
+
from dataclasses import dataclass, field, asdict
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Optional
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
# Token estimation constants (rough estimates)
|
|
15
|
+
CHARS_PER_TOKEN = 4 # Average characters per token
|
|
16
|
+
CODE_MULTIPLIER = 1.3 # Code typically uses more tokens
|
|
17
|
+
DEFAULT_BUDGET = 100_000 # Default context budget in tokens
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class ContextEntry:
    """One item counted against the context window (file, message, or tool output)."""

    timestamp: str         # ISO-8601 time the entry was recorded
    entry_type: str        # one of: 'file', 'message', 'tool_output'
    source: str            # file path, "<role> message", or "tool:<name>"
    estimated_tokens: int  # rough token cost attributed to this entry

    def to_dict(self) -> dict:
        """Serialize this entry to a plain dictionary."""
        return {
            "timestamp": self.timestamp,
            "entry_type": self.entry_type,
            "source": self.source,
            "estimated_tokens": self.estimated_tokens,
        }
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@dataclass
class ContextBudget:
    """Running tally of estimated context-window consumption for a session."""

    budget: int = DEFAULT_BUDGET    # total token allowance for the session
    warning_threshold: float = 0.8  # fraction of budget that triggers WARNING
    critical_threshold: float = 0.9  # fraction of budget that triggers CRITICAL
    entries: list[ContextEntry] = field(default_factory=list)
    total_tokens: int = 0           # running sum over all recorded entries
    session_start: str = field(default_factory=lambda: datetime.now().isoformat())

    @property
    def usage_percent(self) -> float:
        """Usage expressed as a percentage of the budget (0 for a non-positive budget)."""
        if self.budget > 0:
            return self.total_tokens / self.budget * 100
        return 0

    @property
    def remaining_tokens(self) -> int:
        """Tokens left before the budget is exhausted (never negative)."""
        leftover = self.budget - self.total_tokens
        return leftover if leftover > 0 else 0

    @property
    def status(self) -> str:
        """Current status string: 'OK', 'WARNING', or 'CRITICAL'."""
        used = self.total_tokens / self.budget if self.budget > 0 else 0
        if used >= self.critical_threshold:
            return "CRITICAL"
        if used >= self.warning_threshold:
            return "WARNING"
        return "OK"

    def to_dict(self) -> dict:
        """Serialize the budget for persistence/display (only the last 20 entries)."""
        recent = [entry.to_dict() for entry in self.entries[-20:]]
        return {
            "budget": self.budget,
            "total_tokens": self.total_tokens,
            "remaining_tokens": self.remaining_tokens,
            "usage_percent": round(self.usage_percent, 1),
            "status": self.status,
            "warning_threshold": self.warning_threshold,
            "critical_threshold": self.critical_threshold,
            "session_start": self.session_start,
            "entry_count": len(self.entries),
            "entries": recent,
        }
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def estimate_tokens(text: str, is_code: bool = False) -> int:
    """Roughly estimate how many tokens *text* will consume.

    Args:
        text: The text to estimate.
        is_code: Apply the code multiplier (code tokenizes less efficiently).

    Returns:
        Estimated token count (0 for empty input).
    """
    if not text:
        return 0

    # Character-count heuristic, scaled up for source code.
    multiplier = CODE_MULTIPLIER if is_code else 1.0
    return int(len(text) / CHARS_PER_TOKEN * multiplier)
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def estimate_file_tokens(path: Path) -> int:
    """Estimate tokens contributed by reading *path* into context.

    Args:
        path: Path to the file

    Returns:
        Estimated token count; 0 if the path is not a readable text file.
    """
    # Guard with is_file() rather than exists(): exists() is also true for
    # directories, and read_text() on a directory raises IsADirectoryError.
    if not path.is_file():
        return 0

    try:
        content = path.read_text()
    except (UnicodeDecodeError, OSError):
        # Binary content, permission problems, or a file that vanished in a
        # race simply contribute nothing to the estimate. OSError covers
        # PermissionError and friends.
        return 0

    # Source code tokenizes less efficiently than prose, so flag known code
    # extensions for the higher multiplier.
    code_extensions = {
        '.py', '.js', '.ts', '.tsx', '.jsx', '.go', '.rs', '.java',
        '.c', '.cpp', '.h', '.hpp', '.rb', '.sh', '.bash', '.zsh'
    }
    is_code = path.suffix.lower() in code_extensions

    return estimate_tokens(content, is_code)
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
class ContextManager:
    """Manages context window budget for AI sessions.

    Uses the unified StateManager (``up.core.state``) for storage when it is
    importable, and falls back to the legacy ``.claude/context_budget.json``
    file otherwise, so the existing API keeps working in older workspaces.
    """

    def __init__(
        self,
        workspace: Optional[Path] = None,
        budget: int = DEFAULT_BUDGET
    ):
        """Initialize the manager and load any persisted state.

        Args:
            workspace: Project directory (defaults to the current directory).
            budget: Token budget for a fresh session.
        """
        self.workspace = workspace or Path.cwd()
        # Old location, kept for migration / fallback.
        self._old_state_file = self.workspace / ".claude" / "context_budget.json"
        # Prefer the unified state store; flipped off if it is unavailable.
        self._use_unified_state = True
        self.budget = ContextBudget(budget=budget)
        self._load_state()

    def _load_state(self) -> None:
        """Load state from the unified state manager, or migrate from the old file."""
        try:
            from up.core.state import get_state_manager
            manager = get_state_manager(self.workspace)
            ctx = manager.state.context

            # Sync scalar fields from unified state.
            self.budget.budget = ctx.budget
            self.budget.total_tokens = ctx.total_tokens
            self.budget.warning_threshold = ctx.warning_threshold
            self.budget.critical_threshold = ctx.critical_threshold
            self.budget.session_start = ctx.session_start

            # Entries may be stored as plain dicts; rehydrate them.
            self.budget.entries = [
                ContextEntry(**e) if isinstance(e, dict) else e
                for e in ctx.entries
            ]

        except ImportError:
            # Unified state unavailable -- use the old file-based storage.
            self._use_unified_state = False
            self._load_state_legacy()

    def _load_state_legacy(self) -> None:
        """Load state from the old file location (for backwards compatibility)."""
        if self._old_state_file.exists():
            try:
                data = json.loads(self._old_state_file.read_text())
                self.budget.budget = data.get("budget", DEFAULT_BUDGET)
                self.budget.total_tokens = data.get("total_tokens", 0)
                self.budget.session_start = data.get("session_start", datetime.now().isoformat())
                entries_data = data.get("entries", [])
                self.budget.entries = [
                    ContextEntry(**e) for e in entries_data
                ]
            except (json.JSONDecodeError, KeyError, TypeError):
                # Corrupt or stale file: keep the fresh in-memory budget.
                pass

    def _save_state(self) -> None:
        """Persist the current budget to the unified state manager (or legacy file)."""
        if self._use_unified_state:
            try:
                from up.core.state import get_state_manager
                manager = get_state_manager(self.workspace)

                # Sync to unified state.
                manager.state.context.budget = self.budget.budget
                manager.state.context.total_tokens = self.budget.total_tokens
                manager.state.context.warning_threshold = self.budget.warning_threshold
                manager.state.context.critical_threshold = self.budget.critical_threshold
                manager.state.context.session_start = self.budget.session_start
                manager.state.context.entries = [
                    e.to_dict() if hasattr(e, 'to_dict') else e
                    for e in self.budget.entries[-50:]  # Keep last 50
                ]

                manager.save()
                return
            except ImportError:
                pass

        # Fallback to old file-based storage.
        self._old_state_file.parent.mkdir(parents=True, exist_ok=True)
        self._old_state_file.write_text(json.dumps(self.budget.to_dict(), indent=2))

    def _record(self, entry_type: str, source: str, tokens: int) -> ContextEntry:
        """Append an entry, update the running total, and persist.

        Shared by the record_* methods so the bookkeeping lives in one place.
        """
        entry = ContextEntry(
            timestamp=datetime.now().isoformat(),
            entry_type=entry_type,
            source=source,
            estimated_tokens=tokens
        )
        self.budget.entries.append(entry)
        self.budget.total_tokens += tokens
        self._save_state()
        return entry

    def record_file_read(self, path: Path) -> ContextEntry:
        """Record a file being read into context.

        Args:
            path: Path to the file read

        Returns:
            The context entry created
        """
        return self._record("file", str(path), estimate_file_tokens(path))

    def record_message(self, message: str, role: str = "user") -> ContextEntry:
        """Record a message in context.

        Args:
            message: The message content
            role: 'user' or 'assistant'

        Returns:
            The context entry created
        """
        return self._record("message", f"{role} message", estimate_tokens(message))

    def record_tool_output(self, tool: str, output_size: int) -> ContextEntry:
        """Record tool output in context.

        Args:
            tool: Name of the tool
            output_size: Size of output in characters

        Returns:
            The context entry created
        """
        # Compute the estimate directly from the size instead of allocating a
        # throwaway string of output_size characters; equivalent to the old
        # estimate_tokens("x" * output_size) for all sizes.
        tokens = int(output_size / CHARS_PER_TOKEN) if output_size > 0 else 0
        return self._record("tool_output", f"tool:{tool}", tokens)

    def get_status(self) -> dict:
        """Get current context budget status.

        Returns:
            Status dictionary with usage info
        """
        return self.budget.to_dict()

    def check_budget(self) -> tuple[str, str]:
        """Check budget and return status with message.

        Returns:
            Tuple of (status, message)
        """
        status = self.budget.status
        usage = self.budget.usage_percent
        remaining = self.budget.remaining_tokens

        if status == "CRITICAL":
            msg = (
                f"⚠️ CRITICAL: Context at {usage:.1f}% ({remaining:,} tokens remaining). "
                "Consider summarizing and creating a checkpoint."
            )
        elif status == "WARNING":
            msg = (
                f"⚡ WARNING: Context at {usage:.1f}% ({remaining:,} tokens remaining). "
                "Start planning for handoff."
            )
        else:
            msg = f"✅ OK: Context at {usage:.1f}% ({remaining:,} tokens remaining)."

        return status, msg

    def reset(self) -> None:
        """Reset context budget for a new session, keeping the configured budget size."""
        self.budget = ContextBudget(budget=self.budget.budget)
        self._save_state()

    def estimate_file_impact(self, path: Path) -> dict:
        """Estimate impact of reading a file on the budget (without recording it).

        Args:
            path: Path to the file

        Returns:
            Impact analysis dictionary
        """
        tokens = estimate_file_tokens(path)
        new_total = self.budget.total_tokens + tokens
        new_percent = (new_total / self.budget.budget) * 100 if self.budget.budget > 0 else 0

        return {
            "file": str(path),
            "estimated_tokens": tokens,
            "current_total": self.budget.total_tokens,
            "new_total": new_total,
            "current_percent": round(self.budget.usage_percent, 1),
            "new_percent": round(new_percent, 1),
            # Thresholds are fractions; new_percent is a percentage, hence * 100.
            "will_exceed_warning": new_percent >= self.budget.warning_threshold * 100,
            "will_exceed_critical": new_percent >= self.budget.critical_threshold * 100,
        }

    def suggest_files_to_drop(self, target_reduction: int) -> list[str]:
        """Suggest files that could be dropped to reduce context.

        Args:
            target_reduction: Target token reduction

        Returns:
            List of file paths to consider dropping (largest first)
        """
        # Consider only file entries, largest first, so the fewest drops
        # reach the target reduction.
        file_entries = [
            e for e in self.budget.entries
            if e.entry_type == "file"
        ]
        file_entries.sort(key=lambda e: e.estimated_tokens, reverse=True)

        suggestions = []
        reduction = 0

        for entry in file_entries:
            if reduction >= target_reduction:
                break
            suggestions.append(entry.source)
            reduction += entry.estimated_tokens

        return suggestions
|
|
369
|
+
|
|
370
|
+
|
|
371
|
+
def create_context_budget_file(target_dir: Path, budget: int = DEFAULT_BUDGET) -> Path:
    """Create initial context budget state for a project.

    Args:
        target_dir: Project directory
        budget: Token budget

    Returns:
        Path to the unified state file that now holds the budget
    """
    # Constructing the manager loads (or creates) state; reset() then
    # persists a clean budget for a brand-new session.
    ContextManager(workspace=target_dir, budget=budget).reset()
    return target_dir / ".up" / "state.json"
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
# CLI integration: minimal argv-based interface for quick manual checks.
if __name__ == "__main__":
    import sys

    manager = ContextManager()
    args = sys.argv[1:]

    if not args:
        # No subcommand: just report the current budget status line.
        _, message = manager.check_budget()
        print(message)
    elif args[0] == "status":
        _, message = manager.check_budget()
        print(message)
        print(json.dumps(manager.get_status(), indent=2))
    elif args[0] == "reset":
        manager.reset()
        print("Context budget reset for new session.")
    elif args[0] == "estimate" and len(args) > 1:
        print(json.dumps(manager.estimate_file_impact(Path(args[1])), indent=2))
    elif args[0] == "record" and len(args) > 1:
        entry = manager.record_file_read(Path(args[1]))
        print(f"Recorded: {entry.source} ({entry.estimated_tokens} tokens)")
        _, message = manager.check_budget()
        print(message)
    else:
        print("Usage: python context.py [status|reset|estimate <file>|record <file>]")
|
up/core/__init__.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
"""Core modules for up-cli.
|
|
2
|
+
|
|
3
|
+
This package contains the foundational modules used across all commands:
|
|
4
|
+
- state: Unified state management
|
|
5
|
+
- checkpoint: Git checkpoint operations
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from up.core.state import (
|
|
9
|
+
UnifiedState,
|
|
10
|
+
LoopState,
|
|
11
|
+
ContextState,
|
|
12
|
+
AgentState,
|
|
13
|
+
CircuitBreakerState,
|
|
14
|
+
StateManager,
|
|
15
|
+
get_state_manager,
|
|
16
|
+
get_state,
|
|
17
|
+
save_state,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
from up.core.checkpoint import (
|
|
21
|
+
CheckpointManager,
|
|
22
|
+
CheckpointMetadata,
|
|
23
|
+
CheckpointError,
|
|
24
|
+
GitError,
|
|
25
|
+
NotAGitRepoError,
|
|
26
|
+
CheckpointNotFoundError,
|
|
27
|
+
get_checkpoint_manager,
|
|
28
|
+
save_checkpoint,
|
|
29
|
+
restore_checkpoint,
|
|
30
|
+
get_diff,
|
|
31
|
+
)
|
|
32
|
+
|
|
33
|
+
from up.core.provenance import (
|
|
34
|
+
ProvenanceEntry,
|
|
35
|
+
ProvenanceManager,
|
|
36
|
+
get_provenance_manager,
|
|
37
|
+
track_ai_operation,
|
|
38
|
+
complete_ai_operation,
|
|
39
|
+
)
|
|
40
|
+
|
|
41
|
+
__all__ = [
|
|
42
|
+
# State
|
|
43
|
+
"UnifiedState",
|
|
44
|
+
"LoopState",
|
|
45
|
+
"ContextState",
|
|
46
|
+
"AgentState",
|
|
47
|
+
"CircuitBreakerState",
|
|
48
|
+
"StateManager",
|
|
49
|
+
"get_state_manager",
|
|
50
|
+
"get_state",
|
|
51
|
+
"save_state",
|
|
52
|
+
# Checkpoint
|
|
53
|
+
"CheckpointManager",
|
|
54
|
+
"CheckpointMetadata",
|
|
55
|
+
"CheckpointError",
|
|
56
|
+
"GitError",
|
|
57
|
+
"NotAGitRepoError",
|
|
58
|
+
"CheckpointNotFoundError",
|
|
59
|
+
"get_checkpoint_manager",
|
|
60
|
+
"save_checkpoint",
|
|
61
|
+
"restore_checkpoint",
|
|
62
|
+
"get_diff",
|
|
63
|
+
# Provenance
|
|
64
|
+
"ProvenanceEntry",
|
|
65
|
+
"ProvenanceManager",
|
|
66
|
+
"get_provenance_manager",
|
|
67
|
+
"track_ai_operation",
|
|
68
|
+
"complete_ai_operation",
|
|
69
|
+
]
|