up-cli 0.1.1-py3-none-any.whl → 0.5.0-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
- up/__init__.py +1 -1
- up/ai_cli.py +229 -0
- up/cli.py +75 -4
- up/commands/agent.py +521 -0
- up/commands/bisect.py +343 -0
- up/commands/branch.py +350 -0
- up/commands/dashboard.py +248 -0
- up/commands/init.py +195 -6
- up/commands/learn.py +1741 -0
- up/commands/memory.py +545 -0
- up/commands/new.py +108 -10
- up/commands/provenance.py +267 -0
- up/commands/review.py +239 -0
- up/commands/start.py +1124 -0
- up/commands/status.py +360 -0
- up/commands/summarize.py +122 -0
- up/commands/sync.py +317 -0
- up/commands/vibe.py +304 -0
- up/context.py +421 -0
- up/core/__init__.py +69 -0
- up/core/checkpoint.py +479 -0
- up/core/provenance.py +364 -0
- up/core/state.py +678 -0
- up/events.py +512 -0
- up/git/__init__.py +37 -0
- up/git/utils.py +270 -0
- up/git/worktree.py +331 -0
- up/learn/__init__.py +155 -0
- up/learn/analyzer.py +227 -0
- up/learn/plan.py +374 -0
- up/learn/research.py +511 -0
- up/learn/utils.py +117 -0
- up/memory.py +1096 -0
- up/parallel.py +551 -0
- up/summarizer.py +407 -0
- up/templates/__init__.py +70 -2
- up/templates/config/__init__.py +502 -20
- up/templates/docs/SKILL.md +28 -0
- up/templates/docs/__init__.py +341 -0
- up/templates/docs/standards/HEADERS.md +24 -0
- up/templates/docs/standards/STRUCTURE.md +18 -0
- up/templates/docs/standards/TEMPLATES.md +19 -0
- up/templates/learn/__init__.py +567 -14
- up/templates/loop/__init__.py +546 -27
- up/templates/mcp/__init__.py +474 -0
- up/templates/projects/__init__.py +786 -0
- up/ui/__init__.py +14 -0
- up/ui/loop_display.py +650 -0
- up/ui/theme.py +137 -0
- up_cli-0.5.0.dist-info/METADATA +519 -0
- up_cli-0.5.0.dist-info/RECORD +55 -0
- up_cli-0.1.1.dist-info/METADATA +0 -186
- up_cli-0.1.1.dist-info/RECORD +0 -14
- {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/WHEEL +0 -0
- {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/entry_points.txt +0 -0
up/core/provenance.py
ADDED
@@ -0,0 +1,364 @@

"""Provenance tracking for AI-generated code.

Records the lineage of every AI-generated change:
- Which AI model generated the code
- What prompt was used
- What files were modified
- Hash of input context
- Verification results

Stored in .up/provenance/ with content-addressed storage.
"""

import hashlib
import json
import subprocess
from dataclasses import dataclass, field, asdict
from datetime import datetime
from pathlib import Path
from typing import Optional, List, Dict, Any


@dataclass
class ProvenanceEntry:
    """A single provenance record for an AI operation."""

    # Unique ID (content hash)
    id: str = ""

    # AI Model info
    ai_model: str = "unknown"  # claude, cursor, gpt-4, etc.
    ai_version: str = ""

    # Task info
    task_id: str = ""
    task_title: str = ""
    prompt_hash: str = ""  # Hash of the prompt sent
    prompt_preview: str = ""  # First 200 chars

    # Context info
    context_files: List[str] = field(default_factory=list)
    context_hash: str = ""  # Hash of all context files

    # Result info
    files_modified: List[str] = field(default_factory=list)
    lines_added: int = 0
    lines_removed: int = 0

    # Git info
    commit_sha: str = ""
    branch: str = ""

    # Verification
    tests_passed: Optional[bool] = None
    lint_passed: Optional[bool] = None
    type_check_passed: Optional[bool] = None
    verification_notes: str = ""

    # Status
    status: str = "pending"  # pending, accepted, rejected, reverted

    # Timestamps
    created_at: str = field(default_factory=lambda: datetime.now().isoformat())
    completed_at: Optional[str] = None

    def __post_init__(self):
        """Generate ID if not set."""
        if not self.id:
            self.id = self._generate_id()

    def _generate_id(self) -> str:
        """Generate content-addressed ID."""
        content = f"{self.task_id}:{self.prompt_hash}:{self.context_hash}:{self.created_at}"
        return hashlib.sha256(content.encode()).hexdigest()[:12]

    def to_dict(self) -> dict:
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict) -> "ProvenanceEntry":
        return cls(**{k: v for k, v in data.items() if k in cls.__dataclass_fields__})


class ProvenanceManager:
    """Manages provenance records for AI operations."""

    PROVENANCE_DIR = ".up/provenance"
    INDEX_FILE = "index.json"

    def __init__(self, workspace: Optional[Path] = None):
        self.workspace = workspace or Path.cwd()
        self.provenance_dir = self.workspace / self.PROVENANCE_DIR
        self.index_file = self.provenance_dir / self.INDEX_FILE
        self._index: Dict[str, str] = {}  # task_id -> entry_id
        self._load_index()

    def _load_index(self) -> None:
        """Load provenance index."""
        if self.index_file.exists():
            try:
                self._index = json.loads(self.index_file.read_text())
            except json.JSONDecodeError:
                self._index = {}

    def _save_index(self) -> None:
        """Save provenance index."""
        self.provenance_dir.mkdir(parents=True, exist_ok=True)
        self.index_file.write_text(json.dumps(self._index, indent=2))

    def start_operation(
        self,
        task_id: str,
        task_title: str,
        prompt: str,
        ai_model: str = "unknown",
        context_files: List[str] = None
    ) -> ProvenanceEntry:
        """Start tracking a new AI operation.

        Call this before running AI generation.
        """
        # Hash the prompt
        prompt_hash = hashlib.sha256(prompt.encode()).hexdigest()[:16]

        # Hash context files
        context_hash = ""
        if context_files:
            context_content = ""
            for f in context_files:
                path = self.workspace / f
                if path.exists():
                    try:
                        context_content += path.read_text()
                    except Exception:
                        pass
            context_hash = hashlib.sha256(context_content.encode()).hexdigest()[:16]

        # Get current branch
        branch = self._get_branch()

        entry = ProvenanceEntry(
            task_id=task_id,
            task_title=task_title,
            ai_model=ai_model,
            prompt_hash=prompt_hash,
            prompt_preview=prompt[:200] + "..." if len(prompt) > 200 else prompt,
            context_files=context_files or [],
            context_hash=context_hash,
            branch=branch,
            status="pending",
        )

        # Save entry
        self._save_entry(entry)

        # Update index
        self._index[task_id] = entry.id
        self._save_index()

        return entry

    def complete_operation(
        self,
        entry_id: str,
        files_modified: List[str] = None,
        lines_added: int = 0,
        lines_removed: int = 0,
        commit_sha: str = "",
        tests_passed: bool = None,
        lint_passed: bool = None,
        type_check_passed: bool = None,
        status: str = "accepted"
    ) -> ProvenanceEntry:
        """Complete tracking an AI operation.

        Call this after AI generation and verification.
        """
        entry = self._load_entry(entry_id)
        if not entry:
            raise ValueError(f"Provenance entry not found: {entry_id}")

        entry.files_modified = files_modified or []
        entry.lines_added = lines_added
        entry.lines_removed = lines_removed
        entry.commit_sha = commit_sha or self._get_head_sha()
        entry.tests_passed = tests_passed
        entry.lint_passed = lint_passed
        entry.type_check_passed = type_check_passed
        entry.status = status
        entry.completed_at = datetime.now().isoformat()

        self._save_entry(entry)
        return entry

    def reject_operation(self, entry_id: str, reason: str = "") -> ProvenanceEntry:
        """Mark an operation as rejected (reverted)."""
        entry = self._load_entry(entry_id)
        if not entry:
            raise ValueError(f"Provenance entry not found: {entry_id}")

        entry.status = "rejected"
        entry.verification_notes = reason
        entry.completed_at = datetime.now().isoformat()

        self._save_entry(entry)
        return entry

    def get_entry(self, entry_id: str) -> Optional[ProvenanceEntry]:
        """Get a provenance entry by ID."""
        return self._load_entry(entry_id)

    def get_entry_for_task(self, task_id: str) -> Optional[ProvenanceEntry]:
        """Get provenance entry for a task."""
        entry_id = self._index.get(task_id)
        if entry_id:
            return self._load_entry(entry_id)
        return None

    def list_entries(self, limit: int = 50, status: str = None) -> List[ProvenanceEntry]:
        """List provenance entries."""
        entries = []

        if not self.provenance_dir.exists():
            return entries

        for file_path in sorted(self.provenance_dir.glob("*.json"), reverse=True):
            if file_path.name == self.INDEX_FILE:
                continue

            entry = self._load_entry_from_file(file_path)
            if entry:
                if status and entry.status != status:
                    continue
                entries.append(entry)
                if len(entries) >= limit:
                    break

        return entries

    def get_stats(self) -> dict:
        """Get provenance statistics."""
        entries = self.list_entries(limit=1000)

        total = len(entries)
        accepted = sum(1 for e in entries if e.status == "accepted")
        rejected = sum(1 for e in entries if e.status == "rejected")
        pending = sum(1 for e in entries if e.status == "pending")

        total_lines_added = sum(e.lines_added for e in entries)
        total_lines_removed = sum(e.lines_removed for e in entries)

        tests_run = sum(1 for e in entries if e.tests_passed is not None)
        tests_passed = sum(1 for e in entries if e.tests_passed is True)

        models = {}
        for e in entries:
            models[e.ai_model] = models.get(e.ai_model, 0) + 1

        return {
            "total_operations": total,
            "accepted": accepted,
            "rejected": rejected,
            "pending": pending,
            "acceptance_rate": accepted / total if total > 0 else 0,
            "total_lines_added": total_lines_added,
            "total_lines_removed": total_lines_removed,
            "tests_run": tests_run,
            "tests_passed": tests_passed,
            "test_pass_rate": tests_passed / tests_run if tests_run > 0 else 0,
            "models_used": models,
        }

    def _save_entry(self, entry: ProvenanceEntry) -> None:
        """Save entry to file."""
        self.provenance_dir.mkdir(parents=True, exist_ok=True)
        file_path = self.provenance_dir / f"{entry.id}.json"
        file_path.write_text(json.dumps(entry.to_dict(), indent=2))

    def _load_entry(self, entry_id: str) -> Optional[ProvenanceEntry]:
        """Load entry from file."""
        file_path = self.provenance_dir / f"{entry_id}.json"
        return self._load_entry_from_file(file_path)

    def _load_entry_from_file(self, file_path: Path) -> Optional[ProvenanceEntry]:
        """Load entry from file path."""
        if not file_path.exists():
            return None
        try:
            data = json.loads(file_path.read_text())
            return ProvenanceEntry.from_dict(data)
        except (json.JSONDecodeError, TypeError):
            return None

    def _get_branch(self) -> str:
        """Get current git branch."""
        try:
            result = subprocess.run(
                ["git", "rev-parse", "--abbrev-ref", "HEAD"],
                cwd=self.workspace,
                capture_output=True,
                text=True
            )
            return result.stdout.strip() if result.returncode == 0 else ""
        except Exception:
            return ""

    def _get_head_sha(self) -> str:
        """Get current HEAD commit SHA."""
        try:
            result = subprocess.run(
                ["git", "rev-parse", "HEAD"],
                cwd=self.workspace,
                capture_output=True,
                text=True
            )
            return result.stdout.strip()[:12] if result.returncode == 0 else ""
        except Exception:
            return ""


# =============================================================================
# Module-level convenience functions
# =============================================================================

_default_manager: Optional[ProvenanceManager] = None


def get_provenance_manager(workspace: Optional[Path] = None) -> ProvenanceManager:
    """Get or create the default provenance manager."""
    global _default_manager
    if _default_manager is None or (workspace and _default_manager.workspace != workspace):
        _default_manager = ProvenanceManager(workspace)
    return _default_manager


def track_ai_operation(
    task_id: str,
    task_title: str,
    prompt: str,
    ai_model: str = "unknown",
    context_files: List[str] = None,
    workspace: Optional[Path] = None
) -> ProvenanceEntry:
    """Start tracking an AI operation (convenience function)."""
    return get_provenance_manager(workspace).start_operation(
        task_id=task_id,
        task_title=task_title,
        prompt=prompt,
        ai_model=ai_model,
        context_files=context_files
    )


def complete_ai_operation(
    entry_id: str,
    files_modified: List[str] = None,
    status: str = "accepted",
    workspace: Optional[Path] = None
) -> ProvenanceEntry:
    """Complete tracking an AI operation (convenience function)."""
    return get_provenance_manager(workspace).complete_operation(
        entry_id=entry_id,
        files_modified=files_modified,
        status=status
    )
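
For context, a minimal usage sketch of the convenience API this new module exposes, based only on the signatures above. The task id, title, prompt, and file paths are hypothetical placeholders; the import path is assumed to follow the wheel layout (up/core/provenance.py).

from up.core.provenance import (
    track_ai_operation,
    complete_ai_operation,
    get_provenance_manager,
)

# Start tracking before invoking the AI tool. This writes
# .up/provenance/<entry-id>.json and maps the task id to the entry id
# in .up/provenance/index.json.
entry = track_ai_operation(
    task_id="TASK-42",                        # hypothetical identifiers
    task_title="Add retry logic to fetcher",
    prompt="Add exponential backoff to fetch_data() ...",
    ai_model="claude",
    context_files=["src/fetcher.py"],
)

# ... run the AI generation and any verification here ...

# Record the outcome against the same entry; status is one of
# "pending", "accepted", "rejected", or "reverted" per the dataclass above.
complete_ai_operation(
    entry_id=entry.id,
    files_modified=["src/fetcher.py"],
    status="accepted",
)

# Aggregate statistics: acceptance rate, test pass rate, models used.
print(get_provenance_manager().get_stats())

Each record is stored as .up/provenance/<id>.json with a content-addressed id derived from the task id, prompt hash, context hash, and timestamp, while index.json lets get_entry_for_task() resolve a task without scanning every record.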