up-cli 0.1.1__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- up/__init__.py +1 -1
- up/ai_cli.py +229 -0
- up/cli.py +75 -4
- up/commands/agent.py +521 -0
- up/commands/bisect.py +343 -0
- up/commands/branch.py +350 -0
- up/commands/dashboard.py +248 -0
- up/commands/init.py +195 -6
- up/commands/learn.py +1741 -0
- up/commands/memory.py +545 -0
- up/commands/new.py +108 -10
- up/commands/provenance.py +267 -0
- up/commands/review.py +239 -0
- up/commands/start.py +1124 -0
- up/commands/status.py +360 -0
- up/commands/summarize.py +122 -0
- up/commands/sync.py +317 -0
- up/commands/vibe.py +304 -0
- up/context.py +421 -0
- up/core/__init__.py +69 -0
- up/core/checkpoint.py +479 -0
- up/core/provenance.py +364 -0
- up/core/state.py +678 -0
- up/events.py +512 -0
- up/git/__init__.py +37 -0
- up/git/utils.py +270 -0
- up/git/worktree.py +331 -0
- up/learn/__init__.py +155 -0
- up/learn/analyzer.py +227 -0
- up/learn/plan.py +374 -0
- up/learn/research.py +511 -0
- up/learn/utils.py +117 -0
- up/memory.py +1096 -0
- up/parallel.py +551 -0
- up/summarizer.py +407 -0
- up/templates/__init__.py +70 -2
- up/templates/config/__init__.py +502 -20
- up/templates/docs/SKILL.md +28 -0
- up/templates/docs/__init__.py +341 -0
- up/templates/docs/standards/HEADERS.md +24 -0
- up/templates/docs/standards/STRUCTURE.md +18 -0
- up/templates/docs/standards/TEMPLATES.md +19 -0
- up/templates/learn/__init__.py +567 -14
- up/templates/loop/__init__.py +546 -27
- up/templates/mcp/__init__.py +474 -0
- up/templates/projects/__init__.py +786 -0
- up/ui/__init__.py +14 -0
- up/ui/loop_display.py +650 -0
- up/ui/theme.py +137 -0
- up_cli-0.5.0.dist-info/METADATA +519 -0
- up_cli-0.5.0.dist-info/RECORD +55 -0
- up_cli-0.1.1.dist-info/METADATA +0 -186
- up_cli-0.1.1.dist-info/RECORD +0 -14
- {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/WHEEL +0 -0
- {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/entry_points.txt +0 -0
up/learn/plan.py
ADDED
|
@@ -0,0 +1,374 @@
|
|
|
1
|
+
"""PRD generation for the learning system."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import re
|
|
5
|
+
import subprocess
|
|
6
|
+
from datetime import date
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
from rich.panel import Panel
|
|
12
|
+
from rich.table import Table
|
|
13
|
+
from tqdm import tqdm
|
|
14
|
+
|
|
15
|
+
from up.ai_cli import check_ai_cli, run_ai_prompt
|
|
16
|
+
from up.learn.utils import find_skill_dir, load_profile
|
|
17
|
+
|
|
18
|
+
# Module-wide Rich console; all learn-plan output goes through this instance.
console = Console()
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def analyze_research_file(file_path: Path, workspace: Path) -> dict:
    """Analyze a single research file using the available AI CLI.

    Args:
        file_path: Markdown research document to analyze.
        workspace: Directory the AI CLI is run from (subprocess cwd).

    Returns:
        On success: ``{"file", "analysis", "cli"}``.
        On any failure: ``{"error", "file"}`` — this function never raises.
    """
    # Read as UTF-8 explicitly: Path.read_text() otherwise uses the
    # locale-preferred encoding (e.g. cp1252 on Windows), which fails on
    # UTF-8 markdown. Unreadable files return an error dict, matching the
    # never-raise contract of the rest of this function.
    try:
        content = file_path.read_text(encoding="utf-8")
    except (OSError, UnicodeDecodeError) as e:
        return {"error": str(e), "file": file_path.name}

    # Bound the prompt size to stay within CLI/context limits.
    max_chars = 10000
    if len(content) > max_chars:
        content = content[:max_chars] + "\n\n[... truncated ...]"

    prompt = f"""Analyze this research document and extract:

1. **Key Patterns** - Design patterns, methodologies, workflows mentioned
2. **Best Practices** - Actionable guidelines and recommendations
3. **Gaps** - What's missing or could be improved in a typical project
4. **Action Items** - Specific things to implement

Be concise. Return as structured markdown.

Research file ({file_path.name}):
{content}
"""

    cli_name, available = check_ai_cli()
    if not available:
        return {"error": "No AI CLI available", "file": file_path.name}

    try:
        # Both supported CLIs take a one-shot prompt via "-p".
        if cli_name == "claude":
            cmd = ["claude", "-p", prompt]
        else:
            cmd = ["agent", "-p", prompt]

        # List argv, shell=False: prompt content is passed as a single
        # argument, so no shell-quoting issues with file text.
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=300,
            cwd=workspace
        )

        if result.returncode == 0:
            return {
                "file": file_path.name,
                "analysis": result.stdout,
                "cli": cli_name
            }
        else:
            return {"error": result.stderr, "file": file_path.name}
    except subprocess.TimeoutExpired:
        return {"error": "Timeout", "file": file_path.name}
    except Exception as e:
        # Last-resort catch keeps one bad file from aborting a batch run.
        return {"error": str(e), "file": file_path.name}
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def learn_analyze(workspace: Path) -> None:
    """Analyze all research files and extract patterns with AI.

    Scans the learning-system skill directory for research, deep-analysis,
    and per-file learning markdown, runs each file through
    ``analyze_research_file``, then writes one ``*_insights.md`` per file
    plus combined ``patterns.md`` and ``gap-analysis.md`` under insights/.

    Args:
        workspace: Project root containing the learning-system skill dir.
    """
    skill_dir = find_skill_dir(workspace, "learning-system")
    research_dir = skill_dir / "research"
    deep_dir = skill_dir / "deep_analysis"
    file_learnings_dir = skill_dir / "file_learnings"
    insights_dir = skill_dir / "insights"

    # Collect all analyzable files from the three known locations.
    files_to_analyze = []

    if research_dir.exists():
        files_to_analyze.extend(research_dir.glob("*.md"))
    if deep_dir.exists():
        # Deep analysis stores extracted text as *_content.md companions.
        files_to_analyze.extend(deep_dir.glob("*_content.md"))
    if file_learnings_dir.exists():
        files_to_analyze.extend(file_learnings_dir.glob("*.md"))

    if not files_to_analyze:
        console.print("[yellow]No research or learning files found.[/]")
        console.print("Run [cyan]up learn \"topic\"[/] or [cyan]up learn \"file.md\"[/] first.")
        return

    console.print(Panel.fit(
        "[bold blue]Learning System[/] - Analyze Research",
        border_style="blue"
    ))

    console.print(f"Found [cyan]{len(files_to_analyze)}[/] files to analyze")

    cli_name, cli_available = check_ai_cli()
    if not cli_available:
        console.print("\n[yellow]No AI CLI available.[/]")
        console.print("Install Claude CLI or Cursor Agent for automatic analysis.")
        return

    console.print(f"\n[yellow]Analyzing with {cli_name}...[/]")

    all_patterns = []
    insights_dir.mkdir(parents=True, exist_ok=True)

    with tqdm(files_to_analyze, desc="Analyzing", unit="file") as pbar:
        for file_path in pbar:
            pbar.set_postfix_str(file_path.name[:30])

            result = analyze_research_file(file_path, workspace)

            # Per-file failures are reported and skipped, never fatal.
            if "error" in result:
                console.print(f"\n[red]Error analyzing {file_path.name}: {result['error']}[/]")
                continue

            # Save individual analysis. Write as UTF-8 explicitly: AI output
            # is arbitrary text, and the platform-default encoding (e.g.
            # cp1252 on Windows) could raise UnicodeEncodeError.
            analysis_file = insights_dir / f"{file_path.stem}_insights.md"
            analysis_file.write_text(f"""# Insights: {file_path.name}

**Analyzed**: {date.today().isoformat()}
**Source**: `{file_path}`

---

{result['analysis']}
""", encoding="utf-8")

            all_patterns.append(f"### From {file_path.name}\n{result['analysis']}")

    # Generate combined insights.
    joined_patterns = "\n".join(all_patterns)
    patterns_file = insights_dir / "patterns.md"
    patterns_content = f"""# Patterns Extracted

**Generated**: {date.today().isoformat()}
**Files Analyzed**: {len(files_to_analyze)}

---

{joined_patterns}
"""
    patterns_file.write_text(patterns_content, encoding="utf-8")

    # Generate gap analysis.
    file_list = "\n".join(f'- {f.name}' for f in files_to_analyze)
    gap_file = insights_dir / "gap-analysis.md"
    gap_content = f"""# Gap Analysis

**Generated**: {date.today().isoformat()}
**Based on**: {len(files_to_analyze)} research files

---

## Files Analyzed

{file_list}

## Next Steps

1. Review patterns in `patterns.md`
2. Run `up learn plan` to generate improvement PRD
"""
    gap_file.write_text(gap_content, encoding="utf-8")

    console.print("\n[green]✓[/] Analysis complete!")
    console.print("\n[bold]Generated:[/]")
    console.print(f" • [cyan]{patterns_file.relative_to(workspace)}[/]")
    console.print(f" • [cyan]{gap_file.relative_to(workspace)}[/]")
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def learn_plan(workspace: Path, output: Optional[str] = None) -> None:
    """Generate improvement plan (PRD) from analysis.

    Aggregates insight files produced by `up learn analyze`, asks the AI CLI
    to turn them into user stories (falling back to checkbox extraction from
    the insight text), and writes the result as a PRD JSON file.

    Args:
        workspace: Project root containing the learning-system skill dir.
        output: Optional path for the PRD file; defaults to
            ``<skill_dir>/prd.json``.
    """
    console.print(Panel.fit(
        "[bold blue]Learning System[/] - Generate PRD",
        border_style="blue"
    ))

    skill_dir = find_skill_dir(workspace, "learning-system")
    insights_dir = skill_dir / "insights"

    # Collect all insights
    insights_content = []

    patterns_file = insights_dir / "patterns.md"
    if patterns_file.exists():
        content = patterns_file.read_text()
        # > 100 chars filters out header-only files with no real content.
        if len(content) > 100:
            insights_content.append(f"## Patterns\n{content}")

    gap_file = insights_dir / "gap-analysis.md"
    if gap_file.exists():
        content = gap_file.read_text()
        if len(content) > 100:
            insights_content.append(f"## Gap Analysis\n{content}")

    # Read individual insight files
    # (glob on a missing insights_dir simply yields nothing, no error).
    for f in insights_dir.glob("*_insights.md"):
        content = f.read_text()
        # Cap each source at 2000 chars to keep the later prompt bounded.
        insights_content.append(f"## {f.stem}\n{content[:2000]}")

    # Read research files
    research_dir = skill_dir / "research"
    if research_dir.exists():
        for f in research_dir.glob("*.md"):
            content = f.read_text()
            # "AI Research" marker selects AI-generated research docs only
            # — presumably written by `up learn`; verify against the writer.
            if "AI Research" in content:
                insights_content.append(f"## Research: {f.stem}\n{content[:2000]}")

    if not insights_content:
        console.print("[yellow]No insights found to generate PRD.[/]")
        console.print("Run [cyan]up learn analyze[/] first.")
        return

    console.print(f"Found [cyan]{len(insights_content)}[/] insight sources")

    profile = load_profile(workspace)

    # Try AI to generate user stories
    cli_name, cli_available = check_ai_cli()
    user_stories = []

    if cli_available:
        console.print(f"\n[yellow]Generating tasks with {cli_name}...[/]")

        all_insights = "\n\n".join(insights_content)
        if len(all_insights) > 10000:
            all_insights = all_insights[:10000] + "\n\n[... truncated ...]"

        # Doubled braces {{ }} render literal JSON braces in this f-string.
        prompt = f"""Based on these insights, generate 5-10 actionable improvement tasks.

Project context:
- Languages: {', '.join(profile.get('languages', ['unknown']))}
- Frameworks: {', '.join(profile.get('frameworks', ['unknown']))}

Insights:
{all_insights}

Return ONLY a JSON array of user stories:
[
{{"id": "US-001", "title": "Short title", "description": "What to implement", "priority": "high|medium|low", "effort": "small|medium|large"}},
...
]

Focus on practical improvements. No explanation, just the JSON array."""

        result = run_ai_prompt(workspace, prompt, cli_name, timeout=120)

        if result:
            try:
                # Greedy match grabs from the first '[' to the last ']' so
                # surrounding prose around the JSON array is tolerated.
                json_match = re.search(r'\[[\s\S]*\]', result)
                if json_match:
                    user_stories = json.loads(json_match.group())
                    console.print(f"[green]✓[/] Generated {len(user_stories)} user stories")
            except json.JSONDecodeError:
                # Unparseable AI output falls through to the basic path below.
                console.print("[yellow]Could not parse AI response[/]")

    if not user_stories:
        # Fallback: extract action items from insights
        console.print("[yellow]Using basic task generation...[/]")

        all_insights = "\n".join(insights_content)
        action_items = []

        # Unchecked markdown checkboxes ("- [ ] ...") become candidate tasks.
        for line in all_insights.splitlines():
            line = line.strip()
            if line.startswith("- [ ]"):
                item = line[5:].strip()
                # len > 5 drops trivially short / empty checkbox labels.
                if item and len(item) > 5:
                    action_items.append(item)

        if not action_items:
            # Last resort: seed from the project profile, if present.
            action_items = profile.get("improvement_areas", [])

        # Build at most 10 stories; priority is inferred from keywords.
        for i, item in enumerate(action_items[:10], 1):
            priority = "medium"
            if any(w in item.lower() for w in ["immediate", "critical", "urgent"]):
                priority = "high"
            elif any(w in item.lower() for w in ["optional", "later", "future"]):
                priority = "low"

            user_stories.append({
                "id": f"US-{i:03d}",
                "title": item[:60] + ("..." if len(item) > 60 else ""),
                "description": item,
                "priority": priority,
                "effort": "medium"
            })

        if user_stories:
            console.print(f"[green]✓[/] Extracted {len(user_stories)} tasks")

    # Generate PRD
    prd = {
        "name": profile.get("name", workspace.name),
        "version": "1.0.0",
        "generated": date.today().isoformat(),
        "source": "up learn plan",
        "userStories": user_stories,
        "metadata": {
            "insights_count": len(insights_content),
            "ai_generated": cli_available and len(user_stories) > 0,
        }
    }

    # Default location sits next to the other skill artifacts.
    output_path = Path(output) if output else skill_dir / "prd.json"
    output_path.write_text(json.dumps(prd, indent=2))

    console.print(f"\n[green]✓[/] PRD generated: [cyan]{output_path}[/]")

    if user_stories:
        console.print("\n[bold]Generated User Stories:[/]")
        table = Table()
        table.add_column("ID", style="cyan")
        table.add_column("Title")
        table.add_column("Priority")

        for story in user_stories[:10]:
            table.add_row(
                story.get("id", "?"),
                story.get("title", "")[:50],
                story.get("priority", "medium")
            )
        console.print(table)

    console.print("\n[bold]Next Steps:[/]")
    console.print(f" 1. Review: [cyan]{output_path}[/]")
    console.print(" 2. Start development: [cyan]up start[/]")
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
def learn_status(workspace: Path) -> None:
    """Show learning system status.

    Renders a table marking which learning-system artifacts exist, plus a
    count of research markdown files. Read-only: nothing is created.
    """
    console.print(Panel.fit(
        "[bold blue]Learning System[/] - Status",
        border_style="blue"
    ))

    skill_dir = find_skill_dir(workspace, "learning-system")

    # Nothing to report until the skill directory has been created.
    if not skill_dir.exists():
        console.print("[yellow]Learning system not initialized.[/]")
        console.print("Run [cyan]up init[/] to set up.")
        return

    # Fixed artifacts tracked by the learning system, in display order.
    tracked = [
        ("Project Profile", skill_dir / "project_profile.json"),
        ("Sources Config", skill_dir / "sources.json"),
        ("Patterns", skill_dir / "insights/patterns.md"),
        ("Gap Analysis", skill_dir / "insights/gap-analysis.md"),
        ("PRD", skill_dir / "prd.json"),
    ]

    status_table = Table(title="Learning System Files")
    status_table.add_column("File", style="cyan")
    status_table.add_column("Status")

    for label, artifact in tracked:
        marker = "[green]✓ exists[/]" if artifact.exists() else "[dim]○ not created[/]"
        status_table.add_row(label, marker)

    console.print(status_table)

    # Research files are counted rather than listed individually.
    research_dir = skill_dir / "research"
    if research_dir.exists():
        research_count = sum(1 for _ in research_dir.glob("*.md"))
        console.print(f"\nResearch files: [cyan]{research_count}[/]")
|