up-cli 0.1.1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
Files changed (55)
  1. up/__init__.py +1 -1
  2. up/ai_cli.py +229 -0
  3. up/cli.py +75 -4
  4. up/commands/agent.py +521 -0
  5. up/commands/bisect.py +343 -0
  6. up/commands/branch.py +350 -0
  7. up/commands/dashboard.py +248 -0
  8. up/commands/init.py +195 -6
  9. up/commands/learn.py +1741 -0
  10. up/commands/memory.py +545 -0
  11. up/commands/new.py +108 -10
  12. up/commands/provenance.py +267 -0
  13. up/commands/review.py +239 -0
  14. up/commands/start.py +1124 -0
  15. up/commands/status.py +360 -0
  16. up/commands/summarize.py +122 -0
  17. up/commands/sync.py +317 -0
  18. up/commands/vibe.py +304 -0
  19. up/context.py +421 -0
  20. up/core/__init__.py +69 -0
  21. up/core/checkpoint.py +479 -0
  22. up/core/provenance.py +364 -0
  23. up/core/state.py +678 -0
  24. up/events.py +512 -0
  25. up/git/__init__.py +37 -0
  26. up/git/utils.py +270 -0
  27. up/git/worktree.py +331 -0
  28. up/learn/__init__.py +155 -0
  29. up/learn/analyzer.py +227 -0
  30. up/learn/plan.py +374 -0
  31. up/learn/research.py +511 -0
  32. up/learn/utils.py +117 -0
  33. up/memory.py +1096 -0
  34. up/parallel.py +551 -0
  35. up/summarizer.py +407 -0
  36. up/templates/__init__.py +70 -2
  37. up/templates/config/__init__.py +502 -20
  38. up/templates/docs/SKILL.md +28 -0
  39. up/templates/docs/__init__.py +341 -0
  40. up/templates/docs/standards/HEADERS.md +24 -0
  41. up/templates/docs/standards/STRUCTURE.md +18 -0
  42. up/templates/docs/standards/TEMPLATES.md +19 -0
  43. up/templates/learn/__init__.py +567 -14
  44. up/templates/loop/__init__.py +546 -27
  45. up/templates/mcp/__init__.py +474 -0
  46. up/templates/projects/__init__.py +786 -0
  47. up/ui/__init__.py +14 -0
  48. up/ui/loop_display.py +650 -0
  49. up/ui/theme.py +137 -0
  50. up_cli-0.5.0.dist-info/METADATA +519 -0
  51. up_cli-0.5.0.dist-info/RECORD +55 -0
  52. up_cli-0.1.1.dist-info/METADATA +0 -186
  53. up_cli-0.1.1.dist-info/RECORD +0 -14
  54. {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/WHEEL +0 -0
  55. {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/entry_points.txt +0 -0
up/commands/learn.py ADDED
@@ -0,0 +1,1741 @@
1
+ """up learn - Learning system CLI commands."""
2
+
3
+ import json
4
+ import os
5
+ import re
6
+ import sys
7
+ from pathlib import Path
8
+ from datetime import date
9
+
10
+ import click
11
+ from rich.console import Console
12
+ from rich.panel import Panel
13
+ from rich.table import Table
14
+
15
+ from up.ai_cli import check_ai_cli, run_ai_prompt as _run_ai_prompt
16
+
17
+ console = Console()
18
+
19
+
20
+ def check_vision_map_exists(workspace: Path) -> tuple[bool, Path]:
21
+ """Check if vision map is set up (not just template).
22
+
23
+ Returns:
24
+ (exists_and_configured, vision_path)
25
+ """
26
+ vision_path = workspace / "docs/roadmap/vision/PRODUCT_VISION.md"
27
+
28
+ if not vision_path.exists():
29
+ return False, vision_path
30
+
31
+ # Check if it's still just the template (not configured)
32
+ content = vision_path.read_text()
33
+ template_indicators = [
34
+ "One-line vision statement here",
35
+ "Problem 1 | Description",
36
+ "Metric 1 | Value",
37
+ ]
38
+
39
+ # If any template placeholder still exists, it's not properly configured
40
+ for indicator in template_indicators:
41
+ if indicator in content:
42
+ return False, vision_path
43
+
44
+ return True, vision_path
45
+
46
+
47
+ def is_valid_path(s: str) -> bool:
48
+ """Check if string looks like a path."""
49
+ # Check if it's an existing path
50
+ if Path(s).exists():
51
+ return True
52
+
53
+ # Check if it looks like a path pattern
54
+ path_indicators = ['/', '\\', './', '../', '~/', ':', 'C:\\']
55
+ return any(s.startswith(ind) or ind in s for ind in path_indicators)
56
+
57
+
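The containment check above subsumes the `startswith` test (anything that starts with an indicator also contains it), so in practice any argument containing `/`, `\`, or `:` is routed to project/file learning. A minimal illustration, assuming the 0.5.0 wheel above is installed so the module is importable:

```python
# Sketch of the dispatch heuristic (assumes up-cli 0.5.0 is installed).
from up.commands.learn import is_valid_path

assert is_valid_path("../other-project")   # relative-path pattern
assert is_valid_path("ci/cd")              # contains '/', so treated as a path
assert not is_valid_path("caching")        # plain topic (assuming no file named 'caching' in cwd)
```

One consequence worth noting: slash-containing topics such as `"ci/cd"` dispatch to project learning rather than topic learning.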
58
+ def learn_self_improvement(workspace: Path, use_ai: bool = True) -> dict:
59
+ """Analyze current project for self-improvement opportunities.
60
+
61
+ This is called when `up learn` is used without arguments.
62
+ Uses AI by default for deeper insights, with basic analysis as fallback.
63
+ """
64
+ console.print(Panel.fit(
65
+ "[bold blue]Learning System[/] - Self-Improvement Analysis",
66
+ border_style="blue"
67
+ ))
68
+
69
+ # First, analyze current state
70
+ profile = analyze_project(workspace)
71
+ if not profile:
72
+ return {}
73
+
74
+ # Load existing profile if any to track improvements
75
+ skill_dir = find_skill_dir(workspace, "learning-system")
76
+ profile_file = skill_dir / "project_profile.json"
77
+ old_profile = {}
78
+ if profile_file.exists():
79
+ try:
80
+ old_profile = json.loads(profile_file.read_text())
81
+ except json.JSONDecodeError:
82
+ pass
83
+
84
+ # Identify what changed since last analysis
85
+ improvements = {
86
+ "new_patterns": [],
87
+ "new_frameworks": [],
88
+ "addressed_improvements": [],
89
+ "remaining_improvements": [],
90
+ }
91
+
92
+ # Check for new patterns
93
+ old_patterns = set(old_profile.get("patterns_detected", []))
94
+ new_patterns = set(profile.get("patterns_detected", []))
95
+ improvements["new_patterns"] = list(new_patterns - old_patterns)
96
+
97
+ # Check for new frameworks
98
+ old_frameworks = set(old_profile.get("frameworks", []))
99
+ new_frameworks = set(profile.get("frameworks", []))
100
+ improvements["new_frameworks"] = list(new_frameworks - old_frameworks)
101
+
102
+ # Check addressed improvements
103
+ old_areas = set(old_profile.get("improvement_areas", []))
104
+ new_areas = set(profile.get("improvement_areas", []))
105
+ improvements["addressed_improvements"] = list(old_areas - new_areas)
106
+ improvements["remaining_improvements"] = list(new_areas)
107
+
108
+ # Display results
109
+ display_profile(profile)
110
+
111
+ if improvements["new_patterns"]:
112
+ console.print("\n[green]✓ New Patterns Adopted:[/]")
113
+ for p in improvements["new_patterns"]:
114
+ console.print(f" • {p}")
115
+
116
+ if improvements["addressed_improvements"]:
117
+ console.print("\n[green]✓ Improvements Addressed:[/]")
118
+ for a in improvements["addressed_improvements"]:
119
+ console.print(f" • {a}")
120
+
121
+ if improvements["remaining_improvements"]:
122
+ console.print("\n[yellow]○ Areas for Improvement:[/]")
123
+ for r in improvements["remaining_improvements"]:
124
+ console.print(f" • {r}")
125
+
126
+ # Save updated profile
127
+ save_path = save_profile(workspace, profile)
128
+ console.print(f"\n[green]✓[/] Profile updated: [cyan]{save_path}[/]")
129
+
130
+ # Record learnings to memory
131
+ _record_learning_to_memory(workspace, profile, improvements)
132
+
133
+ return improvements
134
+
135
+
136
+ def learn_from_topic(workspace: Path, topic: str, use_ai: bool = True) -> dict:
137
+ """Learn in a specific direction provided by the user.
138
+
139
+ This is called when `up learn "topic"` is used.
140
+ Uses AI by default for research generation, with basic analysis as fallback.
141
+ """
142
+ console.print(Panel.fit(
143
+ f"[bold blue]Learning System[/] - Focused Learning: {topic}",
144
+ border_style="blue"
145
+ ))
146
+
147
+ # Check current project profile
148
+ profile = analyze_project(workspace)
149
+
150
+ # Generate learning plan for the topic
151
+ learning = {
152
+ "topic": topic,
153
+ "project_context": {
154
+ "languages": profile.get("languages", []),
155
+ "frameworks": profile.get("frameworks", []),
156
+ },
157
+ "learning_areas": [],
158
+ "action_items": [],
159
+ "ai_research": None,
160
+ }
161
+
162
+ # Try AI-powered research first
163
+ if use_ai:
164
+ cli_name, cli_available = check_ai_cli()
165
+ if cli_available:
166
+ console.print(f"\n[yellow]Researching with {cli_name}...[/]")
167
+ ai_result = _ai_research_topic(workspace, topic, profile, cli_name)
168
+ if ai_result:
169
+ learning["ai_research"] = ai_result
170
+
171
+ # Map topic to relevant areas (fallback or supplement)
172
+ topic_lower = topic.lower()
173
+
174
+ # Categorize the topic
175
+ categories = {
176
+ "testing": ["test", "testing", "unit test", "integration", "coverage", "pytest", "jest"],
177
+ "architecture": ["architecture", "pattern", "design", "structure", "clean", "solid", "ddd"],
178
+ "performance": ["performance", "speed", "fast", "optimize", "cache", "caching"],
179
+ "security": ["security", "auth", "authentication", "authorization", "jwt", "oauth"],
180
+ "api": ["api", "rest", "graphql", "endpoint", "route"],
181
+ "database": ["database", "db", "sql", "orm", "migration", "query"],
182
+ "documentation": ["doc", "documentation", "readme", "comment"],
183
+ "ci_cd": ["ci", "cd", "deploy", "pipeline", "github actions", "jenkins"],
184
+ "error_handling": ["error", "exception", "handling", "logging", "monitoring"],
185
+ }
186
+
187
+ matched_categories = []
188
+ for cat, keywords in categories.items():
189
+ if any(kw in topic_lower for kw in keywords):
190
+ matched_categories.append(cat)
191
+
192
+ # Generate learning areas based on categories and frameworks
193
+ if matched_categories:
194
+ for cat in matched_categories:
195
+ for fw in profile.get("frameworks", []):
196
+ learning["learning_areas"].append(f"{fw} {cat} best practices")
197
+ learning["learning_areas"].append(f"{topic} patterns")
198
+ else:
199
+ # General topic
200
+ learning["learning_areas"].append(f"{topic} implementation")
201
+ for fw in profile.get("frameworks", []):
202
+ learning["learning_areas"].append(f"{topic} in {fw}")
203
+
204
+ # Generate action items
205
+ learning["action_items"] = [
206
+ f"Research {topic} best practices",
207
+ f"Review current codebase for {topic} patterns",
208
+ f"Identify gaps in {topic} implementation",
209
+ f"Create improvement plan for {topic}",
210
+ ]
211
+
212
+ # Display AI research if available
213
+ if learning.get("ai_research"):
214
+ console.print("\n[green]✓ AI Research Complete[/]")
215
+ console.print(Panel(learning["ai_research"], title=f"Research: {topic}", border_style="green"))
216
+ else:
217
+ # Show basic analysis
218
+ console.print("\n[bold]Learning Focus:[/]")
219
+ console.print(f" Topic: [cyan]{topic}[/]")
220
+
221
+ console.print("\n[bold]Areas to Research:[/]")
222
+ for area in learning["learning_areas"][:5]:
223
+ console.print(f" • {area}")
224
+
225
+ console.print("\n[bold]Action Items:[/]")
226
+ for item in learning["action_items"]:
227
+ console.print(f" □ {item}")
228
+
229
+ # Save learning plan
230
+ skill_dir = find_skill_dir(workspace, "learning-system")
231
+ skill_dir.mkdir(parents=True, exist_ok=True)
232
+ research_dir = skill_dir / "research"
233
+ research_dir.mkdir(exist_ok=True)
234
+
235
+ # Create research file
236
+ safe_topic = re.sub(r'[^\w\s-]', '', topic).strip().replace(' ', '_').lower()
237
+ research_file = research_dir / f"{date.today().isoformat()}_{safe_topic}.md"
238
+
239
+ # Include AI research in the file
240
+ ai_section = ""
241
+ if learning.get("ai_research"):
242
+ ai_section = f"""## AI Research
243
+
244
+ {learning["ai_research"]}
245
+
246
+ ---
247
+
248
+ """
249
+
250
+ research_content = f"""# Learning: {topic}
251
+
252
+ **Created**: {date.today().isoformat()}
253
+ **Status**: {"✅ Researched" if learning.get("ai_research") else "📋 In Progress"}
254
+ **Method**: {"AI-powered" if learning.get("ai_research") else "Basic analysis"}
255
+
256
+ ## Context
257
+
258
+ Project languages: {', '.join(profile.get('languages', ['N/A']))}
259
+ Project frameworks: {', '.join(profile.get('frameworks', ['N/A']))}
260
+
261
+ {ai_section}## Learning Areas
262
+
263
+ {chr(10).join(f'- [ ] {area}' for area in learning['learning_areas'])}
264
+
265
+ ## Action Items
266
+
267
+ {chr(10).join(f'- [ ] {item}' for item in learning['action_items'])}
268
+
269
+ ## Applied Changes
270
+
271
+ *Track changes made based on learnings*
272
+ """
273
+
274
+ research_file.write_text(research_content)
275
+ console.print(f"\n[green]✓[/] Research file created: [cyan]{research_file}[/]")
276
+
277
+ # Record to memory
278
+ _record_topic_learning(workspace, learning)
279
+
280
+ console.print("\n[bold]Next Steps:[/]")
281
+ console.print(f" 1. Review [cyan]{research_file}[/]")
282
+ console.print(" 2. Run [cyan]up learn analyze[/] to process all research")
283
+ console.print(" 3. Run [cyan]up learn plan[/] to generate improvement PRD")
284
+
285
+ return learning
286
+
287
+
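To make the keyword-to-category mapping concrete, here is a condensed, self-contained sketch of how a topic plus the detected frameworks become learning areas. The category table is abbreviated and the function name is illustrative, not part of the package:

```python
# Condensed sketch of the learning-area generation in learn_from_topic.
CATEGORIES = {
    "testing": ["test", "coverage", "pytest"],
    "performance": ["performance", "optimize", "cache", "caching"],
}

def learning_areas(topic: str, frameworks: list[str]) -> list[str]:
    topic_lower = topic.lower()
    matched = [cat for cat, kws in CATEGORIES.items()
               if any(kw in topic_lower for kw in kws)]
    areas: list[str] = []
    if matched:
        for cat in matched:
            areas += [f"{fw} {cat} best practices" for fw in frameworks]
        areas.append(f"{topic} patterns")
    else:
        areas.append(f"{topic} implementation")
        areas += [f"{topic} in {fw}" for fw in frameworks]
    return areas

print(learning_areas("caching", ["FastAPI"]))
# ['FastAPI performance best practices', 'caching patterns']
```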
288
+ def learn_from_file(workspace: Path, file_path: str, use_ai: bool = True) -> dict:
289
+ """Learn from a single file (markdown, code, config, etc.).
290
+
291
+ Uses AI by default for deep analysis, with basic extraction as fallback.
292
+ """
293
+ source_file = Path(file_path).expanduser().resolve()
294
+
295
+ if not source_file.exists():
296
+ console.print(f"[red]Error: File not found: {file_path}[/]")
297
+ return {}
298
+
299
+ console.print(Panel.fit(
300
+ f"[bold blue]Learning System[/] - Learn from File: {source_file.name}",
301
+ border_style="blue"
302
+ ))
303
+
304
+ # Read file content
305
+ try:
306
+ content = source_file.read_text()
307
+ except Exception as e:
308
+ console.print(f"[red]Error reading file: {e}[/]")
309
+ return {}
310
+
311
+ file_ext = source_file.suffix.lower()
312
+ learnings = {
313
+ "source_file": source_file.name,
314
+ "source_path": str(source_file),
315
+ "file_type": file_ext,
316
+ "key_concepts": [],
317
+ "patterns_found": [],
318
+ "best_practices": [],
319
+ "code_snippets": [],
320
+ "action_items": [],
321
+ "ai_analysis": None,
322
+ }
323
+
324
+ # Try AI analysis first
325
+ ai_success = False
326
+ if use_ai:
327
+ cli_name, cli_available = check_ai_cli()
328
+ if cli_available:
329
+ console.print(f"\n[yellow]Analyzing with {cli_name}...[/]")
330
+ ai_result = _ai_analyze_file(workspace, content, source_file.name, cli_name)
331
+ if ai_result:
332
+ learnings["ai_analysis"] = ai_result
333
+ ai_success = True
334
+
335
+ # Always do basic extraction (supplements AI or provides fallback)
336
+ if file_ext in ['.md', '.markdown', '.txt', '.rst']:
337
+ learnings = _analyze_documentation_file(content, learnings)
338
+ elif file_ext in ['.py']:
339
+ learnings = _analyze_python_file(content, learnings)
340
+ elif file_ext in ['.js', '.ts', '.tsx', '.jsx']:
341
+ learnings = _analyze_javascript_file(content, learnings)
342
+ elif file_ext in ['.json', '.yaml', '.yml', '.toml']:
343
+ learnings = _analyze_config_file(content, learnings, file_ext)
344
+ else:
345
+ learnings = _analyze_generic_file(content, learnings)
346
+
347
+ # Display results
348
+ console.print(f"\n[bold]File:[/] {source_file.name}")
349
+ console.print(f"[bold]Type:[/] {file_ext or 'unknown'}")
350
+ console.print(f"[bold]Size:[/] {len(content)} characters, {len(content.splitlines())} lines")
351
+
352
+ # Show AI analysis if available
353
+ if learnings.get("ai_analysis"):
354
+ console.print("\n[green]✓ AI Analysis Complete[/]")
355
+ console.print(Panel(learnings["ai_analysis"], title="AI Insights", border_style="green"))
356
+ else:
357
+ # Show basic extraction results
358
+ if learnings["key_concepts"]:
359
+ console.print("\n[green]📚 Key Concepts:[/]")
360
+ for c in learnings["key_concepts"][:10]:
361
+ console.print(f" • {c}")
362
+
363
+ if learnings["patterns_found"]:
364
+ console.print("\n[blue]🔷 Patterns Found:[/]")
365
+ for p in learnings["patterns_found"][:10]:
366
+ console.print(f" • {p}")
367
+
368
+ if learnings["best_practices"]:
369
+ console.print("\n[yellow]✨ Best Practices:[/]")
370
+ for b in learnings["best_practices"][:10]:
371
+ console.print(f" • {b}")
372
+
373
+ if learnings["code_snippets"]:
374
+ console.print("\n[cyan]💻 Notable Code Patterns:[/]")
375
+ for s in learnings["code_snippets"][:5]:
376
+ console.print(f" • {s}")
377
+
378
+ # Save learnings
379
+ skill_dir = find_skill_dir(workspace, "learning-system")
380
+ skill_dir.mkdir(parents=True, exist_ok=True)
381
+ learnings_dir = skill_dir / "file_learnings"
382
+ learnings_dir.mkdir(exist_ok=True)
383
+
384
+ safe_name = re.sub(r'[^\w\s-]', '', source_file.stem).strip().replace(' ', '_').lower()
385
+ learning_file = learnings_dir / f"{date.today().isoformat()}_{safe_name}.json"
386
+ learning_file.write_text(json.dumps(learnings, indent=2))
387
+
388
+ # Create markdown summary
389
+ summary_file = learnings_dir / f"{date.today().isoformat()}_{safe_name}.md"
390
+
391
+ # Include AI analysis if available
392
+ ai_section = ""
393
+ if learnings.get("ai_analysis"):
394
+ ai_section = f"""## AI Analysis
395
+
396
+ {learnings["ai_analysis"]}
397
+
398
+ ---
399
+
400
+ """
401
+
402
+ summary_content = f"""# Learnings from: {source_file.name}
403
+
404
+ **Analyzed**: {date.today().isoformat()}
405
+ **Source**: `{source_file}`
406
+ **Type**: {file_ext or 'unknown'}
407
+ **Method**: {"AI-powered" if learnings.get("ai_analysis") else "Basic extraction"}
408
+
409
+ {ai_section}## Key Concepts (Basic Extraction)
410
+
411
+ {chr(10).join(f'- {c}' for c in learnings['key_concepts']) or '- None extracted'}
412
+
413
+ ## Patterns Found
414
+
415
+ {chr(10).join(f'- [ ] {p}' for p in learnings['patterns_found']) or '- None identified'}
416
+
417
+ ## Best Practices
418
+
419
+ {chr(10).join(f'- [ ] {b}' for b in learnings['best_practices']) or '- None identified'}
420
+
421
+ ## Action Items
422
+
423
+ - [ ] Review insights and apply to project
424
+ - [ ] Run `up learn analyze` to process all learnings
425
+ - [ ] Run `up learn plan` to generate improvement PRD
426
+ """
427
+ summary_file.write_text(summary_content)
428
+
429
+ console.print(f"\n[green]✓[/] Learnings saved to: [cyan]{summary_file}[/]")
430
+
431
+ # Record to memory
432
+ _record_file_learning(workspace, learnings)
433
+
434
+ console.print("\n[bold]Next Steps:[/]")
435
+ console.print(f" 1. Review [cyan]{summary_file}[/]")
436
+ console.print(" 2. Run [cyan]up learn analyze[/] to process all research")
437
+ console.print(" 3. Run [cyan]up learn plan[/] to generate improvement PRD")
438
+
439
+ return learnings
440
+
441
+
442
+ def _analyze_documentation_file(content: str, learnings: dict) -> dict:
443
+ """Extract insights from markdown/documentation files."""
444
+ lines = content.splitlines()
445
+
446
+ # Extract headers as key concepts
447
+ headers = []
448
+ for line in lines:
449
+ if line.startswith('#'):
450
+ header = line.lstrip('#').strip()
451
+ if header and len(header) > 2:
452
+ headers.append(header)
453
+ learnings["key_concepts"] = headers[:15]
454
+
455
+ # Look for patterns in content
456
+ pattern_keywords = [
457
+ ('pattern', 'Design pattern mentioned'),
458
+ ('best practice', 'Best practice documented'),
459
+ ('principle', 'Principle defined'),
460
+ ('architecture', 'Architecture concept'),
461
+ ('workflow', 'Workflow described'),
462
+ ('convention', 'Convention defined'),
463
+ ('standard', 'Standard referenced'),
464
+ ('guideline', 'Guideline provided'),
465
+ ]
466
+
467
+ content_lower = content.lower()
468
+ for keyword, description in pattern_keywords:
469
+ if keyword in content_lower:
470
+ learnings["patterns_found"].append(description)
471
+
472
+ # Extract bullet points as potential best practices
473
+ for line in lines:
474
+ line = line.strip()
475
+ if line.startswith(('- ', '* ', '• ')) and len(line) > 10:
476
+ practice = line.lstrip('-*• ').strip()
477
+ if len(practice) > 15 and len(practice) < 200:
478
+ learnings["best_practices"].append(practice[:100])
479
+ if len(learnings["best_practices"]) >= 10:
480
+ break
481
+
482
+ # Extract code blocks
483
+ in_code_block = False
484
+ code_lang = ""
485
+ for line in lines:
486
+ if line.startswith('```'):
487
+ if not in_code_block:
488
+ in_code_block = True
489
+ code_lang = line[3:].strip()
490
+ if code_lang:
491
+ learnings["code_snippets"].append(f"Code example in {code_lang}")
492
+ else:
493
+ in_code_block = False
494
+ code_lang = ""
495
+
496
+ return learnings
497
+
498
+
499
+ def _analyze_python_file(content: str, learnings: dict) -> dict:
500
+ """Extract patterns from Python code."""
501
+ lines = content.splitlines()
502
+
503
+ # Pattern detection
504
+ patterns = {
505
+ r'class.*Repository': 'Repository Pattern',
506
+ r'class.*Service': 'Service Layer Pattern',
507
+ r'class.*Factory': 'Factory Pattern',
508
+ r'@dataclass': 'Dataclass usage',
509
+ r'@property': 'Property decorators',
510
+ r'async def': 'Async/Await pattern',
511
+ r'def test_': 'Unit test pattern',
512
+ r'Protocol\)': 'Protocol (interface) pattern',
513
+ r'@abstractmethod': 'Abstract base class',
514
+ r'TypeVar': 'Generic types',
515
+ r'Callable\[': 'Callable types',
516
+ r'contextmanager': 'Context manager pattern',
517
+ r'@staticmethod': 'Static methods',
518
+ r'@classmethod': 'Class methods',
519
+ r'__enter__': 'Context manager implementation',
520
+ r'yield': 'Generator pattern',
521
+ }
522
+
523
+ for pattern, name in patterns.items():
524
+ if re.search(pattern, content, re.IGNORECASE):
525
+ learnings["patterns_found"].append(name)
526
+
527
+ # Extract class and function names as concepts
528
+ for line in lines:
529
+ if line.strip().startswith('class '):
530
+ match = re.match(r'class\s+(\w+)', line.strip())
531
+ if match:
532
+ learnings["key_concepts"].append(f"Class: {match.group(1)}")
533
+ elif line.strip().startswith('def '):
534
+ match = re.match(r'def\s+(\w+)', line.strip())
535
+ if match and not match.group(1).startswith('_'):
536
+ learnings["key_concepts"].append(f"Function: {match.group(1)}")
537
+
538
+ learnings["key_concepts"] = learnings["key_concepts"][:15]
539
+
540
+ # Extract docstrings as best practices
541
+ docstring_pattern = r'"""([^"]+)"""'
542
+ docstrings = re.findall(docstring_pattern, content)
543
+ for doc in docstrings[:5]:
544
+ doc = doc.strip().split('\n')[0] # First line only
545
+ if len(doc) > 20 and len(doc) < 150:
546
+ learnings["best_practices"].append(doc)
547
+
548
+ # Note decorators and imports
549
+ imports = [line for line in lines if line.startswith(('import ', 'from '))]
550
+ if imports:
551
+ learnings["code_snippets"].append(f"Uses {len(imports)} imports")
552
+
553
+ decorators = [line.strip() for line in lines if line.strip().startswith('@')]
554
+ if decorators:
555
+ unique_decorators = list(set(d.split('(')[0] for d in decorators))[:5]
556
+ learnings["code_snippets"].append(f"Decorators: {', '.join(unique_decorators)}")
557
+
558
+ return learnings
559
+
560
+
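The detection above is regex-over-text rather than AST-based, so it flags any textual occurrence (including in comments), and `re.IGNORECASE` makes the matches case-insensitive. A quick demonstration, assuming the wheel is installed (the helper mutates and returns the dict passed in):

```python
# Assumes up-cli 0.5.0 is installed.
from up.commands.learn import _analyze_python_file

sample = (
    "@dataclass\n"
    "class UserRepository:\n"
    "    async def fetch(self): ...\n"
)
learnings = {"patterns_found": [], "key_concepts": [],
             "best_practices": [], "code_snippets": []}
print(_analyze_python_file(sample, learnings)["patterns_found"])
# ['Repository Pattern', 'Dataclass usage', 'Async/Await pattern']
```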
561
+ def _analyze_javascript_file(content: str, learnings: dict) -> dict:
562
+ """Extract patterns from JavaScript/TypeScript code."""
563
+ lines = content.splitlines()
564
+
565
+ # Pattern detection
566
+ patterns = {
567
+ r'async\s+function': 'Async functions',
568
+ r'await\s+': 'Await usage',
569
+ r'export\s+default': 'Default exports',
570
+ r'export\s+const': 'Named exports',
571
+ r'interface\s+': 'TypeScript interfaces',
572
+ r'type\s+\w+\s*=': 'Type aliases',
573
+ r'useState': 'React useState hook',
574
+ r'useEffect': 'React useEffect hook',
575
+ r'useCallback': 'React useCallback hook',
576
+ r'useMemo': 'React useMemo hook',
577
+ r'React\.memo': 'React memoization',
578
+ r'class\s+\w+\s+extends': 'Class inheritance',
579
+ r'=>': 'Arrow functions',
580
+ r'Promise': 'Promise usage',
581
+ r'try\s*{': 'Error handling',
582
+ }
583
+
584
+ for pattern, name in patterns.items():
585
+ if re.search(pattern, content):
586
+ learnings["patterns_found"].append(name)
587
+
588
+ # Extract exports and components
589
+ for line in lines:
590
+ if 'export' in line:
591
+ match = re.search(r'export\s+(?:default\s+)?(?:function|const|class)\s+(\w+)', line)
592
+ if match:
593
+ learnings["key_concepts"].append(f"Export: {match.group(1)}")
594
+
595
+ learnings["key_concepts"] = learnings["key_concepts"][:15]
596
+
597
+ # Extract JSDoc comments
598
+ jsdoc_pattern = r'/\*\*([^*]+)\*/'
599
+ jsdocs = re.findall(jsdoc_pattern, content)
600
+ for doc in jsdocs[:5]:
601
+ doc = doc.strip().split('\n')[0].strip('* ')
602
+ if len(doc) > 20 and len(doc) < 150:
603
+ learnings["best_practices"].append(doc)
604
+
605
+ return learnings
606
+
607
+
608
+ def _analyze_config_file(content: str, learnings: dict, file_ext: str) -> dict:
609
+ """Extract insights from configuration files."""
610
+ learnings["key_concepts"].append(f"Configuration file ({file_ext})")
611
+
612
+ if file_ext == '.json':
613
+ try:
614
+ data = json.loads(content)
615
+ if isinstance(data, dict):
616
+ learnings["key_concepts"].extend([f"Config key: {k}" for k in list(data.keys())[:10]])
617
+ except json.JSONDecodeError:
618
+ pass
619
+
620
+ elif file_ext in ['.yaml', '.yml']:
621
+ # Simple YAML key extraction
622
+ for line in content.splitlines():
623
+ if ':' in line and not line.strip().startswith('#'):
624
+ key = line.split(':')[0].strip()
625
+ if key and not key.startswith('-'):
626
+ learnings["key_concepts"].append(f"Config: {key}")
627
+ if len(learnings["key_concepts"]) >= 15:
628
+ break
629
+
630
+ elif file_ext == '.toml':
631
+ # Simple TOML section extraction
632
+ for line in content.splitlines():
633
+ if line.strip().startswith('['):
634
+ section = line.strip().strip('[]')
635
+ learnings["key_concepts"].append(f"Section: {section}")
636
+
637
+ # Look for common config patterns
638
+ config_patterns = [
639
+ ('database', 'Database configuration'),
640
+ ('cache', 'Caching configuration'),
641
+ ('logging', 'Logging configuration'),
642
+ ('auth', 'Authentication configuration'),
643
+ ('api', 'API configuration'),
644
+ ('env', 'Environment configuration'),
645
+ ]
646
+
647
+ content_lower = content.lower()
648
+ for keyword, description in config_patterns:
649
+ if keyword in content_lower:
650
+ learnings["patterns_found"].append(description)
651
+
652
+ return learnings
653
+
654
+
655
+ def _analyze_generic_file(content: str, learnings: dict) -> dict:
656
+ """Generic file analysis for unknown types."""
657
+ lines = content.splitlines()
658
+
659
+ # Extract non-empty lines as potential concepts
660
+ for line in lines[:20]:
661
+ line = line.strip()
662
+ if line and len(line) > 10 and len(line) < 100:
663
+ learnings["key_concepts"].append(line[:80])
664
+
665
+ learnings["key_concepts"] = learnings["key_concepts"][:10]
666
+
667
+ return learnings
668
+
669
+
670
+ def _record_file_learning(workspace: Path, learnings: dict) -> None:
671
+ """Record file learning to memory system."""
672
+ try:
673
+ from up.memory import MemoryManager
674
+
675
+ manager = MemoryManager(workspace, use_vectors=False)
676
+ content = f"Learned from file: {learnings['source_file']}. "
677
+ if learnings['patterns_found']:
678
+ content += f"Patterns: {', '.join(learnings['patterns_found'][:3])}"
679
+ manager.record_learning(content)
680
+ except Exception:
681
+ pass
682
+
683
+
684
+ def learn_from_project(workspace: Path, project_path: str, use_ai: bool = True) -> dict:
685
+ """Analyze external project for good design patterns.
686
+
687
+ This is called when `up learn "project/path"` is used.
688
+ Uses AI by default for deeper comparison insights.
689
+ """
690
+ external_project = Path(project_path).expanduser().resolve()
691
+
692
+ if not external_project.exists():
693
+ console.print(f"[red]Error: Path not found: {project_path}[/]")
694
+ return {}
695
+
696
+ # If it's a file, use file learning
697
+ if external_project.is_file():
698
+ return learn_from_file(workspace, project_path, use_ai=use_ai)
699
+
700
+ console.print(Panel.fit(
701
+ f"[bold blue]Learning System[/] - Learn from Project: {external_project.name}",
702
+ border_style="blue"
703
+ ))
704
+
705
+ # Analyze the external project
706
+ console.print("\n[bold]Analyzing External Project...[/]")
707
+ external_profile = analyze_project(external_project)
708
+
709
+ # Analyze current project
710
+ console.print("\n[bold]Analyzing Current Project...[/]")
711
+ current_profile = analyze_project(workspace)
712
+
713
+ # Compare and find learnable patterns
714
+ learnings = {
715
+ "source_project": external_project.name,
716
+ "source_path": str(external_project),
717
+ "patterns_to_adopt": [],
718
+ "frameworks_to_consider": [],
719
+ "structure_insights": [],
720
+ "file_organization": [],
721
+ }
722
+
723
+ # Find patterns in external project that current project doesn't have
724
+ current_patterns = set(current_profile.get("patterns_detected", []))
725
+ external_patterns = set(external_profile.get("patterns_detected", []))
726
+ new_patterns = external_patterns - current_patterns
727
+ learnings["patterns_to_adopt"] = list(new_patterns)
728
+
729
+ # Find frameworks to consider
730
+ current_frameworks = set(current_profile.get("frameworks", []))
731
+ external_frameworks = set(external_profile.get("frameworks", []))
732
+
733
+ # Only suggest frameworks for same languages
734
+ common_languages = set(current_profile.get("languages", [])) & set(external_profile.get("languages", []))
735
+ if common_languages:
736
+ new_frameworks = external_frameworks - current_frameworks
737
+ learnings["frameworks_to_consider"] = list(new_frameworks)
738
+
739
+ # Analyze file structure
740
+ structure_insights = _analyze_project_structure(external_project)
741
+ learnings["structure_insights"] = structure_insights
742
+
743
+ # Display external project profile
744
+ console.print("\n[bold]External Project Profile:[/]")
745
+ display_profile(external_profile)
746
+
747
+ # Display comparison
748
+ console.print("\n[bold]Comparison with Current Project:[/]")
749
+
750
+ table = Table()
751
+ table.add_column("Aspect", style="cyan")
752
+ table.add_column("Current Project")
753
+ table.add_column("External Project")
754
+
755
+ table.add_row(
756
+ "Languages",
757
+ ", ".join(current_profile.get("languages", [])) or "None",
758
+ ", ".join(external_profile.get("languages", [])) or "None"
759
+ )
760
+ table.add_row(
761
+ "Frameworks",
762
+ ", ".join(current_profile.get("frameworks", [])) or "None",
763
+ ", ".join(external_profile.get("frameworks", [])) or "None"
764
+ )
765
+ table.add_row(
766
+ "Patterns",
767
+ ", ".join(current_profile.get("patterns_detected", [])) or "None",
768
+ ", ".join(external_profile.get("patterns_detected", [])) or "None"
769
+ )
770
+
771
+ console.print(table)
772
+
773
+ # Display learnings
774
+ if learnings["patterns_to_adopt"]:
775
+ console.print("\n[green]✓ Patterns to Consider Adopting:[/]")
776
+ for p in learnings["patterns_to_adopt"]:
777
+ console.print(f" • {p}")
778
+
779
+ if learnings["frameworks_to_consider"]:
780
+ console.print("\n[yellow]○ Frameworks to Consider:[/]")
781
+ for f in learnings["frameworks_to_consider"]:
782
+ console.print(f" • {f}")
783
+
784
+ if learnings["structure_insights"]:
785
+ console.print("\n[blue]📁 Structure Insights:[/]")
786
+ for s in learnings["structure_insights"]:
787
+ console.print(f" • {s}")
788
+
789
+ # Save learnings
790
+ skill_dir = find_skill_dir(workspace, "learning-system")
791
+ skill_dir.mkdir(parents=True, exist_ok=True)
792
+ learnings_dir = skill_dir / "external_learnings"
793
+ learnings_dir.mkdir(exist_ok=True)
794
+
795
+ safe_name = re.sub(r'[^\w\s-]', '', external_project.name).strip().replace(' ', '_').lower()
796
+ learning_file = learnings_dir / f"{date.today().isoformat()}_{safe_name}.json"
797
+ learning_file.write_text(json.dumps(learnings, indent=2))
798
+
799
+ # Also create a markdown summary
800
+ summary_file = learnings_dir / f"{date.today().isoformat()}_{safe_name}.md"
801
+ summary_content = f"""# Learnings from: {external_project.name}
802
+
803
+ **Analyzed**: {date.today().isoformat()}
804
+ **Source**: `{external_project}`
805
+
806
+ ## Patterns to Adopt
807
+
808
+ {chr(10).join(f'- [ ] {p}' for p in learnings['patterns_to_adopt']) or '- None identified'}
809
+
810
+ ## Frameworks to Consider
811
+
812
+ {chr(10).join(f'- [ ] {f}' for f in learnings['frameworks_to_consider']) or '- None identified'}
813
+
814
+ ## Structure Insights
815
+
816
+ {chr(10).join(f'- {s}' for s in learnings['structure_insights']) or '- None identified'}
817
+
818
+ ## Action Items
819
+
820
+ - [ ] Review patterns and decide which to adopt
821
+ - [ ] Create implementation plan for chosen patterns
822
+ - [ ] Apply learnings to current project
823
+ """
824
+ summary_file.write_text(summary_content)
825
+
826
+ console.print(f"\n[green]✓[/] Learnings saved to: [cyan]{summary_file}[/]")
827
+
828
+ # Record to memory
829
+ _record_external_learning(workspace, learnings)
830
+
831
+ console.print("\n[bold]Next Steps:[/]")
832
+ console.print(f" 1. Review [cyan]{summary_file}[/]")
833
+ console.print(" 2. Select patterns to implement")
834
+ console.print(" 3. Run [cyan]up learn plan[/] to create improvement PRD")
835
+
836
+ return learnings
837
+
838
+
839
+ def _analyze_project_structure(project_path: Path) -> list:
840
+ """Analyze project directory structure for insights."""
841
+ insights = []
842
+
843
+ # Check for common good practices
844
+ good_patterns = {
845
+ "src": "Source code organization in src/ directory",
846
+ "tests": "Dedicated tests/ directory",
847
+ "docs": "Documentation directory present",
848
+ ".github": "GitHub workflows/CI present",
849
+ "scripts": "Automation scripts directory",
850
+ "__init__.py": "Proper Python package structure",
851
+ "pyproject.toml": "Modern Python packaging (PEP 517)",
852
+ "Makefile": "Make-based automation",
853
+ "docker-compose": "Docker containerization",
854
+ }
855
+
856
+ for pattern, description in good_patterns.items():
857
+ if (project_path / pattern).exists() or any(project_path.glob(f"**/{pattern}")):
858
+ insights.append(description)
859
+
860
+ return insights[:5] # Limit to top 5
861
+
862
+
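One subtlety in the scan above: `glob(f"**/{pattern}")` requires an exact filename match, so the `docker-compose` entry only fires for a file literally named `docker-compose`, not `docker-compose.yml`. A quick check of the glob semantics:

```python
# Demonstrates the exact-name behavior of the recursive glob used above.
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    root = Path(tmp)
    (root / "docker-compose.yml").touch()
    print(any(root.glob("**/docker-compose")))   # False: exact name required
    print(any(root.glob("**/docker-compose*")))  # True: wildcard matches the suffix
```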
863
+ def _record_learning_to_memory(workspace: Path, profile: dict, improvements: dict) -> None:
864
+ """Record self-improvement learnings to memory system."""
865
+ try:
866
+ from up.memory import MemoryManager
867
+
868
+ manager = MemoryManager(workspace, use_vectors=False) # Fast mode
869
+
870
+ content = f"Self-improvement analysis: Found {len(profile.get('patterns_detected', []))} patterns, "
871
+ content += f"{len(improvements.get('new_patterns', []))} new patterns adopted, "
872
+ content += f"{len(improvements.get('remaining_improvements', []))} areas for improvement"
873
+
874
+ if improvements.get('new_patterns'):
875
+ content += f". New patterns: {', '.join(improvements['new_patterns'])}"
876
+
877
+ manager.record_learning(content)
878
+ except Exception:
879
+ pass # Memory recording is optional
880
+
881
+
882
+ def _record_topic_learning(workspace: Path, learning: dict) -> None:
883
+ """Record topic learning to memory system."""
884
+ try:
885
+ from up.memory import MemoryManager
886
+
887
+ manager = MemoryManager(workspace, use_vectors=False)
888
+ content = f"Started learning about: {learning['topic']}. "
889
+ content += f"Research areas: {', '.join(learning['learning_areas'][:3])}"
890
+ manager.record_learning(content)
891
+ except Exception:
892
+ pass
893
+
894
+
895
+ def _record_external_learning(workspace: Path, learnings: dict) -> None:
896
+ """Record external project learning to memory system."""
897
+ try:
898
+ from up.memory import MemoryManager
899
+
900
+ manager = MemoryManager(workspace, use_vectors=False)
901
+ content = f"Learned from external project: {learnings['source_project']}. "
902
+ if learnings['patterns_to_adopt']:
903
+ content += f"Patterns to adopt: {', '.join(learnings['patterns_to_adopt'][:3])}"
904
+ manager.record_learning(content)
905
+ except Exception:
906
+ pass
907
+
908
+
909
+ def _ai_research_topic(workspace: Path, topic: str, profile: dict, cli_name: str) -> str | None:
910
+ """Use AI to research a topic in context of the project."""
911
+ languages = ", ".join(profile.get("languages", [])) or "unknown"
912
+ frameworks = ", ".join(profile.get("frameworks", [])) or "none"
913
+
914
+ prompt = f"""Research the topic "{topic}" for a software project with:
915
+ - Languages: {languages}
916
+ - Frameworks: {frameworks}
917
+
918
+ Provide:
919
+ 1. **Key Concepts** - Main ideas to understand (3-5 items)
920
+ 2. **Best Practices** - Actionable recommendations (3-5 items)
921
+ 3. **Implementation Steps** - How to implement in this stack (3-5 steps)
922
+ 4. **Common Pitfalls** - What to avoid (2-3 items)
923
+
924
+ Be concise and practical. Format with markdown."""
925
+
926
+ return _run_ai_prompt(workspace, prompt, cli_name, timeout=120)
927
+
928
+
929
+ def _ai_analyze_file(workspace: Path, content: str, filename: str, cli_name: str) -> str | None:
930
+ """Use AI to analyze a file and extract insights."""
931
+ # Truncate if too large
932
+ max_chars = 12000
933
+ if len(content) > max_chars:
934
+ half = max_chars // 2
935
+ content = content[:half] + "\n\n[... content truncated ...]\n\n" + content[-half:]
936
+ truncated = True
937
+ else:
938
+ truncated = False
939
+
940
+ prompt = f"""Analyze this file and extract actionable insights:
941
+
942
+ 1. **Key Concepts** - Main ideas and knowledge (5-8 items)
943
+ 2. **Patterns** - Design patterns, workflows, methodologies
944
+ 3. **Best Practices** - Actionable recommendations to apply
945
+ 4. **Implementation Ideas** - How to use these learnings
946
+
947
+ {"[Note: File was truncated due to size]" if truncated else ""}
948
+
949
+ File ({filename}):
950
+ {content}
951
+
952
+ Be concise. Format with markdown headers."""
953
+
954
+ return _run_ai_prompt(workspace, prompt, cli_name, timeout=180)
955
+
956
+
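The truncation above is middle-out: it keeps the head and the tail of an oversized file so the model still sees both the opening context (imports, setup) and the trailing definitions. As a standalone sketch of the same strategy:

```python
# Standalone sketch of the middle-out truncation used above.
def truncate_middle(text: str, max_chars: int = 12_000) -> str:
    """Keep the head and tail of oversized text, dropping the middle."""
    if len(text) <= max_chars:
        return text
    half = max_chars // 2
    return text[:half] + "\n\n[... content truncated ...]\n\n" + text[-half:]

shortened = truncate_middle("x" * 50_000)
print(len(shortened))  # 12_000 plus the length of the truncation marker
```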
957
+ @click.group(invoke_without_command=True)
958
+ @click.argument("topic_or_path", required=False)
959
+ @click.option("--workspace", "-w", type=click.Path(exists=True), help="Workspace path")
960
+ @click.option("--no-ai", is_flag=True, help="Disable AI analysis (use basic extraction only)")
961
+ @click.pass_context
962
+ def learn_cmd(ctx, topic_or_path: str, workspace: str, no_ai: bool):
963
+ """Learning system - analyze, research, and improve.
964
+
965
+ All commands use Claude/Cursor AI by default, with automatic fallback to basic extraction.
966
+
967
+ \b
968
+ Usage:
969
+ up learn Auto-analyze and improve (requires vision map)
970
+ up learn "topic" Learn about a specific topic/feature
971
+ up learn "file.md" Analyze file with AI (fallback: basic extraction)
972
+ up learn "project/path" Compare and learn from another project
973
+
974
+ \b
975
+ Subcommands:
976
+ up learn auto Analyze project (no vision check)
977
+ up learn analyze Analyze all research files with AI
978
+ up learn plan Generate improvement PRD
979
+ up learn status Show learning system status
980
+
981
+ \b
982
+ Options:
983
+ --no-ai Disable AI analysis (faster, basic extraction)
984
+
985
+ \b
986
+ Examples:
987
+ up learn # Self-improvement with AI
988
+ up learn "caching" # Learn about caching with AI research
989
+ up learn "guide.md" # AI-powered file analysis
990
+ up learn "../other-project" # Compare projects with AI insights
991
+ """
992
+ # If subcommand invoked, skip main logic
993
+ if ctx.invoked_subcommand is not None:
994
+ return
995
+
996
+ # Store options in context for subcommands
997
+ ctx.ensure_object(dict)
998
+ ctx.obj['workspace'] = workspace
999
+ ctx.obj['no_ai'] = no_ai
1000
+
1001
+ # Check if topic_or_path is actually a subcommand name
1002
+ # This happens because Click processes arguments before subcommands
1003
+ subcommands = ["auto", "analyze", "plan", "status"]
1004
+ if topic_or_path in subcommands:
1005
+ # Invoke the subcommand with stored options
1006
+ subcmd = ctx.command.commands[topic_or_path]
1007
+ ctx.invoke(subcmd, workspace=workspace)
1008
+ return
1009
+
1010
+ ws = Path(workspace) if workspace else Path.cwd()
1011
+ use_ai = not no_ai
1012
+
1013
+ # No argument: self-improvement mode
1014
+ if not topic_or_path:
1015
+ # Check if vision map is set up
1016
+ vision_exists, vision_path = check_vision_map_exists(ws)
1017
+
1018
+ if not vision_exists:
1019
+ console.print(Panel.fit(
1020
+ "[yellow]Vision Map Not Configured[/]",
1021
+ border_style="yellow"
1022
+ ))
1023
+ console.print("\nThe learning system requires a configured vision map to guide improvements.")
1024
+ console.print(f"\nPlease configure: [cyan]{vision_path}[/]")
1025
+ console.print("\nThe vision map should include:")
1026
+ console.print(" • Your product vision statement")
1027
+ console.print(" • Problem statement and pain points")
1028
+ console.print(" • Success metrics")
1029
+ console.print("\n[bold]Alternatives:[/]")
1030
+ console.print(" • [cyan]up learn auto[/] - Analyze without vision map")
1031
+ console.print(" • [cyan]up learn \"topic\"[/] - Learn about specific topic")
1032
+ console.print(" • [cyan]up learn \"path\"[/] - Learn from another project")
1033
+ return
1034
+
1035
+ # Vision exists, run self-improvement
1036
+ learn_self_improvement(ws, use_ai=use_ai)
1037
+ return
1038
+
1039
+ # Has argument: determine if topic or path
1040
+ if is_valid_path(topic_or_path):
1041
+ learn_from_project(ws, topic_or_path, use_ai=use_ai)
1042
+ else:
1043
+ learn_from_topic(ws, topic_or_path, use_ai=use_ai)
1044
+
1045
+
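Because the group accepts a positional argument and also defines subcommands, Click binds the first bare word to `topic_or_path`, and the callback above re-dispatches it when it names a subcommand. The routing can be exercised with Click's test runner (a sketch, assuming the wheel is installed; exit codes and output elided):

```python
# Sketch: exercising the dispatch logic with Click's CliRunner.
from click.testing import CliRunner
from up.commands.learn import learn_cmd

runner = CliRunner()
runner.invoke(learn_cmd, ["status"])             # re-dispatched to the status subcommand
runner.invoke(learn_cmd, ["caching", "--no-ai"]) # plain word: topic learning, AI disabled
runner.invoke(learn_cmd, [])                     # no argument: self-improvement mode
```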
1046
+ @learn_cmd.command("auto")
1047
+ @click.option("--workspace", "-w", type=click.Path(exists=True), help="Workspace path")
1048
+ def learn_auto(workspace: str):
1049
+ """Auto-analyze project and identify improvements.
1050
+
1051
+ Scans the codebase to detect technologies, patterns, and
1052
+ generate research topics for improvement.
1053
+ """
1054
+ ws = Path(workspace) if workspace else Path.cwd()
1055
+
1056
+ console.print(Panel.fit(
1057
+ "[bold blue]Learning System[/] - Auto Analysis",
1058
+ border_style="blue"
1059
+ ))
1060
+
1061
+ # Run project analyzer
1062
+ profile = analyze_project(ws)
1063
+
1064
+ if profile is None:
1065
+ console.print("[red]Error: Could not analyze project[/]")
1066
+ return
1067
+
1068
+ # Display results
1069
+ display_profile(profile)
1070
+
1071
+ # Save profile
1072
+ save_path = save_profile(ws, profile)
1073
+ console.print(f"\n[green]✓[/] Profile saved to: [cyan]{save_path}[/]")
1074
+
1075
+
1076
+
1077
+ # Suggest next steps
1078
+ console.print("\n[bold]Next Steps:[/]")
1079
+ if profile.get("research_topics"):
1080
+ console.print(" 1. Research topics with: [cyan]up learn research \"topic\"[/]")
1081
+ console.print(" 2. Generate PRD with: [cyan]up learn plan[/]")
1082
+ console.print(" 3. Start development with: [cyan]/product-loop[/]")
1083
+
1084
+
1085
+ def analyze_research_file(file_path: Path, workspace: Path) -> dict:
1086
+ """Analyze a single research file using AI CLI."""
1087
+ content = file_path.read_text()
1088
+
1089
+ # Truncate if too large
1090
+ max_chars = 10000
1091
+ if len(content) > max_chars:
1092
+ content = content[:max_chars] + "\n\n[... truncated ...]"
1093
+
1094
+ prompt = f"""Analyze this research document and extract:
1095
+
1096
+ 1. **Key Patterns** - Design patterns, methodologies, workflows mentioned
1097
+ 2. **Best Practices** - Actionable guidelines and recommendations
1098
+ 3. **Gaps** - What's missing or could be improved in a typical project
1099
+ 4. **Action Items** - Specific things to implement
1100
+
1101
+ Be concise. Return as structured markdown.
1102
+
1103
+ Research file ({file_path.name}):
1104
+ {content}
1105
+ """
1106
+
1107
+ cli_name, available = check_ai_cli()
1108
+ if not available:
1109
+ return {"error": "No AI CLI available", "file": file_path.name}
1110
+
1111
+ import subprocess
1112
+ try:
1113
+ if cli_name == "claude":
1114
+ cmd = ["claude", "-p", prompt]
1115
+ else:
1116
+ cmd = ["agent", "-p", prompt]
1117
+
1118
+ result = subprocess.run(
1119
+ cmd,
1120
+ capture_output=True,
1121
+ text=True,
1122
+ timeout=300, # 5 minute timeout for longer files
1123
+ cwd=workspace
1124
+ )
1125
+
1126
+ if result.returncode == 0:
1127
+ return {
1128
+ "file": file_path.name,
1129
+ "analysis": result.stdout,
1130
+ "cli": cli_name
1131
+ }
1132
+ else:
1133
+ return {"error": result.stderr, "file": file_path.name}
1134
+ except subprocess.TimeoutExpired:
1135
+ return {"error": "Timeout", "file": file_path.name}
1136
+ except Exception as e:
1137
+ return {"error": str(e), "file": file_path.name}
1138
+
1139
+
1140
+ @learn_cmd.command("analyze")
1141
+ @click.option("--workspace", "-w", type=click.Path(exists=True), help="Workspace path")
1142
+ def learn_analyze(workspace: str):
1143
+ """Analyze all research files and extract patterns with AI.
1144
+
1145
+ Uses Claude/Cursor AI by default and shows a progress bar.
1146
+ Falls back to listing the files for manual review if AI is unavailable.
1147
+ """
1148
+ from tqdm import tqdm
1149
+
1150
+ ws = Path(workspace) if workspace else Path.cwd()
1151
+
1152
+ skill_dir = find_skill_dir(ws, "learning-system")
1153
+ research_dir = skill_dir / "research"
1154
+ deep_dir = skill_dir / "deep_analysis"
1155
+ file_learnings_dir = skill_dir / "file_learnings"
1156
+ insights_dir = skill_dir / "insights"
1157
+
1158
+ # Collect all analyzable files
1159
+ files_to_analyze = []
1160
+
1161
+ if research_dir.exists():
1162
+ files_to_analyze.extend(list(research_dir.glob("*.md")))
1163
+
1164
+ if deep_dir.exists():
1165
+ files_to_analyze.extend(list(deep_dir.glob("*_content.md")))
1166
+
1167
+ if file_learnings_dir.exists():
1168
+ files_to_analyze.extend(list(file_learnings_dir.glob("*.md")))
1169
+
1170
+ if not files_to_analyze:
1171
+ console.print("[yellow]No research or learning files found.[/]")
1172
+ console.print("Run [cyan]up learn \"topic\"[/] or [cyan]up learn \"file.md\"[/] first.")
1173
+ return
1174
+
1175
+ console.print(Panel.fit(
1176
+ "[bold blue]Learning System[/] - Analyze Research",
1177
+ border_style="blue"
1178
+ ))
1179
+
1180
+ console.print(f"Found [cyan]{len(files_to_analyze)}[/] files to analyze:")
1181
+ for f in files_to_analyze:
1182
+ console.print(f" • {f.name}")
1183
+
1184
+ # Check AI availability
1185
+ cli_name, cli_available = check_ai_cli()
1186
+ if not cli_available:
1187
+ # Fallback mode - just show files
1188
+ console.print("\n[yellow]No AI CLI available. Showing files for manual review.[/]")
1189
+ console.print("\n[bold]Install Claude CLI or Cursor Agent for automatic analysis.[/]")
1190
+ console.print("\n[bold]Manual analysis:[/]")
1191
+ console.print(f" Update [cyan]{insights_dir}/patterns.md[/]")
1192
+ console.print(f" Update [cyan]{insights_dir}/gap-analysis.md[/]")
1193
+ return
1194
+
1195
+ console.print(f"\n[yellow]Analyzing with {cli_name}...[/]")
1196
+
1197
+ # Analyze each file with progress bar
1198
+ all_patterns = []
1199
+ all_practices = []
1200
+ all_gaps = []
1201
+ all_actions = []
1202
+
1203
+ insights_dir.mkdir(parents=True, exist_ok=True)
1204
+
1205
+ with tqdm(files_to_analyze, desc="Analyzing", unit="file") as pbar:
1206
+ for file_path in pbar:
1207
+ pbar.set_postfix_str(file_path.name[:30])
1208
+
1209
+ result = analyze_research_file(file_path, ws)
1210
+
1211
+ if "error" in result:
1212
+ console.print(f"\n[red]Error analyzing {file_path.name}: {result['error']}[/]")
1213
+ continue
1214
+
1215
+ # Save individual analysis
1216
+ analysis_file = insights_dir / f"{file_path.stem}_insights.md"
1217
+ analysis_file.write_text(f"""# Insights: {file_path.name}
1218
+
1219
+ **Analyzed**: {date.today().isoformat()}
1220
+ **Source**: `{file_path}`
1221
+ **Method**: {result.get('cli', 'unknown')} CLI
1222
+
1223
+ ---
1224
+
1225
+ {result['analysis']}
1226
+ """)
1227
+
1228
+ # Collect for combined report
1229
+ all_patterns.append(f"### From {file_path.name}\n{result['analysis']}")
1230
+
1231
+ # Generate combined insights files
1232
+ patterns_file = insights_dir / "patterns.md"
1233
+ patterns_content = f"""# Patterns Extracted
1234
+
1235
+ **Generated**: {date.today().isoformat()}
1236
+ **Files Analyzed**: {len(files_to_analyze)}
1237
+ **Method**: {cli_name} CLI (automatic)
1238
+
1239
+ ---
1240
+
1241
+ {chr(10).join(all_patterns)}
1242
+ """
1243
+ patterns_file.write_text(patterns_content)
1244
+
1245
+ # Generate gap analysis
1246
+ gap_file = insights_dir / "gap-analysis.md"
1247
+ gap_content = f"""# Gap Analysis
1248
+
1249
+ **Generated**: {date.today().isoformat()}
1250
+ **Based on**: {len(files_to_analyze)} research files
1251
+
1252
+ ---
1253
+
1254
+ ## Summary
1255
+
1256
+ Review the individual insight files in this directory for detailed analysis.
1257
+
1258
+ ## Files Analyzed
1259
+
1260
+ {chr(10).join(f'- {f.name}' for f in files_to_analyze)}
1261
+
1262
+ ## Next Steps
1263
+
1264
+ 1. Review patterns in `patterns.md`
1265
+ 2. Identify gaps relevant to your project
1266
+ 3. Run `up learn plan` to generate improvement PRD
1267
+ """
1268
+ gap_file.write_text(gap_content)
1269
+
1270
+ console.print("\n[green]✓[/] Analysis complete!")
1271
+ console.print("\n[bold]Generated:[/]")
1272
+ console.print(f" • [cyan]{patterns_file.relative_to(ws)}[/]")
1273
+ console.print(f" • [cyan]{gap_file.relative_to(ws)}[/]")
1274
+ console.print(f" • {len(files_to_analyze)} individual insight files")
1275
+
1276
+ console.print("\n[bold]Next Steps:[/]")
1277
+ console.print(" 1. Review: [cyan]@" + str(patterns_file.relative_to(ws)) + "[/]")
1278
+ console.print(" 2. Generate PRD: [cyan]up learn plan[/]")
1279
+ console.print(" 3. Start development: [cyan]up start[/]")
1280
+
1281
+
1282
+ @learn_cmd.command("plan")
1283
+ @click.option("--workspace", "-w", type=click.Path(exists=True), help="Workspace path")
1284
+ @click.option("--output", "-o", type=click.Path(), help="Output file path")
1285
+ def learn_plan(workspace: str, output: str):
1286
+ """Generate improvement plan (PRD) from analysis.
1287
+
1288
+ Uses AI to convert insights and patterns into actionable user stories.
1289
+ """
1290
+ ws = Path(workspace) if workspace else Path.cwd()
1291
+
1292
+ console.print(Panel.fit(
1293
+ "[bold blue]Learning System[/] - Generate PRD",
1294
+ border_style="blue"
1295
+ ))
1296
+
1297
+ skill_dir = find_skill_dir(ws, "learning-system")
1298
+ insights_dir = skill_dir / "insights"
1299
+
1300
+ # Collect all insights
1301
+ insights_content = []
1302
+
1303
+ # Read patterns
1304
+ patterns_file = insights_dir / "patterns.md"
1305
+ if patterns_file.exists():
1306
+ content = patterns_file.read_text()
1307
+ if len(content) > 100: # Not just template
1308
+ insights_content.append(f"## Patterns\n{content}")
1309
+
1310
+ # Read gap analysis
1311
+ gap_file = insights_dir / "gap-analysis.md"
1312
+ if gap_file.exists():
1313
+ content = gap_file.read_text()
1314
+ if len(content) > 100:
1315
+ insights_content.append(f"## Gap Analysis\n{content}")
1316
+
1317
+ # Read individual insight files
1318
+ for f in insights_dir.glob("*_insights.md"):
1319
+ content = f.read_text()
1320
+ insights_content.append(f"## {f.stem}\n{content[:2000]}")
1321
+
1322
+ # Read research files
1323
+ research_dir = skill_dir / "research"
1324
+ if research_dir.exists():
1325
+ for f in research_dir.glob("*.md"):
1326
+ content = f.read_text()
1327
+ if "AI Research" in content: # Has AI-generated content
1328
+ insights_content.append(f"## Research: {f.stem}\n{content[:2000]}")
1329
+
1330
+ # Read file learnings
1331
+ learnings_dir = skill_dir / "file_learnings"
1332
+ if learnings_dir.exists():
1333
+ for f in learnings_dir.glob("*.md"):
1334
+ content = f.read_text()
1335
+ if "AI Analysis" in content:
1336
+ insights_content.append(f"## Learning: {f.stem}\n{content[:2000]}")
1337
+
1338
+ if not insights_content:
1339
+ console.print("[yellow]No insights found to generate PRD.[/]")
1340
+ console.print("Run [cyan]up learn analyze[/] first to process research files.")
1341
+ return
1342
+
1343
+ console.print(f"Found [cyan]{len(insights_content)}[/] insight sources")
1344
+
1345
+ # Load profile if exists
1346
+ profile_file = skill_dir / "project_profile.json"
1347
+ profile = {}
1348
+ if profile_file.exists():
1349
+ try:
1350
+ profile = json.loads(profile_file.read_text())
1351
+ except json.JSONDecodeError:
1352
+ pass
1353
+
1354
+ # Try AI to generate user stories
1355
+ cli_name, cli_available = check_ai_cli()
1356
+ user_stories = []
1357
+
1358
+ if cli_available:
1359
+ console.print(f"\n[yellow]Generating tasks with {cli_name}...[/]")
1360
+
1361
+ # Truncate insights if too long
1362
+ all_insights = "\n\n".join(insights_content)
1363
+ if len(all_insights) > 10000:
1364
+ all_insights = all_insights[:10000] + "\n\n[... truncated ...]"
1365
+
1366
+ prompt = f"""Based on these insights and learnings, generate 5-10 actionable improvement tasks.
1367
+
1368
+ Project context:
1369
+ - Languages: {', '.join(profile.get('languages', ['unknown']))}
1370
+ - Frameworks: {', '.join(profile.get('frameworks', ['unknown']))}
1371
+
1372
+ Insights:
1373
+ {all_insights}
1374
+
1375
+ Return ONLY a JSON array of user stories in this exact format:
1376
+ [
1377
+ {{"id": "US-001", "title": "Short title", "description": "What to implement", "priority": "high|medium|low", "effort": "small|medium|large"}},
1378
+ ...
1379
+ ]
1380
+
1381
+ Focus on practical, implementable improvements. No explanation, just the JSON array."""
1382
+
1383
+ result = _run_ai_prompt(ws, prompt, cli_name, timeout=120)
1384
+
1385
+ if result:
1386
+ # Try to parse JSON from response
1387
+ try:
1388
+ # Find JSON array in response
1389
+ import re
1390
+ json_match = re.search(r'\[[\s\S]*\]', result)
1391
+ if json_match:
1392
+ user_stories = json.loads(json_match.group())
1393
+ console.print(f"[green]✓[/] Generated {len(user_stories)} user stories")
1394
+ except json.JSONDecodeError:
1395
+ console.print("[yellow]Could not parse AI response, using template[/]")
1396
+
1397
+ if not user_stories:
1398
+ # Fallback: extract action items from insights
1399
+ console.print("[yellow]Using basic task generation...[/]")
1400
+
1401
+ # Parse insights for action items (- [ ] checkbox items)
1402
+ all_insights = "\n".join(insights_content)
1403
+ action_items = []
1404
+
1405
+ # Find checkbox items: - [ ] task description
1406
+ for line in all_insights.splitlines():
1407
+ line = line.strip()
1408
+ if line.startswith("- [ ]"):
1409
+ item = line[5:].strip()
1410
+ if item and len(item) > 5:
1411
+ action_items.append(item)
1412
+
1413
+ # Also find numbered items after "Action Items" header
1414
+ in_action_section = False
1415
+ for line in all_insights.splitlines():
1416
+ if "action item" in line.lower() or "immediate" in line.lower():
1417
+ in_action_section = True
1418
+ continue
1419
+ if in_action_section:
1420
+ if line.startswith("#") or (line.startswith("**") and "- [" not in line):
1421
+ in_action_section = False
1422
+ elif line.strip().startswith("-") and len(line.strip()) > 3:
1423
+ item = line.strip().lstrip("-[ ]").strip()
1424
+ if item and item not in action_items:
1425
+ action_items.append(item)
1426
+
1427
+ # Fallback to improvement areas if no action items found
1428
+ if not action_items:
1429
+ action_items = profile.get("improvement_areas", [])
1430
+
1431
+ # Generate user stories from action items
1432
+ for i, item in enumerate(action_items[:10], 1):
1433
+ # Determine priority based on keywords
1434
+ priority = "medium"
1435
+ if any(w in item.lower() for w in ["immediate", "critical", "urgent", "must"]):
1436
+ priority = "high"
1437
+ elif any(w in item.lower() for w in ["optional", "nice", "later", "future"]):
1438
+ priority = "low"
1439
+
1440
+ user_stories.append({
1441
+ "id": f"US-{i:03d}",
1442
+ "title": item[:60] + ("..." if len(item) > 60 else ""),
1443
+ "description": item,
1444
+ "priority": priority,
1445
+ "effort": "medium"
1446
+ })
1447
+
1448
+ if user_stories:
1449
+ console.print(f"[green]✓[/] Extracted {len(user_stories)} tasks from insights")
1450
+
1451
+ # Generate PRD
1452
+ prd = {
1453
+ "name": profile.get("name", ws.name),
1454
+ "version": "1.0.0",
1455
+ "generated": date.today().isoformat(),
1456
+ "source": "up learn plan",
1457
+ "userStories": user_stories,
1458
+ "metadata": {
1459
+ "insights_count": len(insights_content),
1460
+ "ai_generated": cli_available and len(user_stories) > 0,
1461
+ }
1462
+ }
1463
+
1464
+ output_path = Path(output) if output else skill_dir / "prd.json"
1465
+ output_path.write_text(json.dumps(prd, indent=2))
1466
+
1467
+ console.print(f"\n[green]✓[/] PRD generated: [cyan]{output_path}[/]")
+
+     # Display user stories
+     if user_stories:
+         console.print("\n[bold]Generated User Stories:[/]")
+         table = Table()
+         table.add_column("ID", style="cyan")
+         table.add_column("Title")
+         table.add_column("Priority")
+         table.add_column("Effort")
+
+         for story in user_stories[:10]:
+             table.add_row(
+                 story.get("id", "?"),
+                 story.get("title", "")[:50],
+                 story.get("priority", "medium"),
+                 story.get("effort", "medium"),
+             )
+         console.print(table)
+
+     console.print("\n[bold]Next Steps:[/]")
+     console.print(f" 1. Review: [cyan]{output_path}[/]")
+     console.print(" 2. Edit priorities/details as needed")
+     console.print(" 3. Start development: [cyan]up start[/]")
+
+
+ @learn_cmd.command("status")
+ @click.option("--workspace", "-w", type=click.Path(exists=True), help="Workspace path")
+ def learn_status(workspace: str | None):
+     """Show learning system status."""
+     ws = Path(workspace) if workspace else Path.cwd()
+
+     console.print(Panel.fit(
+         "[bold blue]Learning System[/] - Status",
+         border_style="blue",
+     ))
+
+     skill_dir = find_skill_dir(ws, "learning-system")
+
+     if not skill_dir.exists():
+         console.print("[yellow]Learning system not initialized.[/]")
+         console.print("Run [cyan]up init[/] to set up.")
+         return
+
+     # Check files
+     files = {
+         "Project Profile": skill_dir / "project_profile.json",
+         "Sources Config": skill_dir / "sources.json",
+         "Patterns": skill_dir / "insights/patterns.md",
+         "Gap Analysis": skill_dir / "insights/gap-analysis.md",
+         "PRD": skill_dir / "prd.json",
+     }
+
+     table = Table(title="Learning System Files")
+     table.add_column("File", style="cyan")
+     table.add_column("Status")
+
+     for name, path in files.items():
+         if path.exists():
+             table.add_row(name, "[green]✓ exists[/]")
+         else:
+             table.add_row(name, "[dim]○ not created[/]")
+
+     console.print(table)
+
+     # Count research files
+     research_dir = skill_dir / "research"
+     if research_dir.exists():
+         research_count = len(list(research_dir.glob("*.md")))
+         console.print(f"\nResearch files: [cyan]{research_count}[/]")
+
+
+ def find_skill_dir(workspace: Path, skill_name: str) -> Path:
+     """Find skill directory (Claude or Cursor)."""
+     claude_skill = workspace / f".claude/skills/{skill_name}"
+     cursor_skill = workspace / f".cursor/skills/{skill_name}"
+
+     if claude_skill.exists():
+         return claude_skill
+     if cursor_skill.exists():
+         return cursor_skill
+
+     # Default to Claude
+     return claude_skill
+
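+ # Usage sketch (illustrative; assumes a workspace initialized with `up init`):
+ #   skill_dir = find_skill_dir(Path.cwd(), "learning-system")
+ #   profile_path = skill_dir / "project_profile.json"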
+
+ def analyze_project(workspace: Path) -> dict:
+     """Analyze project and return profile."""
+     # os and re are imported at module level
+     profile = {
+         "name": workspace.name,
+         "languages": [],
+         "frameworks": [],
+         "patterns_detected": [],
+         "improvement_areas": [],
+         "research_topics": [],
+     }
+
+     # Extension to language mapping
+     extensions = {
+         ".py": "Python",
+         ".js": "JavaScript",
+         ".ts": "TypeScript",
+         ".tsx": "TypeScript",
+         ".go": "Go",
+         ".rs": "Rust",
+         ".java": "Java",
+         ".rb": "Ruby",
+     }
+
+     # Framework indicators (config-file substring -> display name)
+     framework_indicators = {
+         "fastapi": "FastAPI",
+         "django": "Django",
+         "flask": "Flask",
+         "react": "React",
+         "next": "Next.js",
+         "vue": "Vue.js",
+         "langchain": "LangChain",
+         "langgraph": "LangGraph",
+         "express": "Express",
+         "pytest": "pytest",
+     }
+
+     # Detect languages
+     skip_dirs = {".git", "node_modules", "__pycache__", ".venv", "venv", "build", "dist"}
+     found_languages = set()
+
+     for root, dirs, files in os.walk(workspace):
+         dirs[:] = [d for d in dirs if d not in skip_dirs]
+         for f in files:
+             ext = Path(f).suffix.lower()
+             if ext in extensions:
+                 found_languages.add(extensions[ext])
+
+     profile["languages"] = sorted(found_languages)
+
+     # Detect frameworks
+     config_files = [
+         workspace / "pyproject.toml",
+         workspace / "requirements.txt",
+         workspace / "package.json",
+     ]
+
+     found_frameworks = set()
+     for config in config_files:
+         if config.exists():
+             try:
+                 content = config.read_text().lower()
+                 for key, name in framework_indicators.items():
+                     if key in content:
+                         found_frameworks.add(name)
+             except Exception:
+                 pass
+
+     profile["frameworks"] = sorted(found_frameworks)
+
+     # Detect patterns
+     pattern_indicators = {
+         r"class.*Repository": "Repository Pattern",
+         r"class.*Service": "Service Layer",
+         r"@dataclass": "Dataclasses",
+         r"async def": "Async/Await",
+         r"def test_": "Unit Tests",
+         r"Protocol\)": "Protocol Pattern",
+     }
+
+     src_dir = workspace / "src"
+     if not src_dir.exists():
+         src_dir = workspace
+
+     found_patterns = set()
+     for py_file in src_dir.rglob("*.py"):
+         try:
+             content = py_file.read_text()
+             for pattern, name in pattern_indicators.items():
+                 if re.search(pattern, content, re.IGNORECASE):
+                     found_patterns.add(name)
+         except Exception:
+             continue
+
+     profile["patterns_detected"] = sorted(found_patterns)
+
+     # Identify improvements
+     improvements = []
+     if "Python" in profile["languages"]:
+         if "Unit Tests" not in profile["patterns_detected"]:
+             improvements.append("add-unit-tests")
+         if "Protocol Pattern" not in profile["patterns_detected"]:
+             improvements.append("add-interfaces")
+
+     if any(f in profile["frameworks"] for f in ["FastAPI", "Django", "Flask"]):
+         improvements.append("add-caching")
+
+     profile["improvement_areas"] = improvements
+
+     # Generate research topics
+     topic_map = {
+         "add-unit-tests": "testing best practices",
+         "add-interfaces": "Python Protocol patterns",
+         "add-caching": "caching strategies",
+     }
+
+     topics = [topic_map[i] for i in improvements if i in topic_map]
+     for fw in profile["frameworks"][:2]:
+         topics.append(f"{fw} best practices")
+
+     profile["research_topics"] = topics[:5]
+
+     return profile
+
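+ # Example return value (illustrative, for a small FastAPI codebase):
+ # {
+ #   "name": "my-api",
+ #   "languages": ["Python"],
+ #   "frameworks": ["FastAPI", "pytest"],
+ #   "patterns_detected": ["Async/Await", "Unit Tests"],
+ #   "improvement_areas": ["add-interfaces", "add-caching"],
+ #   "research_topics": ["Python Protocol patterns", "caching strategies",
+ #                       "FastAPI best practices", "pytest best practices"]
+ # }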
+
+ def display_profile(profile: dict) -> None:
+     """Display profile in rich format."""
+     table = Table(title="Project Profile")
+     table.add_column("Aspect", style="cyan")
+     table.add_column("Detected")
+
+     table.add_row("Name", profile.get("name", "Unknown"))
+     table.add_row("Languages", ", ".join(profile.get("languages", [])) or "None")
+     table.add_row("Frameworks", ", ".join(profile.get("frameworks", [])) or "None")
+     table.add_row("Patterns", ", ".join(profile.get("patterns_detected", [])) or "None")
+     table.add_row("Improvements", ", ".join(profile.get("improvement_areas", [])) or "None")
+     table.add_row("Research Topics", ", ".join(profile.get("research_topics", [])) or "None")
+
+     console.print(table)
+
+
1697
+ def save_profile(workspace: Path, profile: dict) -> Path:
1698
+ """Save profile to JSON file."""
1699
+ skill_dir = find_skill_dir(workspace, "learning-system")
1700
+ skill_dir.mkdir(parents=True, exist_ok=True)
1701
+
1702
+ filepath = skill_dir / "project_profile.json"
1703
+ filepath.write_text(json.dumps(profile, indent=2))
1704
+ return filepath
1705
+
1706
+
1707
+ def generate_prd_template(profile: dict) -> dict:
1708
+ """Generate PRD template from profile."""
1709
+ from datetime import date
1710
+
1711
+ prd = {
1712
+ "project": profile.get("name", "Project") + " Improvements",
1713
+ "branchName": "feature/improvements",
1714
+ "description": "Improvements identified by learning system",
1715
+ "createdAt": date.today().isoformat(),
1716
+ "userStories": [],
1717
+ }
1718
+
1719
+ # Generate user stories from improvement areas
1720
+ for i, area in enumerate(profile.get("improvement_areas", []), 1):
1721
+ story = {
1722
+ "id": f"US-{i:03d}",
1723
+ "title": area.replace("-", " ").title(),
1724
+ "description": f"Implement {area.replace('-', ' ')}",
1725
+ "acceptanceCriteria": [
1726
+ "Implementation complete",
1727
+ "Tests passing",
1728
+ "Documentation updated",
1729
+ ],
1730
+ "priority": i,
1731
+ "effort": "medium",
1732
+ "passes": False,
1733
+ "notes": "",
1734
+ }
1735
+ prd["userStories"].append(story)
1736
+
1737
+ return prd
1738
+
1739
+
1740
+ if __name__ == "__main__":
1741
+ learn_cmd()
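+ # Typical flow, as wired above (illustrative):
+ #   up learn plan    # generate prd.json from collected insights
+ #   up learn status  # check which learning-system files exist
+ #   up start         # begin development against the generated PRD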