up-cli 0.2.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. up/__init__.py +1 -1
  2. up/ai_cli.py +229 -0
  3. up/cli.py +54 -9
  4. up/commands/agent.py +521 -0
  5. up/commands/bisect.py +343 -0
  6. up/commands/branch.py +350 -0
  7. up/commands/init.py +195 -6
  8. up/commands/learn.py +1392 -32
  9. up/commands/memory.py +545 -0
  10. up/commands/provenance.py +267 -0
  11. up/commands/review.py +239 -0
  12. up/commands/start.py +752 -42
  13. up/commands/status.py +173 -18
  14. up/commands/sync.py +317 -0
  15. up/commands/vibe.py +304 -0
  16. up/context.py +64 -10
  17. up/core/__init__.py +69 -0
  18. up/core/checkpoint.py +479 -0
  19. up/core/provenance.py +364 -0
  20. up/core/state.py +678 -0
  21. up/events.py +512 -0
  22. up/git/__init__.py +37 -0
  23. up/git/utils.py +270 -0
  24. up/git/worktree.py +331 -0
  25. up/learn/__init__.py +155 -0
  26. up/learn/analyzer.py +227 -0
  27. up/learn/plan.py +374 -0
  28. up/learn/research.py +511 -0
  29. up/learn/utils.py +117 -0
  30. up/memory.py +1096 -0
  31. up/parallel.py +551 -0
  32. up/templates/config/__init__.py +1 -1
  33. up/templates/docs/SKILL.md +28 -0
  34. up/templates/docs/__init__.py +341 -0
  35. up/templates/docs/standards/HEADERS.md +24 -0
  36. up/templates/docs/standards/STRUCTURE.md +18 -0
  37. up/templates/docs/standards/TEMPLATES.md +19 -0
  38. up/templates/loop/__init__.py +92 -32
  39. up/ui/__init__.py +14 -0
  40. up/ui/loop_display.py +650 -0
  41. up/ui/theme.py +137 -0
  42. {up_cli-0.2.0.dist-info → up_cli-0.5.0.dist-info}/METADATA +160 -15
  43. up_cli-0.5.0.dist-info/RECORD +55 -0
  44. up_cli-0.2.0.dist-info/RECORD +0 -23
  45. {up_cli-0.2.0.dist-info → up_cli-0.5.0.dist-info}/WHEEL +0 -0
  46. {up_cli-0.2.0.dist-info → up_cli-0.5.0.dist-info}/entry_points.txt +0 -0
up/learn/research.py ADDED
@@ -0,0 +1,511 @@
1
+ """Research and file learning for the learning system."""
2
+
3
+ import json
4
+ import re
5
+ from datetime import date
6
+ from pathlib import Path
7
+ from typing import Optional
8
+
9
+ from rich.console import Console
10
+ from rich.panel import Panel
11
+ from rich.table import Table
12
+
13
+ from up.ai_cli import check_ai_cli, run_ai_prompt
14
+ from up.learn.utils import find_skill_dir, safe_filename, record_to_memory
15
+ from up.learn.analyzer import analyze_project, analyze_project_structure
16
+
17
+ console = Console()
18
+
19
+
20
def learn_from_topic(workspace: Path, topic: str, use_ai: bool = True) -> dict:
    """Learn in a specific direction provided by the user.

    Optionally asks the configured AI CLI to research the topic, derives
    fallback learning areas by keyword-matching the topic against known
    categories, writes a dated markdown research file under the
    learning-system skill directory, and records the session to memory.

    Args:
        workspace: Project root to analyze and write research files into.
        topic: Free-text learning direction supplied by the user.
        use_ai: When True and an AI CLI is available, run AI research.

    Returns:
        The learning dict (topic, project context, learning areas,
        action items, and optional AI research text).
    """
    console.print(Panel.fit(
        f"[bold blue]Learning System[/] - Focused Learning: {topic}",
        border_style="blue"
    ))

    profile = analyze_project(workspace)

    learning = {
        "topic": topic,
        "project_context": {
            "languages": profile.get("languages", []),
            "frameworks": profile.get("frameworks", []),
        },
        "learning_areas": [],
        "action_items": [],
        "ai_research": None,
    }

    # Try AI-powered research (best-effort: silently skipped when no CLI).
    if use_ai:
        cli_name, cli_available = check_ai_cli()
        if cli_available:
            console.print(f"\n[yellow]Researching with {cli_name}...[/]")
            ai_result = _ai_research_topic(workspace, topic, profile, cli_name)
            if ai_result:
                learning["ai_research"] = ai_result

    # Map topic to relevant areas (keyword fallback, also used alongside AI)
    topic_lower = topic.lower()
    categories = {
        "testing": ["test", "testing", "unit test", "integration", "coverage", "pytest", "jest"],
        "architecture": ["architecture", "pattern", "design", "structure", "clean", "solid", "ddd"],
        "performance": ["performance", "speed", "fast", "optimize", "cache", "caching"],
        "security": ["security", "auth", "authentication", "authorization", "jwt", "oauth"],
        "api": ["api", "rest", "graphql", "endpoint", "route"],
        "database": ["database", "db", "sql", "orm", "migration", "query"],
    }

    matched_categories = []
    for cat, keywords in categories.items():
        if any(kw in topic_lower for kw in keywords):
            matched_categories.append(cat)

    if matched_categories:
        # Cross each matched category with the project's frameworks.
        for cat in matched_categories:
            for fw in profile.get("frameworks", []):
                learning["learning_areas"].append(f"{fw} {cat} best practices")
        learning["learning_areas"].append(f"{topic} patterns")
    else:
        # Unrecognized topic: fall back to generic research areas.
        learning["learning_areas"].append(f"{topic} implementation")
        for fw in profile.get("frameworks", []):
            learning["learning_areas"].append(f"{topic} in {fw}")

    learning["action_items"] = [
        f"Research {topic} best practices",
        f"Review current codebase for {topic} patterns",
        f"Identify gaps in {topic} implementation",
        f"Create improvement plan for {topic}",
    ]

    # Display results: AI panel when available, otherwise the keyword areas.
    if learning.get("ai_research"):
        console.print("\n[green]✓ AI Research Complete[/]")
        console.print(Panel(learning["ai_research"], title=f"Research: {topic}", border_style="green"))
    else:
        console.print("\n[bold]Learning Focus:[/]")
        console.print(f" Topic: [cyan]{topic}[/]")
        console.print("\n[bold]Areas to Research:[/]")
        for area in learning["learning_areas"][:5]:
            console.print(f" • {area}")

    # Save a dated research file under the skill directory.
    skill_dir = find_skill_dir(workspace, "learning-system")
    research_dir = skill_dir / "research"
    research_dir.mkdir(parents=True, exist_ok=True)

    safe_topic = safe_filename(topic)
    research_file = research_dir / f"{date.today().isoformat()}_{safe_topic}.md"

    ai_section = ""
    if learning.get("ai_research"):
        ai_section = f"## AI Research\n\n{learning['ai_research']}\n\n---\n\n"

    research_content = f"""# Learning: {topic}

**Created**: {date.today().isoformat()}
**Status**: {"✅ Researched" if learning.get("ai_research") else "📋 In Progress"}

## Context

Project languages: {', '.join(profile.get('languages', ['N/A']))}
Project frameworks: {', '.join(profile.get('frameworks', ['N/A']))}

{ai_section}## Learning Areas

{chr(10).join(f'- [ ] {area}' for area in learning['learning_areas'])}

## Action Items

{chr(10).join(f'- [ ] {item}' for item in learning['action_items'])}
"""

    research_file.write_text(research_content)
    console.print(f"\n[green]✓[/] Research file created: [cyan]{research_file}[/]")

    record_to_memory(workspace, f"Started learning about: {topic}")

    console.print("\n[bold]Next Steps:[/]")
    console.print(f" 1. Review [cyan]{research_file}[/]")
    console.print(" 2. Run [cyan]up learn analyze[/] to process all research")
    console.print(" 3. Run [cyan]up learn plan[/] to generate improvement PRD")

    return learning
135
+
136
+
137
def learn_from_file(workspace: Path, file_path: str, use_ai: bool = True) -> dict:
    """Learn from a single file.

    Reads the file, optionally asks the AI CLI for insights, always runs a
    per-extension basic extraction, prints a summary, and saves a dated
    learnings markdown file under the skill directory.

    Args:
        workspace: Project root (used for the skill directory and memory).
        file_path: Path to the file to learn from (~ and relative OK).
        use_ai: When True and an AI CLI is available, run AI analysis.

    Returns:
        The learnings dict, or {} when the file is missing or unreadable.
    """
    source_file = Path(file_path).expanduser().resolve()

    if not source_file.exists():
        console.print(f"[red]Error: File not found: {file_path}[/]")
        return {}

    console.print(Panel.fit(
        f"[bold blue]Learning System[/] - Learn from File: {source_file.name}",
        border_style="blue"
    ))

    try:
        content = source_file.read_text()
    except Exception as e:
        # Binary/undecodable files are reported, not raised.
        console.print(f"[red]Error reading file: {e}[/]")
        return {}

    file_ext = source_file.suffix.lower()
    learnings = {
        "source_file": source_file.name,
        "source_path": str(source_file),
        "file_type": file_ext,
        "key_concepts": [],
        "patterns_found": [],
        "best_practices": [],
        "code_snippets": [],
        "ai_analysis": None,
    }

    # Try AI analysis (best-effort; the basic extraction below always runs)
    if use_ai:
        cli_name, cli_available = check_ai_cli()
        if cli_available:
            console.print(f"\n[yellow]Analyzing with {cli_name}...[/]")
            ai_result = _ai_analyze_file(workspace, content, source_file.name, cli_name)
            if ai_result:
                learnings["ai_analysis"] = ai_result

    # Basic extraction dispatched by file extension
    if file_ext in ['.md', '.markdown', '.txt', '.rst']:
        learnings = _analyze_documentation_file(content, learnings)
    elif file_ext in ['.py']:
        learnings = _analyze_python_file(content, learnings)
    elif file_ext in ['.js', '.ts', '.tsx', '.jsx']:
        learnings = _analyze_javascript_file(content, learnings)
    elif file_ext in ['.json', '.yaml', '.yml', '.toml']:
        learnings = _analyze_config_file(content, learnings, file_ext)
    else:
        learnings = _analyze_generic_file(content, learnings)

    # Display results
    console.print(f"\n[bold]File:[/] {source_file.name}")
    console.print(f"[bold]Type:[/] {file_ext or 'unknown'}")
    console.print(f"[bold]Size:[/] {len(content)} characters, {len(content.splitlines())} lines")

    if learnings.get("ai_analysis"):
        console.print("\n[green]✓ AI Analysis Complete[/]")
        console.print(Panel(learnings["ai_analysis"], title="AI Insights", border_style="green"))
    else:
        if learnings["key_concepts"]:
            console.print("\n[green]📚 Key Concepts:[/]")
            for c in learnings["key_concepts"][:10]:
                console.print(f" • {c}")

    # Save learnings as a dated markdown summary
    skill_dir = find_skill_dir(workspace, "learning-system")
    learnings_dir = skill_dir / "file_learnings"
    learnings_dir.mkdir(parents=True, exist_ok=True)

    safe_name = safe_filename(source_file.stem)
    summary_file = learnings_dir / f"{date.today().isoformat()}_{safe_name}.md"

    ai_section = ""
    if learnings.get("ai_analysis"):
        ai_section = f"## AI Analysis\n\n{learnings['ai_analysis']}\n\n---\n\n"

    summary_content = f"""# Learnings from: {source_file.name}

**Analyzed**: {date.today().isoformat()}
**Source**: `{source_file}`
**Type**: {file_ext or 'unknown'}

{ai_section}## Key Concepts

{chr(10).join(f'- {c}' for c in learnings['key_concepts']) or '- None extracted'}

## Patterns Found

{chr(10).join(f'- [ ] {p}' for p in learnings['patterns_found']) or '- None identified'}
"""
    summary_file.write_text(summary_content)

    console.print(f"\n[green]✓[/] Learnings saved to: [cyan]{summary_file}[/]")
    record_to_memory(workspace, f"Learned from file: {learnings['source_file']}")

    return learnings
235
+
236
+
237
def learn_from_project(workspace: Path, project_path: str, use_ai: bool = True) -> dict:
    """Analyze external project for good design patterns.

    Profiles both the external project and the current workspace, diffs
    their detected patterns/frameworks, prints a comparison table, and
    saves a dated learnings markdown file under the skill directory.
    If the path is actually a file, delegates to learn_from_file.

    Args:
        workspace: Current project root (the one doing the learning).
        project_path: Path to the external project (or a single file).
        use_ai: Forwarded to learn_from_file when the path is a file.

    Returns:
        The learnings dict, or {} when the path does not exist.
    """
    external_project = Path(project_path).expanduser().resolve()

    if not external_project.exists():
        console.print(f"[red]Error: Path not found: {project_path}[/]")
        return {}

    if external_project.is_file():
        # Single file: the file-learning flow handles it entirely.
        return learn_from_file(workspace, project_path, use_ai=use_ai)

    console.print(Panel.fit(
        f"[bold blue]Learning System[/] - Learn from Project: {external_project.name}",
        border_style="blue"
    ))

    console.print("\n[bold]Analyzing External Project...[/]")
    external_profile = analyze_project(external_project)

    console.print("\n[bold]Analyzing Current Project...[/]")
    current_profile = analyze_project(workspace)

    learnings = {
        "source_project": external_project.name,
        "source_path": str(external_project),
        "patterns_to_adopt": [],
        "frameworks_to_consider": [],
        "structure_insights": [],
    }

    # Patterns present externally but absent here are adoption candidates.
    current_patterns = set(current_profile.get("patterns_detected", []))
    external_patterns = set(external_profile.get("patterns_detected", []))
    learnings["patterns_to_adopt"] = list(external_patterns - current_patterns)

    # Frameworks are only suggested when the two projects share a language.
    current_frameworks = set(current_profile.get("frameworks", []))
    external_frameworks = set(external_profile.get("frameworks", []))
    common_languages = set(current_profile.get("languages", [])) & set(external_profile.get("languages", []))
    if common_languages:
        learnings["frameworks_to_consider"] = list(external_frameworks - current_frameworks)

    learnings["structure_insights"] = analyze_project_structure(external_project)

    # Display comparison
    console.print("\n[bold]Comparison:[/]")
    table = Table()
    table.add_column("Aspect", style="cyan")
    table.add_column("Current Project")
    table.add_column("External Project")

    table.add_row(
        "Languages",
        ", ".join(current_profile.get("languages", [])) or "None",
        ", ".join(external_profile.get("languages", [])) or "None"
    )
    table.add_row(
        "Frameworks",
        ", ".join(current_profile.get("frameworks", [])) or "None",
        ", ".join(external_profile.get("frameworks", [])) or "None"
    )
    console.print(table)

    if learnings["patterns_to_adopt"]:
        console.print("\n[green]✓ Patterns to Consider Adopting:[/]")
        for p in learnings["patterns_to_adopt"]:
            console.print(f" • {p}")

    # Save learnings as a dated markdown summary
    skill_dir = find_skill_dir(workspace, "learning-system")
    learnings_dir = skill_dir / "external_learnings"
    learnings_dir.mkdir(parents=True, exist_ok=True)

    safe_name = safe_filename(external_project.name)
    summary_file = learnings_dir / f"{date.today().isoformat()}_{safe_name}.md"

    summary_content = f"""# Learnings from: {external_project.name}

**Analyzed**: {date.today().isoformat()}
**Source**: `{external_project}`

## Patterns to Adopt

{chr(10).join(f'- [ ] {p}' for p in learnings['patterns_to_adopt']) or '- None identified'}

## Structure Insights

{chr(10).join(f'- {s}' for s in learnings['structure_insights']) or '- None identified'}
"""
    summary_file.write_text(summary_content)

    console.print(f"\n[green]✓[/] Learnings saved to: [cyan]{summary_file}[/]")
    record_to_memory(workspace, f"Learned from external project: {learnings['source_project']}")

    return learnings
332
+
333
+
334
+ # =============================================================================
335
+ # AI Helper Functions
336
+ # =============================================================================
337
+
338
def _ai_research_topic(workspace: Path, topic: str, profile: dict, cli_name: str) -> Optional[str]:
    """Ask the AI CLI to research *topic* in the project's language/framework context.

    Returns the AI response text, or None when the CLI call yields nothing.
    """
    def _joined(key: str, fallback: str) -> str:
        # Comma-join a profile list, substituting a fallback when empty.
        return ", ".join(profile.get(key, [])) or fallback

    prompt = f"""Research the topic "{topic}" for a software project with:
- Languages: {_joined("languages", "unknown")}
- Frameworks: {_joined("frameworks", "none")}

Provide:
1. **Key Concepts** - Main ideas to understand (3-5 items)
2. **Best Practices** - Actionable recommendations (3-5 items)
3. **Implementation Steps** - How to implement (3-5 steps)
4. **Common Pitfalls** - What to avoid (2-3 items)

Be concise and practical. Format with markdown."""

    return run_ai_prompt(workspace, prompt, cli_name, timeout=120)
356
+
357
+
358
def _ai_analyze_file(workspace: Path, content: str, filename: str, cli_name: str) -> Optional[str]:
    """Ask the AI CLI to extract insights from a single file's content.

    Large files are truncated to the first and last ``max_chars // 2``
    characters (with a marker in between) so the prompt stays bounded.

    Args:
        workspace: Project root, forwarded to the CLI runner.
        filename: Display name of the file, interpolated into the prompt.

    Returns the AI response text, or None when the CLI call yields nothing.
    """
    max_chars = 12000
    truncated = len(content) > max_chars
    if truncated:
        half = max_chars // 2
        content = content[:half] + "\n\n[... content truncated ...]\n\n" + content[-half:]

    # Fix: the prompt previously read "File ((unknown)):" and never used the
    # `filename` parameter; it now interpolates the actual file name.
    prompt = f"""Analyze this file and extract actionable insights:

1. **Key Concepts** - Main ideas and knowledge (5-8 items)
2. **Patterns** - Design patterns, workflows, methodologies
3. **Best Practices** - Actionable recommendations
4. **Implementation Ideas** - How to use these learnings

{"[Note: File was truncated due to size]" if truncated else ""}

File ({filename}):
{content}

Be concise. Format with markdown headers."""

    return run_ai_prompt(workspace, prompt, cli_name, timeout=180)
383
+
384
+
385
+ # =============================================================================
386
+ # File Analyzers (Basic Extraction)
387
+ # =============================================================================
388
+
389
+ def _analyze_documentation_file(content: str, learnings: dict) -> dict:
390
+ """Extract insights from markdown/documentation files."""
391
+ lines = content.splitlines()
392
+
393
+ # Extract headers
394
+ headers = []
395
+ for line in lines:
396
+ if line.startswith('#'):
397
+ header = line.lstrip('#').strip()
398
+ if header and len(header) > 2:
399
+ headers.append(header)
400
+ learnings["key_concepts"] = headers[:15]
401
+
402
+ # Look for patterns
403
+ pattern_keywords = [
404
+ ('pattern', 'Design pattern mentioned'),
405
+ ('best practice', 'Best practice documented'),
406
+ ('principle', 'Principle defined'),
407
+ ('architecture', 'Architecture concept'),
408
+ ('workflow', 'Workflow described'),
409
+ ]
410
+
411
+ content_lower = content.lower()
412
+ for keyword, description in pattern_keywords:
413
+ if keyword in content_lower:
414
+ learnings["patterns_found"].append(description)
415
+
416
+ return learnings
417
+
418
+
419
+ def _analyze_python_file(content: str, learnings: dict) -> dict:
420
+ """Extract patterns from Python code."""
421
+ lines = content.splitlines()
422
+
423
+ patterns = {
424
+ r'class.*Repository': 'Repository Pattern',
425
+ r'class.*Service': 'Service Layer Pattern',
426
+ r'class.*Factory': 'Factory Pattern',
427
+ r'@dataclass': 'Dataclass usage',
428
+ r'async def': 'Async/Await pattern',
429
+ r'def test_': 'Unit test pattern',
430
+ r'Protocol\)': 'Protocol (interface) pattern',
431
+ }
432
+
433
+ for pattern, name in patterns.items():
434
+ if re.search(pattern, content, re.IGNORECASE):
435
+ learnings["patterns_found"].append(name)
436
+
437
+ # Extract class and function names
438
+ for line in lines:
439
+ if line.strip().startswith('class '):
440
+ match = re.match(r'class\s+(\w+)', line.strip())
441
+ if match:
442
+ learnings["key_concepts"].append(f"Class: {match.group(1)}")
443
+ elif line.strip().startswith('def '):
444
+ match = re.match(r'def\s+(\w+)', line.strip())
445
+ if match and not match.group(1).startswith('_'):
446
+ learnings["key_concepts"].append(f"Function: {match.group(1)}")
447
+
448
+ learnings["key_concepts"] = learnings["key_concepts"][:15]
449
+ return learnings
450
+
451
+
452
+ def _analyze_javascript_file(content: str, learnings: dict) -> dict:
453
+ """Extract patterns from JavaScript/TypeScript code."""
454
+ patterns = {
455
+ r'async\s+function': 'Async functions',
456
+ r'await\s+': 'Await usage',
457
+ r'export\s+default': 'Default exports',
458
+ r'interface\s+': 'TypeScript interfaces',
459
+ r'useState': 'React useState hook',
460
+ r'useEffect': 'React useEffect hook',
461
+ }
462
+
463
+ for pattern, name in patterns.items():
464
+ if re.search(pattern, content):
465
+ learnings["patterns_found"].append(name)
466
+
467
+ # Extract exports
468
+ for line in content.splitlines():
469
+ if 'export' in line:
470
+ match = re.search(r'export\s+(?:default\s+)?(?:function|const|class)\s+(\w+)', line)
471
+ if match:
472
+ learnings["key_concepts"].append(f"Export: {match.group(1)}")
473
+
474
+ learnings["key_concepts"] = learnings["key_concepts"][:15]
475
+ return learnings
476
+
477
+
478
+ def _analyze_config_file(content: str, learnings: dict, file_ext: str) -> dict:
479
+ """Extract insights from configuration files."""
480
+ learnings["key_concepts"].append(f"Configuration file ({file_ext})")
481
+
482
+ if file_ext == '.json':
483
+ try:
484
+ data = json.loads(content)
485
+ if isinstance(data, dict):
486
+ learnings["key_concepts"].extend([f"Config key: {k}" for k in list(data.keys())[:10]])
487
+ except json.JSONDecodeError:
488
+ pass
489
+ elif file_ext in ['.yaml', '.yml']:
490
+ for line in content.splitlines():
491
+ if ':' in line and not line.strip().startswith('#'):
492
+ key = line.split(':')[0].strip()
493
+ if key and not key.startswith('-'):
494
+ learnings["key_concepts"].append(f"Config: {key}")
495
+ if len(learnings["key_concepts"]) >= 15:
496
+ break
497
+
498
+ return learnings
499
+
500
+
501
+ def _analyze_generic_file(content: str, learnings: dict) -> dict:
502
+ """Generic file analysis."""
503
+ lines = content.splitlines()
504
+
505
+ for line in lines[:20]:
506
+ line = line.strip()
507
+ if line and len(line) > 10 and len(line) < 100:
508
+ learnings["key_concepts"].append(line[:80])
509
+
510
+ learnings["key_concepts"] = learnings["key_concepts"][:10]
511
+ return learnings
up/learn/utils.py ADDED
@@ -0,0 +1,117 @@
1
+ """Shared utilities for the learning system."""
2
+
3
+ import json
4
+ import re
5
+ from datetime import date
6
+ from pathlib import Path
7
+ from typing import Optional
8
+
9
+ from rich.console import Console
10
+ from rich.table import Table
11
+
12
+ console = Console()
13
+
14
+
15
def find_skill_dir(workspace: Path, skill_name: str) -> Path:
    """Locate the skill directory, preferring Claude over Cursor.

    Returns the first existing candidate; when neither exists, falls back
    to the (possibly not-yet-created) Claude path.
    """
    candidates = (
        workspace / f".claude/skills/{skill_name}",
        workspace / f".cursor/skills/{skill_name}",
    )
    for candidate in candidates:
        if candidate.exists():
            return candidate

    # Default to the Claude location even if it does not exist yet.
    return candidates[0]
27
+
28
+
29
def check_vision_map_exists(workspace: Path) -> tuple[bool, Path]:
    """Check if the vision map is set up (not just the unedited template).

    Returns:
        (exists_and_configured, vision_path)
    """
    vision_path = workspace / "docs/roadmap/vision/PRODUCT_VISION.md"

    if not vision_path.exists():
        return False, vision_path

    text = vision_path.read_text()
    # Any of these phrases means the file is still the untouched template.
    placeholders = (
        "One-line vision statement here",
        "Problem 1 | Description",
        "Metric 1 | Value",
    )
    configured = not any(marker in text for marker in placeholders)
    return configured, vision_path
52
+
53
+
54
def is_valid_path(s: str) -> bool:
    """Heuristically decide whether *s* refers to a filesystem path.

    An existing path always qualifies; otherwise the string must contain a
    path-like marker (separator, relative prefix, drive/scheme colon).

    NOTE: the ':' marker makes any colon-bearing string (URLs, "topic: x")
    count as a path; callers rely on this deliberately loose matching.
    """
    if Path(s).exists():
        return True

    # Fix: the original tested `s.startswith(ind) or ind in s`; the prefix
    # check is dead logic since substring containment already covers it.
    path_indicators = ('/', '\\', './', '../', '~/', ':', 'C:\\')
    return any(ind in s for ind in path_indicators)
61
+
62
+
63
def safe_filename(name: str) -> str:
    """Convert an arbitrary string to a safe, lowercase filename.

    Strips characters other than word chars, whitespace, and hyphens, then
    collapses each whitespace run into a single underscore.

    Fix: the previous version replaced only literal spaces, so tabs and
    newlines leaked into filenames and repeated spaces produced "__".
    """
    cleaned = re.sub(r'[^\w\s-]', '', name).strip()
    return re.sub(r'\s+', '_', cleaned).lower()
66
+
67
+
68
def record_to_memory(workspace: Path, content: str, entry_type: str = "learning") -> None:
    """Record entry to memory system (optional, best-effort).

    Args:
        workspace: Project root used to locate the memory store.
        content: Free-text entry to record.
        entry_type: "learning" uses the dedicated recorder; any other value
            is passed through to the generic record() call.

    Never raises: any failure (including the memory package being
    unimportable) is swallowed, since recording is a nice-to-have.
    """
    try:
        # Imported lazily so these utilities do not hard-depend on the
        # memory subsystem being importable.
        from up.memory import MemoryManager
        manager = MemoryManager(workspace, use_vectors=False)
        if entry_type == "learning":
            manager.record_learning(content)
        else:
            manager.record(content, entry_type=entry_type)
    except Exception:
        pass  # Memory recording is optional; failures are deliberately ignored
79
+
80
+
81
def display_profile(profile: dict) -> None:
    """Render the project profile as a two-column rich table on the console."""
    rows = (
        ("Name", profile.get("name", "Unknown")),
        ("Languages", ", ".join(profile.get("languages", [])) or "None"),
        ("Frameworks", ", ".join(profile.get("frameworks", [])) or "None"),
        ("Patterns", ", ".join(profile.get("patterns_detected", [])) or "None"),
        ("Improvements", ", ".join(profile.get("improvement_areas", [])) or "None"),
        ("Research Topics", ", ".join(profile.get("research_topics", [])) or "None"),
    )

    table = Table(title="Project Profile")
    table.add_column("Aspect", style="cyan")
    table.add_column("Detected")
    for aspect, detected in rows:
        table.add_row(aspect, detected)

    console.print(table)
95
+
96
+
97
def save_profile(workspace: Path, profile: dict) -> Path:
    """Persist the project profile as pretty-printed JSON.

    Creates the skill directory if needed and returns the written path.
    """
    skill_dir = find_skill_dir(workspace, "learning-system")
    skill_dir.mkdir(parents=True, exist_ok=True)

    filepath = skill_dir / "project_profile.json"
    # ensure_ascii=False keeps non-ASCII text readable on disk; an explicit
    # UTF-8 encoding avoids depending on the platform default.
    filepath.write_text(
        json.dumps(profile, indent=2, ensure_ascii=False),
        encoding="utf-8",
    )
    return filepath
105
+
106
+
107
def load_profile(workspace: Path) -> dict:
    """Load the saved project profile, returning {} when absent or invalid."""
    profile_file = find_skill_dir(workspace, "learning-system") / "project_profile.json"

    if profile_file.exists():
        try:
            # Explicit UTF-8 matches save_profile and the JSON spec; OSError
            # is also caught to cover an exists()/read race or permissions.
            return json.loads(profile_file.read_text(encoding="utf-8"))
        except (OSError, json.JSONDecodeError):
            pass  # Treat unreadable or corrupt profiles the same as missing
    return {}