up-cli 0.1.1-py3-none-any.whl → 0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- up/cli.py +27 -1
- up/commands/dashboard.py +248 -0
- up/commands/learn.py +381 -0
- up/commands/new.py +108 -10
- up/commands/start.py +414 -0
- up/commands/status.py +205 -0
- up/commands/summarize.py +122 -0
- up/context.py +367 -0
- up/summarizer.py +407 -0
- up/templates/__init__.py +70 -2
- up/templates/config/__init__.py +502 -20
- up/templates/learn/__init__.py +567 -14
- up/templates/loop/__init__.py +480 -21
- up/templates/mcp/__init__.py +474 -0
- up/templates/projects/__init__.py +786 -0
- up_cli-0.2.0.dist-info/METADATA +374 -0
- up_cli-0.2.0.dist-info/RECORD +23 -0
- up_cli-0.1.1.dist-info/METADATA +0 -186
- up_cli-0.1.1.dist-info/RECORD +0 -14
- {up_cli-0.1.1.dist-info → up_cli-0.2.0.dist-info}/WHEEL +0 -0
- {up_cli-0.1.1.dist-info → up_cli-0.2.0.dist-info}/entry_points.txt +0 -0
up/summarizer.py
ADDED
@@ -0,0 +1,407 @@
+"""Conversation summarizer for Claude and Cursor chat history.
+
+Extracts patterns, learnings, and actionable insights from AI conversation history.
+"""
+
+import json
+import re
+from dataclasses import dataclass, field
+from datetime import datetime
+from pathlib import Path
+from typing import Optional
+from collections import Counter
+
+
+@dataclass
+class ConversationPattern:
+    """A pattern extracted from conversations."""
+    name: str
+    description: str
+    frequency: int
+    examples: list[str] = field(default_factory=list)
+    category: str = "general"
+
+
+@dataclass
+class ConversationInsight:
+    """An insight or learning from conversations."""
+    title: str
+    description: str
+    source_count: int
+    confidence: str  # high, medium, low
+    actionable: bool = False
+    action: str = ""
+
+
+@dataclass
+class SummaryReport:
+    """Summary report from conversation analysis."""
+    total_conversations: int
+    total_messages: int
+    date_range: tuple[str, str]
+    top_topics: list[tuple[str, int]]
+    patterns: list[ConversationPattern]
+    insights: list[ConversationInsight]
+    code_snippets: list[dict]
+    errors_encountered: list[str]
+
+
+class ConversationSummarizer:
+    """Analyzes and summarizes AI conversation history."""
+
+    # Common coding topics to detect
+    TOPIC_PATTERNS = {
+        "testing": r"\b(test|pytest|jest|unit test|integration test|coverage)\b",
+        "debugging": r"\b(debug|error|exception|traceback|fix|bug)\b",
+        "refactoring": r"\b(refactor|clean up|improve|optimize|restructure)\b",
+        "documentation": r"\b(document|readme|docstring|comment|explain)\b",
+        "api": r"\b(api|endpoint|route|http|rest|graphql)\b",
+        "database": r"\b(database|sql|query|migration|schema|model)\b",
+        "authentication": r"\b(auth|login|jwt|session|permission|role)\b",
+        "deployment": r"\b(deploy|docker|kubernetes|ci/cd|pipeline)\b",
+        "performance": r"\b(performance|optimize|cache|slow|fast|memory)\b",
+        "security": r"\b(security|vulnerability|sanitize|escape|inject)\b",
+    }
+
+    # Error patterns
+    ERROR_PATTERNS = [
+        r"error:\s*(.+?)(?:\n|$)",
+        r"exception:\s*(.+?)(?:\n|$)",
+        r"failed:\s*(.+?)(?:\n|$)",
+        r"TypeError:\s*(.+?)(?:\n|$)",
+        r"ValueError:\s*(.+?)(?:\n|$)",
+        r"ImportError:\s*(.+?)(?:\n|$)",
+    ]
+
+    def __init__(self, conversations: list[dict]):
+        """Initialize with conversation data.
+
+        Args:
+            conversations: List of conversation dicts with messages
+        """
+        self.conversations = conversations
+        self.all_messages = []
+        self._extract_messages()
+
+    def _extract_messages(self) -> None:
+        """Extract all messages from conversations."""
+        for conv in self.conversations:
+            for msg in conv.get("messages", []):
+                self.all_messages.append({
+                    "role": msg.get("role", "unknown"),
+                    "content": msg.get("content", ""),
+                    "timestamp": msg.get("timestamp"),
+                    "conversation_id": conv.get("id"),
+                    "project": conv.get("project"),
+                })
+
+    def analyze(self) -> SummaryReport:
+        """Analyze conversations and generate summary report."""
+        # Basic stats
+        total_convs = len(self.conversations)
+        total_msgs = len(self.all_messages)
+
+        # Date range
+        timestamps = [
+            m["timestamp"] for m in self.all_messages
+            if m.get("timestamp")
+        ]
+        if timestamps:
+            date_range = (
+                self._format_timestamp(min(timestamps)),
+                self._format_timestamp(max(timestamps)),
+            )
+        else:
+            date_range = ("Unknown", "Unknown")
+
+        # Analyze topics
+        top_topics = self._analyze_topics()
+
+        # Extract patterns
+        patterns = self._extract_patterns()
+
+        # Generate insights
+        insights = self._generate_insights(top_topics, patterns)
+
+        # Extract code snippets
+        code_snippets = self._extract_code_snippets()
+
+        # Extract errors
+        errors = self._extract_errors()
+
+        return SummaryReport(
+            total_conversations=total_convs,
+            total_messages=total_msgs,
+            date_range=date_range,
+            top_topics=top_topics[:10],
+            patterns=patterns[:10],
+            insights=insights[:10],
+            code_snippets=code_snippets[:20],
+            errors_encountered=errors[:20],
+        )
+
+    def _format_timestamp(self, ts: int) -> str:
+        """Format timestamp to readable string."""
+        try:
+            dt = datetime.fromtimestamp(ts / 1000)
+            return dt.strftime("%Y-%m-%d")
+        except (ValueError, TypeError):
+            return "Unknown"
+
+    def _analyze_topics(self) -> list[tuple[str, int]]:
+        """Analyze topic frequency in conversations."""
+        topic_counts = Counter()
+
+        for msg in self.all_messages:
+            content = msg.get("content", "").lower()
+            for topic, pattern in self.TOPIC_PATTERNS.items():
+                if re.search(pattern, content, re.IGNORECASE):
+                    topic_counts[topic] += 1
+
+        return topic_counts.most_common()
+
+    def _extract_patterns(self) -> list[ConversationPattern]:
+        """Extract common patterns from conversations."""
+        patterns = []
+
+        # Pattern: Common user request types
+        request_patterns = {
+            "implementation": r"(implement|create|add|build)\s+(\w+)",
+            "fix": r"(fix|resolve|solve)\s+(\w+)",
+            "explain": r"(explain|what is|how does)\s+(\w+)",
+            "refactor": r"(refactor|improve|clean)\s+(\w+)",
+        }
+
+        request_counts = Counter()
+        request_examples = {}
+
+        for msg in self.all_messages:
+            if msg.get("role") != "user":
+                continue
+            content = msg.get("content", "")
+
+            for pattern_name, pattern in request_patterns.items():
+                matches = re.findall(pattern, content, re.IGNORECASE)
+                if matches:
+                    request_counts[pattern_name] += len(matches)
+                    if pattern_name not in request_examples:
+                        request_examples[pattern_name] = []
+                    if len(request_examples[pattern_name]) < 3:
+                        request_examples[pattern_name].append(content[:100])
+
+        for name, count in request_counts.most_common(10):
+            patterns.append(ConversationPattern(
+                name=f"Request: {name}",
+                description=f"User frequently requests {name} actions",
+                frequency=count,
+                examples=request_examples.get(name, []),
+                category="request_type",
+            ))
+
+        return patterns
+
+    def _generate_insights(
+        self,
+        topics: list[tuple[str, int]],
+        patterns: list[ConversationPattern]
+    ) -> list[ConversationInsight]:
+        """Generate actionable insights from analysis."""
+        insights = []
+
+        # Insight from top topics
+        if topics:
+            top_topic = topics[0][0]
+            insights.append(ConversationInsight(
+                title=f"Primary Focus: {top_topic.title()}",
+                description=f"Most conversations involve {top_topic} ({topics[0][1]} mentions)",
+                source_count=topics[0][1],
+                confidence="high",
+                actionable=True,
+                action=f"Consider creating documentation or templates for {top_topic}",
+            ))
+
+        # Insight from error patterns
+        error_count = sum(1 for m in self.all_messages if "error" in m.get("content", "").lower())
+        if error_count > 10:
+            insights.append(ConversationInsight(
+                title="Frequent Debugging Sessions",
+                description=f"{error_count} messages contain error-related content",
+                source_count=error_count,
+                confidence="high",
+                actionable=True,
+                action="Consider improving error handling or adding better logging",
+            ))
+
+        # Insight from conversation length
+        avg_msgs = len(self.all_messages) / max(len(self.conversations), 1)
+        if avg_msgs > 20:
+            insights.append(ConversationInsight(
+                title="Long Conversation Sessions",
+                description=f"Average {avg_msgs:.0f} messages per conversation",
+                source_count=len(self.conversations),
+                confidence="medium",
+                actionable=True,
+                action="Consider breaking complex tasks into smaller sessions",
+            ))
+
+        return insights
+
+    def _extract_code_snippets(self) -> list[dict]:
+        """Extract code snippets from conversations."""
+        snippets = []
+        code_pattern = r"```(\w+)?\n(.*?)```"
+
+        for msg in self.all_messages:
+            if msg.get("role") != "assistant":
+                continue
+            content = msg.get("content", "")
+
+            matches = re.findall(code_pattern, content, re.DOTALL)
+            for lang, code in matches:
+                if len(code.strip()) > 20:  # Skip trivial snippets
+                    snippets.append({
+                        "language": lang or "unknown",
+                        "code": code.strip()[:500],  # Limit size
+                        "project": msg.get("project"),
+                    })
+
+        return snippets
+
+    def _extract_errors(self) -> list[str]:
+        """Extract unique errors from conversations."""
+        errors = set()
+
+        for msg in self.all_messages:
+            content = msg.get("content", "")
+            for pattern in self.ERROR_PATTERNS:
+                matches = re.findall(pattern, content, re.IGNORECASE)
+                for match in matches:
+                    error_text = match.strip()[:100]
+                    if len(error_text) > 10:
+                        errors.add(error_text)
+
+        return sorted(errors)
+
+    def to_markdown(self) -> str:
+        """Generate markdown report."""
+        report = self.analyze()
+
+        lines = [
+            "# Conversation Analysis Report",
+            "",
+            f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M')}",
+            f"**Period**: {report.date_range[0]} to {report.date_range[1]}",
+            "",
+            "---",
+            "",
+            "## Summary",
+            "",
+            f"- **Total Conversations**: {report.total_conversations}",
+            f"- **Total Messages**: {report.total_messages}",
+            "",
+            "## Top Topics",
+            "",
+        ]
+
+        for topic, count in report.top_topics:
+            lines.append(f"- {topic.title()}: {count} mentions")
+
+        lines.extend([
+            "",
+            "## Key Insights",
+            "",
+        ])
+
+        for insight in report.insights:
+            lines.append(f"### {insight.title}")
+            lines.append(f"")
+            lines.append(f"{insight.description}")
+            if insight.actionable:
+                lines.append(f"")
+                lines.append(f"**Action**: {insight.action}")
+            lines.append("")
+
+        if report.errors_encountered:
+            lines.extend([
+                "## Common Errors",
+                "",
+            ])
+            for error in report.errors_encountered[:10]:
+                lines.append(f"- `{error}`")
+
+        lines.extend([
+            "",
+            "---",
+            "",
+            "*Generated by up-cli conversation summarizer*",
+        ])
+
+        return "\n".join(lines)
+
+    def to_json(self) -> str:
+        """Generate JSON report."""
+        report = self.analyze()
+
+        return json.dumps({
+            "generated_at": datetime.now().isoformat(),
+            "summary": {
+                "total_conversations": report.total_conversations,
+                "total_messages": report.total_messages,
+                "date_range": report.date_range,
+            },
+            "top_topics": [
+                {"topic": t, "count": c} for t, c in report.top_topics
+            ],
+            "insights": [
+                {
+                    "title": i.title,
+                    "description": i.description,
+                    "actionable": i.actionable,
+                    "action": i.action if i.actionable else None,
+                }
+                for i in report.insights
+            ],
+            "errors": report.errors_encountered,
+        }, indent=2)
+
+
+def summarize_cursor_history(output_format: str = "markdown") -> str:
+    """Summarize Cursor chat history.
+
+    Args:
+        output_format: 'markdown' or 'json'
+
+    Returns:
+        Formatted summary report
+    """
+    # Import the export script
+    try:
+        from scripts.export_cursor_history import load_all_data
+    except ImportError:
+        # Try relative import
+        import sys
+        sys.path.insert(0, str(Path(__file__).parent.parent.parent / "scripts"))
+        from export_cursor_history import load_all_data
+
+    conversations = load_all_data()
+    summarizer = ConversationSummarizer(conversations)
+
+    if output_format == "json":
+        return summarizer.to_json()
+    return summarizer.to_markdown()
+
+
+# CLI
+if __name__ == "__main__":
+    import sys
+
+    output_format = "markdown"
+    if len(sys.argv) > 1 and sys.argv[1] == "--json":
+        output_format = "json"
+
+    try:
+        result = summarize_cursor_history(output_format)
+        print(result)
+    except Exception as e:
+        print(f"Error: {e}", file=sys.stderr)
+        sys.exit(1)
up/templates/__init__.py
CHANGED
@@ -18,13 +18,20 @@ def scaffold_project(
     from up.templates.loop import create_loop_system
     from up.templates.docs_skill import create_docs_skill
     from up.templates.config import create_config_files
+    from up.templates.mcp import create_mcp_config
+    from up.context import create_context_budget_file
 
     # Create base structure
     _create_base_structure(target_dir, ai_target)
 
-    # Create config files
+    # Create config files (CLAUDE.md, .cursor/rules/, etc.)
     create_config_files(target_dir, ai_target, force)
 
+    # Create context budget tracking
+    if ai_target in ("claude", "both"):
+        console.print(" [dim]Creating context budget tracking...[/]")
+        create_context_budget_file(target_dir)
+
     # Create selected systems
     if "docs" in systems:
         console.print(" [dim]Creating docs system...[/]")
@@ -39,6 +46,13 @@ def scaffold_project(
         console.print(" [dim]Creating product-loop system...[/]")
         create_loop_system(target_dir, ai_target, force)
 
+    if "mcp" in systems:
+        console.print(" [dim]Creating MCP configuration...[/]")
+        create_mcp_config(target_dir, ai_target, force)
+
+    # Create handoff file
+    _create_handoff_file(target_dir, force)
+
 
 def _create_base_structure(target_dir: Path, ai_target: str) -> None:
     """Create base directory structure."""
@@ -48,7 +62,61 @@ def _create_base_structure(target_dir: Path, ai_target: str) -> None:
     if ai_target in ("claude", "both"):
         dirs.append(".claude/skills")
     if ai_target in ("cursor", "both"):
-        dirs.append(".cursor")
+        dirs.append(".cursor/rules")
 
     for d in dirs:
         (target_dir / d).mkdir(parents=True, exist_ok=True)
+
+
+def _create_handoff_file(target_dir: Path, force: bool) -> None:
+    """Create initial handoff file for session continuity."""
+    from datetime import date
+
+    handoff_dir = target_dir / "docs/handoff"
+    handoff_dir.mkdir(parents=True, exist_ok=True)
+
+    content = f"""# Latest Session Handoff
+
+**Date**: {date.today().isoformat()}
+**Status**: 🟢 Ready
+
+---
+
+## Session Summary
+
+Project initialized with up-cli.
+
+## What Was Done
+
+- Initialized project structure
+- Set up documentation system
+- Configured AI assistant integration
+
+## Current State
+
+- All systems initialized and ready
+- No blockers
+
+## Next Steps
+
+1. Define project vision in `docs/roadmap/vision/PRODUCT_VISION.md`
+2. Run `/learn auto` to analyze project
+3. Begin development with `/product-loop`
+
+## Files Modified
+
+- CLAUDE.md
+- .cursorrules
+- docs/* (initial structure)
+
+## Notes
+
+Ready for development.
+
+---
+
+*Update this file at the end of each session*
+"""
+    filepath = handoff_dir / "LATEST.md"
+    if not filepath.exists() or force:
+        filepath.write_text(content)