clicodelog 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clicodelog/__init__.py +6 -0
- clicodelog/__main__.py +6 -0
- clicodelog/app.py +996 -0
- clicodelog/cli.py +56 -0
- clicodelog/templates/index.html +1067 -0
- clicodelog-0.1.0.dist-info/METADATA +305 -0
- clicodelog-0.1.0.dist-info/RECORD +11 -0
- clicodelog-0.1.0.dist-info/WHEEL +5 -0
- clicodelog-0.1.0.dist-info/entry_points.txt +2 -0
- clicodelog-0.1.0.dist-info/licenses/LICENSE +21 -0
- clicodelog-0.1.0.dist-info/top_level.txt +1 -0
clicodelog/app.py
ADDED
@@ -0,0 +1,996 @@
"""
cli code log
A web app to browse, inspect, and export logs from CLI-based AI coding agents.
Data is copied from source directories to ~/.clicodelog/data/ for backup and local use.
Background sync runs every hour to keep data updated.
"""

import base64
import json
import os
import shutil
import threading
import time
from datetime import datetime
from pathlib import Path

from flask import Flask, Response, jsonify, render_template, request
from flask_cors import CORS

# Package directory for templates
PACKAGE_DIR = Path(__file__).parent

# User data directory
APP_DATA_DIR = Path.home() / ".clicodelog"
DATA_DIR = APP_DATA_DIR / "data"

# Sync interval in seconds (1 hour = 3600 seconds)
SYNC_INTERVAL = 3600

# Source configurations for different tools
SOURCES = {
    "claude-code": {
        "name": "Claude Code",
        "source_dir": Path.home() / ".claude" / "projects",
        "data_subdir": "claude-code"
    },
    "codex": {
        "name": "OpenAI Codex",
        "source_dir": Path.home() / ".codex" / "sessions",
        "data_subdir": "codex"
    },
    "gemini": {
        "name": "Google Gemini",
        "source_dir": Path.home() / ".gemini" / "tmp",
        "data_subdir": "gemini"
    }
}

# Lock for thread-safe sync operations
sync_lock = threading.Lock()
last_sync_time = {}  # Track per-source sync times
current_source = "claude-code"  # Default source

# Create Flask app with template folder from package
app = Flask(__name__, template_folder=str(PACKAGE_DIR / "templates"))
CORS(app)

def sync_data(source_id=None, silent=False):
    """Copy data from source directory to data dir for backup."""
    global last_sync_time

    if source_id is None:
        source_id = current_source

    if source_id not in SOURCES:
        if not silent:
            print(f"Unknown source: {source_id}")
        return False

    source_config = SOURCES[source_id]
    source_dir = source_config["source_dir"]
    dest_dir = DATA_DIR / source_config["data_subdir"]

    with sync_lock:
        if not source_dir.exists():
            if not silent:
                print(f"Source directory not found: {source_dir}")
            return False

        # Create data directory if it doesn't exist
        DATA_DIR.mkdir(parents=True, exist_ok=True)

        # Copy source directory
        if not silent:
            print(f"Syncing {source_config['name']} data from {source_dir} to {dest_dir}...")

        if dest_dir.exists():
            # Remove old data and replace with fresh copy
            shutil.rmtree(dest_dir)

        shutil.copytree(source_dir, dest_dir)

        # Count what was copied
        if source_id == "claude-code":
            project_count = sum(1 for p in dest_dir.iterdir() if p.is_dir())
            session_count = sum(1 for p in dest_dir.iterdir() if p.is_dir() for _ in p.glob("*.jsonl"))
        elif source_id == "codex":
            session_files = list(dest_dir.rglob("*.jsonl"))
            session_count = len(session_files)
            # Count unique cwds as projects
            project_count = len(set(get_codex_cwd(f) for f in session_files if get_codex_cwd(f)))
        else:  # gemini - sessions are in {hash}/chats/session-*.json
            session_files = list(dest_dir.rglob("chats/session-*.json"))
            session_count = len(session_files)
            # Count unique project hashes as projects
            project_count = len(set(get_gemini_project_hash(f) for f in session_files if get_gemini_project_hash(f)))

        last_sync_time[source_id] = datetime.now()

        if not silent:
            print(f"Synced {project_count} projects with {session_count} sessions")
        else:
            print(f"[{last_sync_time[source_id].strftime('%Y-%m-%d %H:%M:%S')}] Background sync ({source_config['name']}): {project_count} projects, {session_count} sessions")

        return True

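# Illustrative on-disk layout after a sync of all three sources (follows from
# SOURCES and DATA_DIR above):
#   ~/.clicodelog/data/claude-code/  <- copy of ~/.claude/projects
#   ~/.clicodelog/data/codex/        <- copy of ~/.codex/sessions
#   ~/.clicodelog/data/gemini/       <- copy of ~/.gemini/tmp
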
def get_codex_cwd(session_file):
    """Extract cwd from a Codex session file for grouping."""
    try:
        with open(session_file, 'r') as f:
            for line in f:
                try:
                    entry = json.loads(line)
                    if entry.get("type") == "session_meta":
                        return entry.get("payload", {}).get("cwd", "")
                except json.JSONDecodeError:
                    continue
    except Exception:
        pass
    return None

def get_gemini_project_hash(session_file):
    """Extract projectHash from a Gemini session file for grouping."""
    try:
        with open(session_file, 'r') as f:
            data = json.load(f)
            return data.get("projectHash", "")
    except Exception:
        pass
    return None

def background_sync():
    """Background thread that syncs data every SYNC_INTERVAL seconds."""
    while True:
        time.sleep(SYNC_INTERVAL)
        for source_id in SOURCES:
            try:
                sync_data(source_id=source_id, silent=True)
            except Exception as e:
                print(f"[Background sync error for {source_id}] {e}")

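# Note: the sleep comes first, so the first background pass runs SYNC_INTERVAL
# seconds after startup; the initial sync is done directly by run_server().
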
def encode_path_id(path):
    """Encode a path as a safe ID using base64."""
    return base64.urlsafe_b64encode(path.encode()).decode().rstrip('=')

def decode_path_id(encoded_id):
    """Decode a base64-encoded path ID."""
    # Add back padding if needed
    padding = 4 - len(encoded_id) % 4
    if padding != 4:
        encoded_id += '=' * padding
    return base64.urlsafe_b64decode(encoded_id.encode()).decode()

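# Illustrative round-trip for a hypothetical path:
#   encode_path_id("/tmp")    -> "L3RtcA"   (the trailing "==" padding is stripped)
#   decode_path_id("L3RtcA")  -> "/tmp"     (padding is restored before decoding)
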
def get_projects(source_id=None):
    """Get all project directories for the specified source."""
    if source_id is None:
        source_id = current_source

    if source_id not in SOURCES:
        return []

    data_dir = DATA_DIR / SOURCES[source_id]["data_subdir"]

    if not data_dir.exists():
        return []

    projects = []

    if source_id == "claude-code":
        # Claude Code: projects are directories
        for project_dir in sorted(data_dir.iterdir()):
            if project_dir.is_dir():
                # Convert directory name back to readable path
                readable_name = project_dir.name.replace("-", "/").lstrip("/")
                sessions = list(project_dir.glob("*.jsonl"))
                projects.append({
                    "id": project_dir.name,
                    "name": readable_name,
                    "session_count": len(sessions),
                    "path": str(project_dir)
                })
    elif source_id == "codex":
        # Codex: group sessions by cwd
        session_files = list(data_dir.rglob("*.jsonl"))
        cwd_sessions = {}

        for session_file in session_files:
            cwd = get_codex_cwd(session_file)
            if cwd:
                if cwd not in cwd_sessions:
                    cwd_sessions[cwd] = []
                cwd_sessions[cwd].append(session_file)

        for cwd, sessions in sorted(cwd_sessions.items()):
            # Create a safe ID from the cwd path using base64 encoding
            project_id = encode_path_id(cwd)
            projects.append({
                "id": project_id,
                "name": cwd,
                "session_count": len(sessions),
                "path": cwd
            })
    else:  # gemini
        # Gemini: group sessions by projectHash
        session_files = list(data_dir.rglob("chats/session-*.json"))
        hash_sessions = {}

        for session_file in session_files:
            project_hash = get_gemini_project_hash(session_file)
            if project_hash:
                if project_hash not in hash_sessions:
                    hash_sessions[project_hash] = []
                hash_sessions[project_hash].append(session_file)

        for project_hash, sessions in sorted(hash_sessions.items()):
            # Use the hash as ID, show shortened hash as name
            projects.append({
                "id": project_hash,
                "name": f"Project {project_hash[:8]}...",
                "session_count": len(sessions),
                "path": str(data_dir / project_hash)
            })

    return projects

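# Illustrative entry for a hypothetical claude-code project directory named
# "-Users-jane-dev-myapp":
#   {"id": "-Users-jane-dev-myapp", "name": "Users/jane/dev/myapp",
#    "session_count": 3, "path": ".../data/claude-code/-Users-jane-dev-myapp"}
# Note that replace("-", "/") also rewrites hyphens that were part of the
# original path, so "/Users/jane/my-app" is displayed as "Users/jane/my/app".
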
def get_sessions(project_id, source_id=None):
    """Get all sessions for a project."""
    if source_id is None:
        source_id = current_source

    if source_id not in SOURCES:
        return []

    data_dir = DATA_DIR / SOURCES[source_id]["data_subdir"]

    if source_id == "claude-code":
        project_dir = data_dir / project_id
        if not project_dir.exists():
            return []
        session_files = sorted(project_dir.glob("*.jsonl"), key=lambda x: x.stat().st_mtime, reverse=True)
    elif source_id == "codex":
        # Decode the project_id to get the actual cwd
        try:
            target_cwd = decode_path_id(project_id)
        except Exception:
            return []

        all_sessions = list(data_dir.rglob("*.jsonl"))
        session_files = [f for f in all_sessions if get_codex_cwd(f) == target_cwd]
        session_files = sorted(session_files, key=lambda x: x.stat().st_mtime, reverse=True)
    else:  # gemini
        # project_id is the projectHash
        all_sessions = list(data_dir.rglob("chats/session-*.json"))
        session_files = [f for f in all_sessions if get_gemini_project_hash(f) == project_id]
        session_files = sorted(session_files, key=lambda x: x.stat().st_mtime, reverse=True)

    sessions = []
    for session_file in session_files:
        session_info = parse_session_info(session_file, source_id)
        if session_info:
            sessions.append(session_info)

    return sessions

def parse_session_info(session_file, source_id):
    """Parse session file to extract metadata."""
    first_summary = None
    message_count = 0
    first_timestamp = None
    last_timestamp = None
    first_user_message = None

    try:
        if source_id == "gemini":
            # Gemini uses regular JSON files
            with open(session_file, 'r') as f:
                data = json.load(f)
                first_timestamp = data.get("startTime")
                last_timestamp = data.get("lastUpdated")
                messages = data.get("messages", [])
                for msg in messages:
                    msg_type = msg.get("type")
                    if msg_type in ("user", "gemini"):
                        message_count += 1
                        if msg_type == "user" and not first_user_message:
                            content = msg.get("content", "")
                            if isinstance(content, str) and len(content) < 500:
                                first_user_message = content[:100]
        else:
            # JSONL format for claude-code and codex
            with open(session_file, 'r') as f:
                for line in f:
                    try:
                        entry = json.loads(line)

                        if source_id == "claude-code":
                            if entry.get("type") == "summary" and not first_summary:
                                first_summary = entry.get("summary", "")
                            if entry.get("timestamp"):
                                if not first_timestamp:
                                    first_timestamp = entry.get("timestamp")
                                last_timestamp = entry.get("timestamp")
                            if entry.get("type") in ("user", "assistant"):
                                message_count += 1
                                # Get first user message as fallback summary
                                if entry.get("type") == "user" and not first_user_message:
                                    msg = entry.get("message", {})
                                    content = msg.get("content", "")
                                    if isinstance(content, list):
                                        for block in content:
                                            if isinstance(block, dict) and block.get("type") == "text":
                                                first_user_message = block.get("text", "")[:100]
                                                break
                                    elif isinstance(content, str):
                                        first_user_message = content[:100]
                        else:  # codex
                            entry_type = entry.get("type")
                            timestamp = entry.get("timestamp")

                            if timestamp:
                                if not first_timestamp:
                                    first_timestamp = timestamp
                                last_timestamp = timestamp

                            if entry_type == "response_item":
                                payload = entry.get("payload", {})
                                role = payload.get("role")
                                if role in ("user", "assistant"):
                                    message_count += 1
                                    # Get first user message as summary
                                    if role == "user" and not first_user_message:
                                        content = payload.get("content", [])
                                        for block in content:
                                            if isinstance(block, dict) and block.get("type") == "input_text":
                                                text = block.get("text", "")
                                                # Skip system messages
                                                if not text.startswith("<") and len(text) < 500:
                                                    first_user_message = text[:100]
                                                    break
                            elif entry_type == "event_msg":
                                payload = entry.get("payload", {})
                                if payload.get("type") == "user_message" and not first_user_message:
                                    first_user_message = payload.get("message", "")[:100]

                    except json.JSONDecodeError:
                        continue
    except Exception as e:
        print(f"Error reading {session_file}: {e}")
        return None

    # Use first user message as summary if no summary found
    if not first_summary:
        first_summary = first_user_message or "No summary"

    return {
        "id": session_file.stem,
        "filename": session_file.name,
        "summary": first_summary,
        "message_count": message_count,
        "first_timestamp": first_timestamp,
        "last_timestamp": last_timestamp,
        "size": session_file.stat().st_size,
        "modified": datetime.fromtimestamp(session_file.stat().st_mtime).isoformat(),
        "full_path": str(session_file)  # Store full path for codex sessions
    }

def get_conversation(project_id, session_id, source_id=None):
    """Get all messages in a conversation."""
    if source_id is None:
        source_id = current_source

    if source_id not in SOURCES:
        return {"error": "Unknown source"}

    data_dir = DATA_DIR / SOURCES[source_id]["data_subdir"]

    if source_id == "claude-code":
        session_file = data_dir / project_id / f"{session_id}.jsonl"
    elif source_id == "codex":
        # Decode the project_id to get the actual cwd
        try:
            target_cwd = decode_path_id(project_id)
        except Exception:
            return {"error": "Invalid project ID"}

        session_file = None
        for f in data_dir.rglob("*.jsonl"):
            if f.stem == session_id and get_codex_cwd(f) == target_cwd:
                session_file = f
                break
    else:  # gemini
        # project_id is the projectHash
        session_file = None
        for f in data_dir.rglob("chats/session-*.json"):
            if f.stem == session_id and get_gemini_project_hash(f) == project_id:
                session_file = f
                break

    if not session_file or not session_file.exists():
        return {"error": "Session not found"}

    if source_id == "claude-code":
        return parse_claude_conversation(session_file, session_id)
    elif source_id == "codex":
        return parse_codex_conversation(session_file, session_id)
    else:
        return parse_gemini_conversation(session_file, session_id)

def parse_claude_conversation(session_file, session_id):
    """Parse Claude Code conversation format."""
    messages = []
    summaries = []

    with open(session_file, 'r') as f:
        for line_num, line in enumerate(f):
            try:
                entry = json.loads(line)
                entry_type = entry.get("type")

                if entry_type == "summary":
                    summaries.append(entry.get("summary", ""))

                elif entry_type == "user":
                    msg = entry.get("message", {})
                    content = msg.get("content", "")
                    if isinstance(content, list):
                        # Extract text from content blocks
                        text_parts = []
                        for block in content:
                            if isinstance(block, dict) and block.get("type") == "text":
                                text_parts.append(block.get("text", ""))
                            elif isinstance(block, str):
                                text_parts.append(block)
                        content = "\n".join(text_parts)

                    messages.append({
                        "role": "user",
                        "content": content,
                        "timestamp": entry.get("timestamp"),
                        "uuid": entry.get("uuid"),
                        "cwd": entry.get("cwd"),
                        "gitBranch": entry.get("gitBranch")
                    })

                elif entry_type == "assistant":
                    msg = entry.get("message", {})
                    content_blocks = msg.get("content", [])

                    text_content = []
                    thinking_content = []
                    tool_uses = []

                    for block in content_blocks:
                        if isinstance(block, dict):
                            block_type = block.get("type")
                            if block_type == "text":
                                text_content.append(block.get("text", ""))
                            elif block_type == "thinking":
                                thinking_content.append(block.get("thinking", ""))
                            elif block_type == "tool_use":
                                tool_uses.append({
                                    "name": block.get("name", ""),
                                    "input": block.get("input", {})
                                })

                    messages.append({
                        "role": "assistant",
                        "content": "\n".join(text_content),
                        "thinking": "\n".join(thinking_content) if thinking_content else None,
                        "tool_uses": tool_uses if tool_uses else None,
                        "timestamp": entry.get("timestamp"),
                        "uuid": entry.get("uuid"),
                        "model": msg.get("model"),
                        "usage": msg.get("usage")
                    })

            except json.JSONDecodeError as e:
                print(f"Error parsing line {line_num}: {e}")
                continue

    return {
        "summaries": summaries,
        "messages": messages,
        "session_id": session_id
    }

def parse_codex_conversation(session_file, session_id):
    """Parse OpenAI Codex conversation format."""
    messages = []
    summaries = []
    session_meta = {}

    with open(session_file, 'r') as f:
        for line_num, line in enumerate(f):
            try:
                entry = json.loads(line)
                entry_type = entry.get("type")
                timestamp = entry.get("timestamp")

                if entry_type == "session_meta":
                    session_meta = entry.get("payload", {})

                elif entry_type == "response_item":
                    payload = entry.get("payload", {})
                    role = payload.get("role")
                    payload_type = payload.get("type")

                    if payload_type == "message" and role == "user":
                        content_blocks = payload.get("content", [])
                        text_parts = []
                        for block in content_blocks:
                            if isinstance(block, dict) and block.get("type") == "input_text":
                                text = block.get("text", "")
                                # Skip system messages
                                if (text.startswith("<") or
                                        text.startswith("# AGENTS.md") or
                                        text.startswith("<environment_context") or
                                        "<permissions instructions>" in text or
                                        len(text) > 1000):  # Very long messages are likely system prompts
                                    continue
                                text_parts.append(text)
                        if text_parts:
                            messages.append({
                                "role": "user",
                                "content": "\n".join(text_parts),
                                "timestamp": timestamp
                            })

                    elif payload_type == "message" and role == "assistant":
                        content_blocks = payload.get("content", [])
                        text_parts = []
                        for block in content_blocks:
                            if isinstance(block, dict) and block.get("type") == "output_text":
                                text_parts.append(block.get("text", ""))
                        if text_parts:
                            messages.append({
                                "role": "assistant",
                                "content": "\n".join(text_parts),
                                "timestamp": timestamp,
                                "model": session_meta.get("model_provider", "openai")
                            })

                    elif payload_type == "function_call":
                        # Tool/function call
                        messages.append({
                            "role": "assistant",
                            "content": "",
                            "timestamp": timestamp,
                            "tool_uses": [{
                                "name": payload.get("name", ""),
                                "input": payload.get("arguments", "")
                            }],
                            "model": session_meta.get("model_provider", "openai")
                        })

                    elif payload_type == "reasoning":
                        # Reasoning/thinking block
                        summary_parts = payload.get("summary", [])
                        thinking_text = ""
                        for part in summary_parts:
                            if isinstance(part, dict) and part.get("type") == "summary_text":
                                thinking_text += part.get("text", "") + "\n"
                        if thinking_text:
                            messages.append({
                                "role": "assistant",
                                "content": "",
                                "thinking": thinking_text.strip(),
                                "timestamp": timestamp,
                                "model": session_meta.get("model_provider", "openai")
                            })

                elif entry_type == "event_msg":
                    payload = entry.get("payload", {})
                    msg_type = payload.get("type")

                    if msg_type == "agent_message":
                        messages.append({
                            "role": "assistant",
                            "content": payload.get("message", ""),
                            "timestamp": timestamp,
                            "model": session_meta.get("model_provider", "openai")
                        })

                elif entry_type == "turn_context":
                    # Extract model info from turn context
                    payload = entry.get("payload", {})
                    if payload.get("model"):
                        session_meta["model"] = payload.get("model")

            except json.JSONDecodeError as e:
                print(f"Error parsing line {line_num}: {e}")
                continue

    # Consolidate consecutive assistant messages with only tool_uses or thinking
    consolidated = []
    for msg in messages:
        if msg["role"] == "assistant" and consolidated and consolidated[-1]["role"] == "assistant":
            prev = consolidated[-1]
            # Merge tool_uses
            if msg.get("tool_uses") and not msg.get("content"):
                if prev.get("tool_uses"):
                    prev["tool_uses"].extend(msg["tool_uses"])
                else:
                    prev["tool_uses"] = msg["tool_uses"]
                continue
            # Merge thinking
            if msg.get("thinking") and not msg.get("content"):
                if prev.get("thinking"):
                    prev["thinking"] += "\n" + msg["thinking"]
                else:
                    prev["thinking"] = msg["thinking"]
                continue
        consolidated.append(msg)

    return {
        "summaries": summaries,
        "messages": consolidated,
        "session_id": session_id,
        "meta": {
            "cwd": session_meta.get("cwd"),
            "model": session_meta.get("model"),
            "cli_version": session_meta.get("cli_version")
        }
    }

def parse_gemini_conversation(session_file, session_id):
    """Parse Google Gemini conversation format."""
    messages = []
    summaries = []
    session_meta = {}

    with open(session_file, 'r') as f:
        data = json.load(f)
        session_meta = {
            "sessionId": data.get("sessionId"),
            "projectHash": data.get("projectHash"),
            "startTime": data.get("startTime"),
            "lastUpdated": data.get("lastUpdated")
        }

        for msg in data.get("messages", []):
            msg_type = msg.get("type")
            timestamp = msg.get("timestamp")
            content = msg.get("content", "")

            if msg_type == "user":
                messages.append({
                    "role": "user",
                    "content": content,
                    "timestamp": timestamp
                })
            elif msg_type == "gemini":
                # Extract thinking from thoughts array
                thinking_parts = []
                thoughts = msg.get("thoughts", [])
                for thought in thoughts:
                    if isinstance(thought, dict):
                        subject = thought.get("subject", "")
                        desc = thought.get("description", "")
                        if subject or desc:
                            thinking_parts.append(f"**{subject}**: {desc}" if subject else desc)

                # Extract tool calls
                tool_uses = []
                tool_calls = msg.get("toolCalls", [])
                for tool_call in tool_calls:
                    if isinstance(tool_call, dict):
                        tool_uses.append({
                            "name": tool_call.get("name", ""),
                            "input": tool_call.get("args", {})
                        })

                messages.append({
                    "role": "assistant",
                    "content": content,
                    "thinking": "\n".join(thinking_parts) if thinking_parts else None,
                    "tool_uses": tool_uses if tool_uses else None,
                    "timestamp": timestamp,
                    "model": msg.get("model", "gemini"),
                    "tokens": msg.get("tokens")
                })

    return {
        "summaries": summaries,
        "messages": messages,
        "session_id": session_id,
        "meta": session_meta
    }

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/api/sources')
def api_sources():
    """Get available sources."""
    sources = []
    for source_id, config in SOURCES.items():
        sources.append({
            "id": source_id,
            "name": config["name"],
            "available": config["source_dir"].exists()
        })
    return jsonify({
        "sources": sources,
        "current": current_source
    })

@app.route('/api/sources/<source_id>', methods=['POST'])
def api_set_source(source_id):
    """Set the current source."""
    global current_source
    if source_id not in SOURCES:
        return jsonify({"error": "Unknown source"}), 400
    current_source = source_id
    return jsonify({"status": "success", "current": current_source})

@app.route('/api/projects')
def api_projects():
    source_id = request.args.get('source', current_source)
    return jsonify(get_projects(source_id))

@app.route('/api/projects/<project_id>/sessions')
def api_sessions(project_id):
    source_id = request.args.get('source', current_source)
    return jsonify(get_sessions(project_id, source_id))

@app.route('/api/projects/<project_id>/sessions/<session_id>')
def api_conversation(project_id, session_id):
    source_id = request.args.get('source', current_source)
    return jsonify(get_conversation(project_id, session_id, source_id))

@app.route('/api/search')
def api_search():
    """Search across all conversations."""
    query = request.args.get('q', '').lower()
    source_id = request.args.get('source', current_source)

    if not query:
        return jsonify([])

    if source_id not in SOURCES:
        return jsonify([])

    data_dir = DATA_DIR / SOURCES[source_id]["data_subdir"]
    if not data_dir.exists():
        return jsonify([])

    results = []

    if source_id == "claude-code":
        for project_dir in data_dir.iterdir():
            if not project_dir.is_dir():
                continue

            for session_file in project_dir.glob("*.jsonl"):
                try:
                    with open(session_file, 'r') as f:
                        content = f.read().lower()
                        if query in content:
                            results.append({
                                "project_id": project_dir.name,
                                "session_id": session_file.stem,
                                "project_name": project_dir.name.replace("-", "/").lstrip("/")
                            })
                except Exception:
                    continue
    elif source_id == "codex":
        for session_file in data_dir.rglob("*.jsonl"):
            try:
                with open(session_file, 'r') as f:
                    content = f.read().lower()
                    if query in content:
                        cwd = get_codex_cwd(session_file)
                        if cwd:
                            project_id = encode_path_id(cwd)
                            results.append({
                                "project_id": project_id,
                                "session_id": session_file.stem,
                                "project_name": cwd
                            })
            except Exception:
                continue
    else:  # gemini
        for session_file in data_dir.rglob("chats/session-*.json"):
            try:
                with open(session_file, 'r') as f:
                    content = f.read().lower()
                    if query in content:
                        project_hash = get_gemini_project_hash(session_file)
                        if project_hash:
                            results.append({
                                "project_id": project_hash,
                                "session_id": session_file.stem,
                                "project_name": f"Project {project_hash[:8]}..."
                            })
            except Exception:
                continue

    return jsonify(results[:50])  # Limit results

@app.route('/api/projects/<project_id>/sessions/<session_id>/export')
def api_export(project_id, session_id):
    """Export conversation as text file."""
    source_id = request.args.get('source', current_source)
    conversation = get_conversation(project_id, session_id, source_id)

    if "error" in conversation:
        return jsonify(conversation), 404

    # Build text content
    lines = []
    lines.append("=" * 60)
    lines.append(f"Session: {session_id}")
    lines.append(f"Project: {project_id.replace('-', '/').lstrip('/')}")
    lines.append("=" * 60)
    lines.append("")

    # Add summaries if present
    if conversation.get("summaries"):
        lines.append("SUMMARIES:")
        for s in conversation["summaries"]:
            lines.append(f" * {s}")
        lines.append("")
        lines.append("-" * 60)
        lines.append("")

    # Add messages
    for msg in conversation.get("messages", []):
        role = msg["role"].upper()
        timestamp = msg.get("timestamp", "")

        lines.append(f"[{role}] {timestamp}")
        if msg.get("model"):
            lines.append(f"Model: {msg['model']}")
        lines.append("-" * 40)

        # Content
        if msg.get("content"):
            lines.append(msg["content"])

        # Thinking
        if msg.get("thinking"):
            lines.append("")
            lines.append("--- THINKING ---")
            lines.append(msg["thinking"])
            lines.append("--- END THINKING ---")

        # Tool uses
        if msg.get("tool_uses"):
            lines.append("")
            for tool in msg["tool_uses"]:
                lines.append(f"[TOOL: {tool['name']}]")
                if isinstance(tool.get("input"), dict):
                    for k, v in tool["input"].items():
                        val = str(v)[:200] + "..." if len(str(v)) > 200 else str(v)
                        lines.append(f" {k}: {val}")
                else:
                    lines.append(f" {tool.get('input', '')}")

        # Usage stats
        if msg.get("usage"):
            usage = msg["usage"]
            tokens = usage.get("input_tokens", 0) + usage.get("output_tokens", 0)
            lines.append(f"\n[Tokens: {tokens}]")

        lines.append("")
        lines.append("=" * 60)
        lines.append("")

    text_content = "\n".join(lines)

    return Response(
        text_content,
        mimetype="text/plain",
        headers={"Content-Disposition": f"attachment; filename={session_id}.txt"}
    )

@app.route('/api/sync', methods=['POST'])
def api_sync():
    """Manually trigger a data sync."""
    source_id = request.args.get('source', current_source)
    try:
        sync_data(source_id=source_id, silent=True)
        return jsonify({
            "status": "success",
            "source": source_id,
            "last_sync": last_sync_time.get(source_id).isoformat() if last_sync_time.get(source_id) else None
        })
    except Exception as e:
        return jsonify({"status": "error", "message": str(e)}), 500

@app.route('/api/status')
def api_status():
    """Get sync status."""
    source_id = request.args.get('source', current_source)
    source_config = SOURCES.get(source_id, {})
    data_dir = DATA_DIR / source_config.get("data_subdir", "")

    return jsonify({
        "source": source_id,
        "last_sync": last_sync_time.get(source_id).isoformat() if last_sync_time.get(source_id) else None,
        "sync_interval_hours": SYNC_INTERVAL / 3600,
        "data_dir": str(data_dir)
    })

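# Illustrative API calls against a local instance (assuming the default port
# 5050 was free at startup):
#   curl "http://127.0.0.1:5050/api/sources"
#   curl "http://127.0.0.1:5050/api/projects?source=claude-code"
#   curl "http://127.0.0.1:5050/api/search?q=refactor&source=codex"
#   curl -X POST "http://127.0.0.1:5050/api/sync?source=gemini"
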
def find_available_port(host, start_port, max_attempts=100):
    """Find an available port starting from start_port."""
    import socket

    for port in range(start_port, start_port + max_attempts):
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.bind((host, port))
                return port
        except OSError:
            continue
    raise RuntimeError(f"Could not find an available port in range {start_port}-{start_port + max_attempts}")

def run_server(host="127.0.0.1", port=5050, skip_sync=False, debug=False):
    """Run the Flask server."""
    from clicodelog import __version__

    print("=" * 60)
    print(f"cli code log v{__version__}")
    print("=" * 60)

    if not skip_sync:
        # Sync data from all sources
        print("\nSyncing data from all sources...")
        for source_id, config in SOURCES.items():
            print(f"\n{config['name']}:")
            print(f" Source: {config['source_dir']}")
            print(f" Backup: {DATA_DIR / config['data_subdir']}")

            if sync_data(source_id=source_id):
                print(" Sync completed!")
            else:
                print(" Warning: Could not sync. Using existing local data if available.")

        # Start background sync thread
        print(f"\nBackground sync: Every {SYNC_INTERVAL // 3600} hour(s)")
        sync_thread = threading.Thread(target=background_sync, daemon=True)
        sync_thread.start()
        print("Background sync thread started.")
    else:
        print("\nSkipping initial sync (--no-sync flag)")

    # Find available port
    actual_port = find_available_port(host, port)
    if actual_port != port:
        print(f"\nPort {port} is busy, using port {actual_port} instead")

    print("\nStarting server...")
    print(f"Open http://{host}:{actual_port} in your browser")
    print("=" * 60)
    app.run(host=host, port=actual_port, debug=debug, use_reloader=False)

if __name__ == '__main__':
    run_server()
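
A minimal sketch of launching the app from Python, assuming the wheel is installed (the package also ships clicodelog/cli.py and an entry_points.txt, so a console command is the intended interface; its flags are not shown in this diff):

    from clicodelog.app import run_server

    # Syncs all sources, starts the hourly background-sync thread, and serves
    # the UI on the first free port at or above 5050.
    run_server(host="127.0.0.1", port=5050)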