repr-cli 0.2.15__py3-none-any.whl → 0.2.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. repr/__init__.py +1 -1
  2. repr/api.py +363 -62
  3. repr/auth.py +47 -38
  4. repr/change_synthesis.py +478 -0
  5. repr/cli.py +4103 -267
  6. repr/config.py +119 -11
  7. repr/configure.py +889 -0
  8. repr/cron.py +419 -0
  9. repr/dashboard/__init__.py +9 -0
  10. repr/dashboard/build.py +126 -0
  11. repr/dashboard/dist/assets/index-BYFVbEev.css +1 -0
  12. repr/dashboard/dist/assets/index-BrrhyJFO.css +1 -0
  13. repr/dashboard/dist/assets/index-CcEg74ts.js +270 -0
  14. repr/dashboard/dist/assets/index-Cerc-iA_.js +377 -0
  15. repr/dashboard/dist/assets/index-CjVcBW2L.css +1 -0
  16. repr/dashboard/dist/assets/index-Dfl3mR5E.js +377 -0
  17. repr/dashboard/dist/favicon.svg +4 -0
  18. repr/dashboard/dist/index.html +14 -0
  19. repr/dashboard/manager.py +234 -0
  20. repr/dashboard/server.py +1298 -0
  21. repr/db.py +980 -0
  22. repr/hooks.py +3 -2
  23. repr/loaders/__init__.py +22 -0
  24. repr/loaders/base.py +156 -0
  25. repr/loaders/claude_code.py +287 -0
  26. repr/loaders/clawdbot.py +313 -0
  27. repr/loaders/gemini_antigravity.py +381 -0
  28. repr/mcp_server.py +1196 -0
  29. repr/models.py +503 -0
  30. repr/openai_analysis.py +25 -0
  31. repr/session_extractor.py +481 -0
  32. repr/storage.py +360 -0
  33. repr/story_synthesis.py +1296 -0
  34. repr/templates.py +68 -4
  35. repr/timeline.py +710 -0
  36. repr/tools.py +17 -8
  37. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/METADATA +50 -10
  38. repr_cli-0.2.17.dist-info/RECORD +52 -0
  39. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/WHEEL +1 -1
  40. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/entry_points.txt +1 -0
  41. repr_cli-0.2.15.dist-info/RECORD +0 -26
  42. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/licenses/LICENSE +0 -0
  43. {repr_cli-0.2.15.dist-info → repr_cli-0.2.17.dist-info}/top_level.txt +0 -0
repr/configure.py ADDED
@@ -0,0 +1,889 @@
1
+ """
2
+ Configuration wizard for repr CLI.
3
+
4
+ Provides:
5
+ - First-run detection
6
+ - Interactive setup wizard (LLM → Repos → Schedule)
7
+ - Individual configuration commands
8
+ """
9
+
10
+ import shutil
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+ import httpx
15
+ from rich.prompt import Prompt
16
+ from rich.table import Table
17
+ from simple_term_menu import TerminalMenu
18
+
19
+ from .config import (
20
+ CONFIG_FILE,
21
+ BYOK_PROVIDERS,
22
+ add_byok_provider,
23
+ get_llm_config,
24
+ get_tracked_repos,
25
+ load_config,
26
+ save_config,
27
+ set_llm_config,
28
+ add_tracked_repo,
29
+ set_repo_hook_status,
30
+ )
31
+ from .keychain import store_secret, get_secret
32
+ from .llm import detect_all_local_llms, LocalLLMInfo
33
+ from .ui import (
34
+ console,
35
+ print_header,
36
+ print_success,
37
+ print_error,
38
+ print_warning,
39
+ print_info,
40
+ print_next_steps,
41
+ confirm,
42
+ create_spinner,
43
+ BRAND_PRIMARY,
44
+ BRAND_MUTED,
45
+ )
46
+
47
+
48
+ # =============================================================================
49
+ # MENU HELPERS
50
+ # =============================================================================
51
+
52
def select_option(options: list[str], title: str = "", default_index: int = 0) -> int | None:
    """
    Display an arrow-key navigable menu and return the chosen index.

    Args:
        options: Menu entries to display.
        title: Heading rendered above the menu (omitted when empty).
        default_index: Entry highlighted when the menu opens.

    Returns:
        Index of the chosen entry, or None when the user cancels (Esc/q).
    """
    return TerminalMenu(
        options,
        title=title or None,
        cursor_index=default_index,
        menu_cursor="→ ",
        menu_cursor_style=("fg_cyan", "bold"),
        menu_highlight_style=("fg_cyan", "bold"),
        cycle_cursor=True,
        clear_screen=False,
        show_shortcut_hints=False,
    ).show()
77
+
78
+
79
def select_with_filter(
    options: list[dict[str, Any]],
    title: str = "",
    default_id: str | None = None,
) -> dict[str, Any] | None:
    """
    Show a filterable menu (press "/" to search) for model/item selection.

    Args:
        options: Dicts carrying an 'id' key and an optional 'name' key.
        title: Heading rendered above the menu (omitted when empty).
        default_id: 'id' of the entry highlighted when the menu opens.

    Returns:
        The chosen option dict, or None when the list is empty or the user
        cancels.
    """
    if not options:
        return None

    # Prefer the human-readable 'name'; fall back to the raw 'id'.
    labels = [f"{opt.get('name', opt['id'])}" for opt in options]

    # Locate the entry to pre-highlight (first id match wins, else the top).
    start_at = 0
    if default_id:
        start_at = next(
            (i for i, opt in enumerate(options) if opt["id"] == default_id),
            0,
        )

    chosen = TerminalMenu(
        labels,
        title=title or None,
        cursor_index=start_at,
        menu_cursor="→ ",
        menu_cursor_style=("fg_cyan", "bold"),
        menu_highlight_style=("fg_cyan", "bold"),
        cycle_cursor=True,
        clear_screen=False,
        show_shortcut_hints=False,
        show_search_hint=False,
        search_key="/",
        search_highlight_style=("fg_yellow", "bold"),
    ).show()

    return None if chosen is None else options[chosen]
128
+
129
+
130
+ # =============================================================================
131
+ # PROVIDER DEFINITIONS
132
+ # =============================================================================
133
+
134
# Local providers (run on the user's machine).
# Each entry carries: display name, one-line menu description, the default
# localhost endpoint, the path used to enumerate installed models, an
# "api_style" tag consumed by the request-building code, and the download
# page shown when the tool is not running.
LOCAL_PROVIDERS = {
    "ollama": {
        "name": "Ollama",
        "description": "Local, private, free - most popular choice",
        "url": "http://localhost:11434",
        "models_endpoint": "/api/tags",
        "api_style": "openai",
        "install_url": "https://ollama.ai",
    },
    "lmstudio": {
        "name": "LM Studio",
        "description": "Local with GUI model management",
        "url": "http://localhost:1234",
        "models_endpoint": "/v1/models",
        "api_style": "openai",
        "install_url": "https://lmstudio.ai",
    },
}
153
+
154
# API providers (require an API key).
# Each entry carries: display name, one-line menu description, the REST base
# URL, the model-listing path, an "api_style" tag, supported auth methods,
# the model offered when listing fails, and the environment variable checked
# for an existing key during setup.
API_PROVIDERS = {
    "openai": {
        "name": "OpenAI",
        "description": "GPT-4, GPT-4o, etc.",
        "base_url": "https://api.openai.com/v1",
        "models_endpoint": "/models",
        "api_style": "openai",
        "auth_methods": ["api_key"],
        "default_model": "gpt-4o-mini",
        "env_var": "OPENAI_API_KEY",
    },
    "anthropic": {
        "name": "Anthropic",
        "description": "Claude models",
        "base_url": "https://api.anthropic.com/v1",
        "models_endpoint": "/models",
        "api_style": "anthropic",
        "auth_methods": ["api_key"],
        "default_model": "claude-sonnet-4-20250514",
        "env_var": "ANTHROPIC_API_KEY",
    },
    "gemini": {
        "name": "Google Gemini",
        "description": "Gemini Pro, Flash, etc.",
        "base_url": "https://generativelanguage.googleapis.com/v1beta",
        "models_endpoint": "/models",
        "api_style": "gemini",
        "auth_methods": ["api_key"],
        "default_model": "gemini-1.5-flash",
        "env_var": "GEMINI_API_KEY",
    },
    "groq": {
        "name": "Groq",
        "description": "Fast inference for open models",
        "base_url": "https://api.groq.com/openai/v1",
        "models_endpoint": "/models",
        "api_style": "openai",
        "auth_methods": ["api_key"],
        "default_model": "llama-3.1-70b-versatile",
        "env_var": "GROQ_API_KEY",
    },
    "together": {
        "name": "Together AI",
        "description": "Open source models",
        "base_url": "https://api.together.xyz/v1",
        "models_endpoint": "/models",
        "api_style": "openai",
        "auth_methods": ["api_key"],
        "default_model": "meta-llama/Llama-3-70b-chat-hf",
        "env_var": "TOGETHER_API_KEY",
    },
    "openrouter": {
        "name": "OpenRouter",
        "description": "Access multiple providers through one API",
        "base_url": "https://openrouter.ai/api/v1",
        "models_endpoint": "/models",
        "api_style": "openai",
        "auth_methods": ["api_key"],
        "default_model": "anthropic/claude-3.5-sonnet",
        "env_var": "OPENROUTER_API_KEY",
    },
}
217
+
218
+
219
+ # =============================================================================
220
+ # FIRST RUN DETECTION
221
+ # =============================================================================
222
+
223
def is_first_run() -> bool:
    """
    Report whether repr has never been configured.

    First run means the config file is missing, or it exists but no LLM
    (neither a local provider nor any BYOK entry) has been set up yet.
    """
    if not CONFIG_FILE.exists():
        return True

    llm = load_config().get("llm", {})

    # Either kind of LLM counts as "configured".
    configured = llm.get("local_provider") is not None or bool(llm.get("byok", {}))
    return not configured
242
+
243
+
244
def is_configured() -> bool:
    """Return True once an LLM is configured (logical inverse of is_first_run)."""
    first = is_first_run()
    return not first
247
+
248
+
249
+ # =============================================================================
250
+ # MODEL LISTING
251
+ # =============================================================================
252
+
253
def list_ollama_models(url: str = "http://localhost:11434") -> list[dict[str, Any]]:
    """Return Ollama's installed models (id, display name, size, modified) or [] on any failure."""
    try:
        resp = httpx.get(f"{url}/api/tags", timeout=5)
        if resp.status_code == 200:
            found: list[dict[str, Any]] = []
            for entry in resp.json().get("models", []):
                full_name = entry.get("name", "")
                if not full_name:
                    continue
                found.append({
                    "id": full_name,
                    # Strip the ":tag" suffix for display.
                    "name": full_name.split(":")[0],
                    "size": entry.get("size", 0),
                    "modified": entry.get("modified_at", ""),
                })
            return found
    except Exception:
        # Best-effort probe: an unreachable server just means "no models".
        pass
    return []
273
+
274
+
275
def list_openai_models(api_key: str, base_url: str = "https://api.openai.com/v1") -> list[dict[str, Any]]:
    """Return chat-capable OpenAI models sorted by id, or [] on any failure."""
    # Substrings identifying chat models among everything /models returns.
    chat_markers = ("gpt", "o1", "o3", "chatgpt")
    try:
        resp = httpx.get(
            f"{base_url}/models",
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=10,
        )
        if resp.status_code == 200:
            found = [
                {
                    "id": m.get("id", ""),
                    "name": m.get("id", ""),
                    "owned_by": m.get("owned_by", ""),
                }
                for m in resp.json().get("data", [])
                if any(marker in m.get("id", "") for marker in chat_markers)
            ]
            return sorted(found, key=lambda item: item["id"])
    except Exception:
        # Network/auth failures surface to the caller as an empty list.
        pass
    return []
299
+
300
+
301
def list_anthropic_models(api_key: str) -> list[dict[str, Any]]:
    """Return Anthropic's model catalogue as {id, name} dicts, or [] on any failure."""
    headers = {
        "x-api-key": api_key,
        # Required versioning header for the Anthropic REST API.
        "anthropic-version": "2023-06-01",
    }
    try:
        resp = httpx.get(
            "https://api.anthropic.com/v1/models",
            headers=headers,
            timeout=10,
        )
        if resp.status_code != 200:
            return []
        return [
            {
                "id": m.get("id", ""),
                # Prefer the human-friendly display name when present.
                "name": m.get("display_name", m.get("id", "")),
            }
            for m in resp.json().get("data", [])
        ]
    except Exception:
        return []
326
+
327
+
328
def list_gemini_models(api_key: str) -> list[dict[str, Any]]:
    """
    List available Gemini models from the API.

    Only models supporting the ``generateContent`` method are returned.

    Args:
        api_key: Google AI Studio API key.

    Returns:
        List of {id, name} dicts, or [] on any network/auth failure.
    """
    try:
        # Send the key via the x-goog-api-key header rather than as a
        # ?key= query parameter, so the secret does not end up in URLs,
        # proxy logs, or HTTP access logs.
        resp = httpx.get(
            "https://generativelanguage.googleapis.com/v1beta/models",
            headers={"x-goog-api-key": api_key},
            timeout=10,
        )
        if resp.status_code == 200:
            data = resp.json()
            models = []
            for m in data.get("models", []):
                # API names come back as "models/<id>"; strip the prefix.
                name = m.get("name", "").replace("models/", "")
                if "generateContent" in m.get("supportedGenerationMethods", []):
                    models.append({
                        "id": name,
                        "name": m.get("displayName", name),
                    })
            return models
    except Exception:
        # Best-effort: failures surface to the caller as an empty list.
        pass
    return []
349
+
350
+
351
def list_groq_models(api_key: str) -> list[dict[str, Any]]:
    """Return Groq's model list as {id, name} dicts, or [] on any failure."""
    try:
        resp = httpx.get(
            "https://api.groq.com/openai/v1/models",
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=10,
        )
        if resp.status_code == 200:
            payload = resp.json()
            return [{"id": entry["id"], "name": entry["id"]} for entry in payload.get("data", [])]
    except Exception:
        # Best-effort: failures surface to the caller as an empty list.
        pass
    return []
365
+
366
+
367
def list_provider_models(provider: str, api_key: str | None = None, url: str | None = None) -> list[dict[str, Any]]:
    """
    Fetch the model list for a provider by id.

    Local providers ("ollama", "lmstudio") are queried at *url* (or their
    default localhost port). All other providers are keyed APIs and require
    *api_key*. Unknown providers, missing keys, and fetch failures all
    yield an empty list.
    """
    if provider == "ollama":
        return list_ollama_models(url or "http://localhost:11434")

    if provider == "lmstudio":
        # LM Studio exposes an OpenAI-compatible /v1/models endpoint.
        endpoint = f"{url or 'http://localhost:1234'}/v1/models"
        try:
            resp = httpx.get(endpoint, timeout=5)
            if resp.status_code == 200:
                payload = resp.json()
                return [{"id": m["id"], "name": m["id"]} for m in payload.get("data", [])]
        except Exception:
            pass
        return []

    # Everything below requires an API key.
    if not api_key:
        return []

    if provider == "openai":
        return list_openai_models(api_key, url or "https://api.openai.com/v1")
    if provider == "anthropic":
        return list_anthropic_models(api_key)
    if provider == "gemini":
        return list_gemini_models(api_key)
    if provider == "groq":
        return list_groq_models(api_key)
    if provider in ("together", "openrouter"):
        # OpenAI-compatible APIs: reuse the OpenAI lister with their base URL.
        return list_openai_models(api_key, API_PROVIDERS[provider]["base_url"])

    return []
396
+
397
+
398
+ # =============================================================================
399
+ # MODEL SELECTION UI
400
+ # =============================================================================
401
+
402
def select_model(models: list[dict[str, Any]], default: str | None = None) -> str | None:
    """
    Interactive model selection with arrow keys.

    Args:
        models: List of model dicts with 'id' and 'name' keys.
        default: Default model ID to pre-select.

    Returns:
        Selected model ID; the caller's default when the menu is cancelled;
        or the user's typed answer when there are no models to choose from.
    """
    if not models:
        # Nothing to pick from - fall back to a free-form prompt.
        return Prompt.ask("Model name", default=default or "")

    console.print()

    # select_with_filter pre-highlights the default entry itself, so no
    # index/display bookkeeping is needed here (the original built an
    # unused display list and default index - dead code, removed).
    selected = select_with_filter(models, title="Select model:", default_id=default)
    if selected:
        return selected["id"]

    # Cancelled (Esc/q): keep the caller's default.
    return default
439
+
440
+
441
+ # =============================================================================
442
+ # LLM WIZARD
443
+ # =============================================================================
444
+
445
def wizard_llm() -> bool:
    """
    Interactive LLM configuration wizard.

    Detects running local LLMs, offers them alongside not-running local
    options and hosted API providers, then dispatches to the matching
    configuration flow.

    Returns:
        True if an LLM was configured successfully.
    """
    console.print()
    console.print("[bold]LLM Setup[/]")
    console.print("─" * 40)
    console.print()

    # Probe for running local LLM servers while showing a spinner.
    with create_spinner() as progress:
        progress.add_task("Detecting local LLMs...", total=None)
        local_llms = detect_all_local_llms()

    options: list[dict[str, Any]] = []

    # Detected local LLMs go first - the zero-setup choice.
    for llm in local_llms:
        options.append({
            "type": "local",
            "provider": llm.provider,
            "name": f"{llm.name} (detected)",
            "description": f"{len(llm.models)} models available",
            "url": llm.url,
            "models": llm.models,
        })

    # Then known local providers that were not detected as running.
    for provider_id, info in LOCAL_PROVIDERS.items():
        if not any(o["provider"] == provider_id for o in options):
            options.append({
                "type": "local_manual",
                "provider": provider_id,
                "name": info["name"],
                "description": f"{info['description']} (not running)",
                "url": info["url"],
            })

    # Finally the hosted API providers.
    for provider_id, info in API_PROVIDERS.items():
        options.append({
            "type": "api",
            "provider": provider_id,
            "name": info["name"],
            "description": info["description"],
        })

    # Build menu labels; "●" marks providers detected as running.
    menu_items = []
    for opt in options:
        name = opt["name"]
        desc = opt["description"]
        if opt["type"] == "local":
            menu_items.append(f"● {name} - {desc}")
        else:
            menu_items.append(f" {name} - {desc}")

    console.print()

    idx = select_option(menu_items, title="Select LLM provider:")
    if idx is None:
        # User cancelled the menu.
        return False

    selected = options[idx]
    console.print()

    # Dispatch to the flow matching the selection type.
    if selected["type"] == "local":
        return _configure_local_llm(selected)
    if selected["type"] == "local_manual":
        return _configure_local_llm_manual(selected)
    return _configure_api_llm(selected)
523
+
524
+
525
def _configure_local_llm(selected: dict) -> bool:
    """
    Configure a detected (running) local LLM.

    Fetches the live model list from the server, lets the user pick one,
    and persists endpoint, model, and provider in the config.

    Args:
        selected: Option dict from wizard_llm with 'provider', 'name', 'url'.

    Returns:
        True if configuration was saved; False if the user cancelled.
    """
    provider = selected["provider"]
    url = selected["url"]

    console.print(f"Configuring {selected['name']}...")
    console.print()

    # Ask the running server for its models (fresher than the detection pass;
    # the stale 'models' field on `selected` is deliberately ignored).
    model_list = list_provider_models(provider, url=url)

    if not model_list:
        print_warning("Could not fetch model list")
        model = Prompt.ask("Model name", default="llama3.2")
    else:
        # model_list is non-empty here, so indexing [0] is safe.
        model = select_model(model_list, default=model_list[0]["id"])
        if not model:
            return False

    # Local servers are addressed via their OpenAI-compatible /v1 endpoint.
    set_llm_config(
        local_api_url=f"{url}/v1",
        local_model=model,
        default="local",
    )

    # Record which local provider backs the endpoint.
    config = load_config()
    config["llm"]["local_provider"] = provider
    save_config(config)

    print_success(f"Configured {selected['name']} with model {model}")
    return True
559
+
560
+
561
def _configure_local_llm_manual(selected: dict) -> bool:
    """
    Configure a known local LLM that is not currently running.

    Points the user at the install page, then records a user-supplied
    endpoint and model without verifying them.

    Returns:
        True if configuration was saved; False if the user declined.
    """
    info = LOCAL_PROVIDERS[selected["provider"]]

    console.print(f"{info['name']} is not currently running.")
    console.print()
    console.print(f"Install from: [link={info['install_url']}]{info['install_url']}[/link]")
    console.print()

    if not confirm(f"Configure {info['name']} anyway?"):
        return False

    # The server cannot be probed, so take endpoint and model on trust.
    url = Prompt.ask("Endpoint URL", default=info["url"])
    model = Prompt.ask("Model name", default="llama3.2")

    set_llm_config(
        local_api_url=f"{url}/v1",
        local_model=model,
        default="local",
    )

    config = load_config()
    config["llm"]["local_provider"] = selected["provider"]
    save_config(config)

    print_success(f"Configured {info['name']} with model {model}")
    print_info(f"Make sure {info['name']} is running before generating stories")
    return True
591
+
592
+
593
def _configure_api_llm(selected: dict) -> bool:
    """
    Configure an API-based (hosted) LLM provider.

    Obtains an API key (from the provider's environment variable when the
    user agrees, otherwise prompted), verifies it by listing models, then
    stores the key and model and marks the provider as default.

    Args:
        selected: Option dict from wizard_llm with a 'provider' id.

    Returns:
        True if configuration was saved; False on cancel or missing key.
    """
    import os

    provider = selected["provider"]
    info = API_PROVIDERS[provider]

    console.print(f"Configuring {info['name']}...")
    console.print()

    # Prefer a key already exported in the provider's environment variable.
    api_key = None
    env_key = os.getenv(info.get("env_var", ""))
    if env_key:
        if confirm(f"Use {info['env_var']} from environment?"):
            api_key = env_key

    if not api_key:
        api_key = Prompt.ask("API Key", password=True)

    if not api_key:
        print_error("API key required")
        return False

    # Verify the key by fetching the provider's model list.
    console.print()
    with create_spinner() as progress:
        progress.add_task("Verifying API key...", total=None)
        models = list_provider_models(provider, api_key=api_key)

    if not models:
        # The key may still work (e.g. transient network failure), so let
        # the user proceed with the provider's default model.
        print_error("Could not fetch models - check your API key")
        if not confirm("Continue with default model?"):
            return False
        models = [{"id": info["default_model"], "name": info["default_model"]}]

    model = select_model(models, default=info["default_model"])
    if not model:
        return False

    # Save to keychain and config.
    add_byok_provider(provider, api_key, model)

    # Set as default
    set_llm_config(default=f"byok:{provider}")

    print_success(f"Configured {info['name']} with model {model}")
    return True
641
+
642
+
643
+ # =============================================================================
644
+ # REPOS WIZARD
645
+ # =============================================================================
646
+
647
def wizard_repos() -> bool:
    """
    Interactive repository configuration wizard.

    Scans a directory for git repositories (default: the first of ~/code,
    ~/projects, ~/dev that exists, else the current directory) and offers
    to track the ones found.

    Returns:
        True if repos were tracked or the existing set was kept.
    """
    from .discovery import discover_repos

    console.print()
    console.print("[bold]Repository Setup[/]")
    console.print("─" * 40)
    console.print()

    # Already tracking repos? Only continue if the user wants to add more.
    tracked = get_tracked_repos()
    if tracked:
        console.print(f"Currently tracking {len(tracked)} repositories")
        if not confirm("Scan for more?", default=False):
            return True

    # Pick the first conventional code directory that exists as the default.
    default_paths = [Path.home() / "code", Path.home() / "projects", Path.home() / "dev"]
    scan_path = next((p for p in default_paths if p.exists()), None) or Path.cwd()

    path_input = Prompt.ask("Directory to scan", default=str(scan_path))
    scan_path = Path(path_input).expanduser().resolve()

    if not scan_path.exists():
        print_error(f"Directory not found: {scan_path}")
        return False

    # Scan while showing a spinner.
    console.print()
    with create_spinner() as progress:
        progress.add_task(f"Scanning {scan_path}...", total=None)
        repos = discover_repos([scan_path], min_commits=5)

    if not repos:
        print_warning(f"No repositories found in {scan_path}")
        return False

    console.print(f"Found [bold]{len(repos)}[/] repositories")
    console.print()

    # Preview at most 10 repos so long lists stay readable.
    for repo in repos[:10]:
        lang = repo.primary_language or "Unknown"
        console.print(f" ✓ {repo.name} ({repo.commit_count} commits) [{lang}]")

    if len(repos) > 10:
        console.print(f" ... and {len(repos) - 10} more")

    console.print()

    if confirm("Track these repositories?", default=True):
        for repo in repos:
            add_tracked_repo(str(repo.path))
        print_success(f"Tracking {len(repos)} repositories")
        return True

    return False
718
+
719
+
720
+ # =============================================================================
721
+ # SCHEDULE WIZARD
722
+ # =============================================================================
723
+
724
def wizard_schedule() -> bool:
    """
    Interactive schedule configuration wizard.

    Offers three generation modes: cron every 4 hours (hooks installed for
    queue tracking only), on-commit via git hooks, or manual.

    Returns:
        True (a schedule is always resolved; cancelling the menu defaults
        to the cron schedule).
    """
    from .hooks import install_hook
    from .cron import install_cron

    console.print()
    console.print("[bold]Schedule Setup[/]")
    console.print("─" * 40)
    console.print()

    schedule_options = [
        "Scheduled (recommended) - Every 4 hours via cron",
        "On commit - After every 5 commits via git hook",
        "Manual only - Run `repr generate` yourself",
    ]

    choice = select_option(schedule_options, title="How should repr generate stories?")
    if choice is None:
        choice = 0  # Default to scheduled

    tracked = get_tracked_repos()
    config = load_config()
    # Older/minimal configs may lack the "generation" section entirely;
    # plain config["generation"] would raise KeyError.
    generation = config.setdefault("generation", {})

    if choice == 0:
        # Scheduled via cron
        result = install_cron(interval_hours=4, min_commits=3)
        if result["success"]:
            print_success("Cron job installed (every 4h)")
            generation["auto_generate_on_hook"] = False
            save_config(config)
            # Install hooks anyway so commits are queued for the cron run.
            for repo in tracked:
                try:
                    install_hook(Path(repo["path"]))
                    set_repo_hook_status(repo["path"], True)
                except Exception:
                    pass  # Best-effort: skip repos we cannot hook.
        else:
            print_warning(f"Could not install cron: {result['message']}")
            print_info("You can set it up later with `repr cron install`")

    elif choice == 1:
        # On-commit via hooks
        generation["auto_generate_on_hook"] = True
        save_config(config)
        hook_count = 0
        for repo in tracked:
            try:
                install_hook(Path(repo["path"]))
                set_repo_hook_status(repo["path"], True)
                hook_count += 1
            except Exception:
                pass  # Best-effort: skip repos we cannot hook.
        print_success(f"Hooks installed in {hook_count} repos (generates after 5 commits)")

    else:
        # Manual only (choice == 2)
        generation["auto_generate_on_hook"] = False
        save_config(config)
        print_info("Manual mode - run `repr generate` when you want stories")

    return True
791
+
792
+
793
+ # =============================================================================
794
+ # FULL WIZARD
795
+ # =============================================================================
796
+
797
def run_full_wizard() -> bool:
    """
    Run the complete setup wizard (first-run experience).

    Walks through the three steps in order - LLM, repositories, schedule -
    warning (but not stopping) when a step is skipped, then prints the
    suggested next commands.

    Returns:
        True when the wizard finishes.
    """
    print_header()
    console.print("Welcome to repr! Let's get you set up.")
    console.print()
    console.print(f"[{BRAND_MUTED}]Works locally first — sign in later for sync and sharing.[/]")

    # (heading, step callable, warning shown when the step returns falsy)
    steps = (
        ("Step 1 of 3: LLM", wizard_llm,
         "LLM not configured. You can set it up later with `repr configure llm`"),
        ("Step 2 of 3: Repositories", wizard_repos,
         "No repos tracked. You can add them later with `repr configure repos`"),
        ("Step 3 of 3: Schedule", wizard_schedule, None),
    )
    for heading, step, warning in steps:
        console.print()
        console.print(f"[bold]{heading}[/]")
        ok = step()
        if not ok and warning:
            print_warning(warning)

    console.print()
    print_success("Setup complete!")
    console.print()

    print_next_steps([
        "repr week See what you worked on this week",
        "repr generate Save stories permanently",
        "repr login Unlock cloud sync and publishing",
    ])

    return True
840
+
841
+
842
+ # =============================================================================
843
+ # CONFIGURE MENU
844
+ # =============================================================================
845
+
846
def run_configure_menu() -> None:
    """
    Show the main configure menu and dispatch to the chosen wizard.

    Displays current LLM, repository, and schedule status inline; choosing
    "Quit" or cancelling the menu returns without action.
    """
    console.print()

    llm_config = get_llm_config()
    tracked = get_tracked_repos()
    config = load_config()

    # Summarize the LLM setup: a local provider wins over BYOK entries.
    if llm_config.get("local_provider"):
        model = llm_config.get("local_model", "unknown")
        llm_status = f"{llm_config['local_provider'].title()} ({model})"
    elif llm_config.get("byok"):
        providers = list(llm_config["byok"].keys())
        llm_status = ", ".join(p.title() for p in providers)
    else:
        llm_status = "Not configured"

    # Summarize the generation schedule: cron beats hooks beats manual.
    if config.get("cron", {}).get("installed"):
        interval = config["cron"].get("interval_hours", 4)
        schedule_status = f"Every {interval}h via cron"
    elif config.get("generation", {}).get("auto_generate_on_hook"):
        schedule_status = "On commit via hooks"
    else:
        schedule_status = "Manual"

    menu_options = [
        f"LLM - {llm_status}",
        f"Repositories - {len(tracked)} tracked",
        f"Schedule - {schedule_status}",
        "Quit",
    ]

    choice = select_option(menu_options, title="What would you like to configure?")

    # Index 3 ("Quit") and None (cancel) both fall through with no action.
    dispatch = {0: wizard_llm, 1: wizard_repos, 2: wizard_schedule}
    action = dispatch.get(choice) if choice is not None else None
    if action is not None:
        action()