repr-cli 0.2.12__py3-none-any.whl → 0.2.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- repr/api.py +56 -0
- repr/auth.py +17 -0
- repr/cli.py +129 -27
- repr/openai_analysis.py +19 -1
- repr/templates.py +6 -0
- repr/tools.py +12 -0
- repr/ui.py +60 -1
- {repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/METADATA +1 -1
- {repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/RECORD +13 -13
- {repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/WHEEL +0 -0
- {repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/entry_points.txt +0 -0
- {repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/licenses/LICENSE +0 -0
- {repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/top_level.txt +0 -0
repr/api.py
CHANGED
@@ -360,6 +360,62 @@ async def push_story(story_data: dict[str, Any]) -> dict[str, Any]:
         raise APIError(f"Network error: {str(e)}")
 
 
+BATCH_SIZE = 200 # Maximum stories per batch request
+
+
+async def push_stories_batch(stories: list[dict[str, Any]]) -> dict[str, Any]:
+    """
+    Push multiple stories to repr.dev in batches.
+
+    Stories are automatically chunked into batches of BATCH_SIZE (200).
+
+    Args:
+        stories: List of story data dicts, each including summary, content, repo info, etc.
+
+    Returns:
+        Dict with 'pushed' count, 'failed' count, and 'results' list
+
+    Raises:
+        APIError: If request fails
+        AuthError: If not authenticated
+    """
+    all_results: list[dict[str, Any]] = []
+    total_pushed = 0
+    total_failed = 0
+
+    # Process in chunks of BATCH_SIZE
+    for i in range(0, len(stories), BATCH_SIZE):
+        chunk = stories[i:i + BATCH_SIZE]
+
+        async with httpx.AsyncClient() as client:
+            try:
+                response = await client.post(
+                    f"{_get_stories_url()}/batch",
+                    headers=_get_headers(),
+                    json={"stories": chunk},
+                    timeout=180, # 3 minutes for large batches
+                )
+                response.raise_for_status()
+                result = response.json()
+
+                total_pushed += result.get("pushed", 0)
+                total_failed += result.get("failed", 0)
+                all_results.extend(result.get("results", []))
+
+            except httpx.HTTPStatusError as e:
+                if e.response.status_code == 401:
+                    raise AuthError("Session expired. Please run 'repr login' again.")
+                raise APIError(f"Failed to push stories batch: {e.response.status_code}")
+            except httpx.RequestError as e:
+                raise APIError(f"Network error: {str(e)}")
+
+    return {
+        "pushed": total_pushed,
+        "failed": total_failed,
+        "results": all_results,
+    }
+
+
 async def get_public_profile_settings() -> dict[str, Any]:
     """
     Get the current user's public profile settings.
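For context, a minimal usage sketch of the new batching entry point (not part of the package). It assumes push_stories_batch, APIError, and AuthError are all importable from repr.api, which the CLI's own push command suggests; the story dicts are illustrative placeholders.

import asyncio

from repr.api import APIError, AuthError, push_stories_batch

# Illustrative payloads only; the real CLI builds these from saved story metadata.
stories = [
    {"summary": "Wire up Redis caching for auth tokens", "content": "...", "client_id": "story-001"},
    {"summary": "Skip merge commits in history scans", "content": "...", "client_id": "story-002"},
]

async def main() -> None:
    try:
        # Chunking into groups of 200 happens inside push_stories_batch.
        result = await push_stories_batch(stories)
        print(f"pushed={result['pushed']} failed={result['failed']}")
    except (APIError, AuthError) as exc:
        print(f"push failed: {exc}")

asyncio.run(main())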
repr/auth.py
CHANGED
@@ -5,12 +5,15 @@ Tokens are stored securely in OS keychain (see keychain.py).
 """
 
 import asyncio
+import platform
+import socket
 import time
 from dataclasses import dataclass
 
 import httpx
 
 from .config import set_auth, clear_auth, get_auth, is_authenticated, get_api_base
+from .telemetry import get_device_id
 
 
 def _get_device_code_url() -> str:
@@ -25,6 +28,16 @@ POLL_INTERVAL = 5 # seconds
 MAX_POLL_TIME = 600 # 10 minutes
 
 
+def _get_device_name() -> str:
+    """Get a friendly device name for display."""
+    try:
+        hostname = socket.gethostname()
+        system = platform.system()
+        return f"{hostname} ({system})"
+    except Exception:
+        return "Unknown Device"
+
+
 @dataclass
 class DeviceCodeResponse:
     """Response from device code request."""
@@ -106,6 +119,8 @@ async def poll_for_token(device_code: str, interval: int = POLL_INTERVAL) -> Tok
             json={
                 "device_code": device_code,
                 "client_id": "repr-cli",
+                "device_id": get_device_id(),
+                "device_name": _get_device_name(),
             },
             timeout=30,
         )
@@ -314,6 +329,8 @@ class AuthFlow:
                 json={
                     "device_code": device_code_response.device_code,
                     "client_id": "repr-cli",
+                    "device_id": get_device_id(),
+                    "device_name": _get_device_name(),
                 },
                 timeout=30,
            )
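As a standalone illustration of the device naming added to the login payload (a sketch, not the package code), the format is simply hostname plus OS name with a constant fallback:

import platform
import socket

def device_name() -> str:
    """Mirror of the '<hostname> (<OS>)' format used by _get_device_name()."""
    try:
        return f"{socket.gethostname()} ({platform.system()})"
    except Exception:
        return "Unknown Device"

print(device_name())  # e.g. "dev-laptop (Darwin)"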
repr/cli.py
CHANGED
@@ -21,7 +21,7 @@ import os
 import sys
 from datetime import datetime, timedelta
 from pathlib import Path
-from typing import Optional, List, Dict
+from typing import Optional, List, Dict, Callable
 from collections import defaultdict
 
 import typer
@@ -44,6 +44,7 @@ from .ui import (
     format_relative_time,
     format_bytes,
     confirm,
+    BatchProgress,
     BRAND_PRIMARY,
     BRAND_SUCCESS,
     BRAND_WARNING,
@@ -460,6 +461,8 @@ def generate(
 
     if commits:
         # Specific commits
+        if not json_output:
+            console.print(f" Collecting specified commits...")
         commit_shas = [s.strip() for s in commits.split(",")]
         commit_list = get_commits_by_shas(repo_path, commit_shas)
     else:
@@ -472,12 +475,16 @@
         since_str = _parse_date_reference(since_date)
 
         # Recent commits within timeframe
+        if not json_output:
+            console.print(f" Scanning commits...")
         commit_list = get_commits_with_diffs(
             repo_path,
             count=500, # Higher limit when filtering by time
             days=timeframe_days,
             since=since_str,
         )
+        if not json_output and commit_list:
+            console.print(f" Found {len(commit_list)} commits")
 
     if not commit_list:
         if not json_output:
@@ -551,15 +558,39 @@
             console.print(f" [{BRAND_MUTED}]Skipped {repo_info.name}[/]")
             continue
 
-        #
-
-
-
-
-
-
-
-
+        # Calculate number of batches for progress
+        num_batches = (len(commit_list) + batch_size - 1) // batch_size
+
+        # Generate stories with progress tracking
+        if not json_output and num_batches > 1:
+            # Use progress bar for multiple batches
+            with BatchProgress(num_batches, f"Analyzing {repo_info.name}") as progress:
+                def on_progress(batch_num, total, status):
+                    if status == "complete":
+                        progress.update(1, f"batch {batch_num}/{total}")
+
+                stories = _generate_stories(
+                    commits=commit_list,
+                    repo_info=repo_info,
+                    batch_size=batch_size,
+                    local=local,
+                    template=template,
+                    custom_prompt=prompt,
+                    progress_callback=on_progress,
+                )
+        else:
+            # Single batch or JSON mode - no progress bar needed
+            if not json_output and num_batches == 1:
+                console.print(f" Analyzing {len(commit_list)} commits...")
+
+            stories = _generate_stories(
+                commits=commit_list,
+                repo_info=repo_info,
+                batch_size=batch_size,
+                local=local,
+                template=template,
+                custom_prompt=prompt,
+            )
 
         for story in stories:
             if not json_output:
@@ -606,9 +637,15 @@ async def _generate_stories_async(
     batch_size: int,
     local: bool,
     template: str = "resume",
-    custom_prompt: str
+    custom_prompt: Optional[str] = None,
+    progress_callback: Optional[Callable] = None,
 ) -> list[dict]:
-    """Generate stories from commits using LLM (async implementation).
+    """Generate stories from commits using LLM (async implementation).
+
+    Args:
+        progress_callback: Optional callback with signature (batch_num, total_batches, status)
+            where status is 'processing' or 'complete'
+    """
     from .openai_analysis import get_openai_client, extract_commit_batch
     from .templates import build_generation_prompt, StoryOutput
 
@@ -634,6 +671,10 @@ async def _generate_stories_async(
 
     try:
         for i, batch in enumerate(batches):
+            # Report progress - starting this batch
+            if progress_callback:
+                progress_callback(i + 1, len(batches), "processing")
+
             try:
                 # Build prompt with template
                 system_prompt, user_prompt = build_generation_prompt(
@@ -665,6 +706,9 @@
                     # Fallback for string response
                     content = result
                 if not content or content.startswith("[Batch"):
+                    # Report progress - batch complete (even if empty)
+                    if progress_callback:
+                        progress_callback(i + 1, len(batches), "complete")
                     continue
                 lines = [l.strip() for l in content.split("\n") if l.strip()]
                 summary = lines[0] if lines else "Story"
@@ -700,6 +744,12 @@
                     "generated_locally": local,
                     "template": template,
                     "needs_review": False,
+                    # Categories
+                    "category": story_output.category,
+                    "scope": story_output.scope,
+                    "stack": story_output.stack,
+                    "initiative": story_output.initiative,
+                    "complexity": story_output.complexity,
                 }
 
                 # Save story
@@ -707,7 +757,14 @@
                 metadata["id"] = story_id
                 stories.append(metadata)
 
+                # Report progress - batch complete
+                if progress_callback:
+                    progress_callback(i + 1, len(batches), "complete")
+
             except Exception as e:
+                # Report progress even on failure
+                if progress_callback:
+                    progress_callback(i + 1, len(batches), "complete")
                 console.print(f" [{BRAND_MUTED}]Batch {i+1} failed: {e}[/]")
     finally:
         # Properly close the async client
@@ -722,7 +779,8 @@ def _generate_stories(
     batch_size: int,
     local: bool,
     template: str = "resume",
-    custom_prompt: str
+    custom_prompt: Optional[str] = None,
+    progress_callback: Optional[Callable] = None,
 ) -> list[dict]:
     """Generate stories from commits using LLM."""
     return asyncio.run(_generate_stories_async(
@@ -732,6 +790,7 @@
         local=local,
         template=template,
         custom_prompt=custom_prompt,
+        progress_callback=progress_callback,
     ))
 
 
@@ -742,6 +801,9 @@
 @app.command()
 def stories(
     repo: Optional[str] = typer.Option(None, "--repo", help="Filter by repository"),
+    category: Optional[str] = typer.Option(None, "--category", "-c", help="Filter by category (feature, bugfix, refactor, perf, infra, docs, test, chore)"),
+    scope: Optional[str] = typer.Option(None, "--scope", "-s", help="Filter by scope (user-facing, internal, platform, ops)"),
+    stack: Optional[str] = typer.Option(None, "--stack", help="Filter by stack (frontend, backend, database, infra, mobile, fullstack)"),
     needs_review: bool = typer.Option(False, "--needs-review", help="Show only stories needing review"),
     json_output: bool = typer.Option(False, "--json", help="Output as JSON"),
 ):
@@ -751,10 +813,21 @@
     Example:
         repr stories
        repr stories --repo myproject
+        repr stories --category feature
+        repr stories --scope user-facing
+        repr stories --stack backend
        repr stories --needs-review
     """
     story_list = list_stories(repo_name=repo, needs_review=needs_review)
 
+    # Apply category filters (local filtering since storage doesn't support these yet)
+    if category:
+        story_list = [s for s in story_list if s.get("category") == category]
+    if scope:
+        story_list = [s for s in story_list if s.get("scope") == scope]
+    if stack:
+        story_list = [s for s in story_list if s.get("stack") == stack]
+
     if json_output:
         print(json.dumps(story_list, indent=2, default=str))
         return
@@ -794,7 +867,11 @@
         summary = story.get("summary", "Untitled")
         created = format_relative_time(story.get("created_at", ""))
 
-
+        # Category badge
+        cat = story.get("category", "")
+        cat_badge = f"[{BRAND_MUTED}][{cat}][/] " if cat else ""
+
+        console.print(f" {status} {cat_badge}{summary} [{BRAND_MUTED}]• {created}[/]")
     console.print()
 
     if len(story_list) > 20:
@@ -858,6 +935,15 @@ def story(
        console.print()
        console.print(f"[{BRAND_MUTED}]ID: {story_id}[/]")
        console.print(f"[{BRAND_MUTED}]Created: {metadata.get('created_at', 'unknown')}[/]")
+        # Show categories if present
+        cat = metadata.get("category")
+        scope = metadata.get("scope")
+        stack = metadata.get("stack")
+        initiative = metadata.get("initiative")
+        complexity = metadata.get("complexity")
+        if cat or scope or stack:
+            cats = [c for c in [cat, scope, stack, initiative, complexity] if c]
+            console.print(f"[{BRAND_MUTED}]Categories: {', '.join(cats)}[/]")
 
     elif action == "edit":
         # Open in $EDITOR
@@ -1041,21 +1127,37 @@ def push(
        console.print("Run without --dry-run to publish")
        raise typer.Exit()
 
-    #
-    from .api import
+    # Build batch payload
+    from .api import push_stories_batch
 
-
+    stories_payload = []
     for s in to_push:
-
-
-
-
-
-
-
-
-
-
+        content, meta = load_story(s["id"])
+        # Use local story ID as client_id for sync
+        payload = {**meta, "content": content, "client_id": s["id"]}
+        stories_payload.append(payload)
+
+    # Push all stories in a single batch request
+    try:
+        result = asyncio.run(push_stories_batch(stories_payload))
+        pushed = result.get("pushed", 0)
+        results = result.get("results", [])
+
+        # Mark successful stories as pushed and display results
+        for i, story_result in enumerate(results):
+            story_id_local = to_push[i]["id"]
+            summary = to_push[i].get("summary", story_id_local)[:50]
+
+            if story_result.get("success"):
+                mark_story_pushed(story_id_local)
+                console.print(f" [{BRAND_SUCCESS}]✓[/] {summary}")
+            else:
+                error_msg = story_result.get("error", "Unknown error")
+                console.print(f" [{BRAND_ERROR}]✗[/] {summary}: {error_msg}")
+
+    except (APIError, AuthError) as e:
+        print_error(f"Batch push failed: {e}")
+        raise typer.Exit(1)
 
     # Log operation
     if pushed > 0:
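The callback contract introduced above is small enough to show in isolation. A self-contained sketch (hypothetical batch names, no repr imports) of how the 'processing'/'complete' events fire per batch:

def on_progress(batch_num: int, total_batches: int, status: str) -> None:
    # The CLI's progress bar advances only on "complete"; "processing" marks the start.
    print(f"batch {batch_num}/{total_batches}: {status}")

batches = ["batch-a", "batch-b", "batch-c"]  # placeholder batches
for i, _batch in enumerate(batches):
    on_progress(i + 1, len(batches), "processing")
    # ... analyze the batch (the LLM call happens here in the real code) ...
    on_progress(i + 1, len(batches), "complete")  # fired even when a batch fails or is empty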
repr/openai_analysis.py
CHANGED
@@ -23,6 +23,11 @@ class ExtractedStory(BaseModel):
     """A single coherent block of work."""
     title: str = Field(description="One-line title, max 120 chars. Dev jargon welcome. e.g. 'Wire up Redis caching for auth tokens'")
     summary: str = Field(description="Markdown - what was built, how it works, why it matters")
+    category: str = Field(description="Work type. One of: feature, bugfix, refactor, perf, infra, docs, test, chore")
+    scope: str = Field(description="Impact scope. One of: user-facing, internal, platform, ops")
+    stack: str = Field(description="Stack layer. One of: frontend, backend, database, infra, mobile, fullstack")
+    initiative: str = Field(description="Initiative type. One of: greenfield, migration, integration, scaling, incident-response, tech-debt")
+    complexity: str = Field(description="Complexity/effort. One of: quick-win, project, epic, architecture")
 
 
 class ExtractedCommitBatch(BaseModel):
@@ -205,6 +210,11 @@ Per story:
   Bad: "Improved authentication system" (too vague)
   Bad: "Enhanced user experience" (meaningless)
 - summary: Markdown. What was built, how it works, any interesting decisions.
+- category: Work type - feature, bugfix, refactor, perf, infra, docs, test, or chore
+- scope: Who's affected - user-facing, internal, platform, or ops
+- stack: Where in tech stack - frontend, backend, database, infra, mobile, or fullstack
+- initiative: Why this work - greenfield, migration, integration, scaling, incident-response, or tech-debt
+- complexity: Effort level - quick-win, project, epic, or architecture
 
 No corporate fluff. No "enhanced", "improved", "robust". Just say what happened."""
@@ -231,7 +241,15 @@ No corporate fluff. No "enhanced", "improved", "robust". Just say what happened.
     if parsed and parsed.stories:
         # Convert each story to StoryOutput
         return [
-            StoryOutput(
+            StoryOutput(
+                summary=story.title,
+                content=story.summary,
+                category=story.category,
+                scope=story.scope,
+                stack=story.stack,
+                initiative=story.initiative,
+                complexity=story.complexity,
+            )
             for story in parsed.stories
         ]
     # Fallback if parsing failed (e.g., refusal)
repr/templates.py
CHANGED
@@ -16,6 +16,12 @@ class StoryOutput(BaseModel):
     """Structured output for a generated story."""
     summary: str = Field(description="One-line technical summary of the work (max 120 chars, no fluff)")
     content: str = Field(description="Full technical description in markdown")
+    # Categories - all inferred by LLM, nullable for backwards compatibility with older stories
+    category: str | None = Field(default=None, description="Work type: feature, bugfix, refactor, perf, infra, docs, test, chore")
+    scope: str | None = Field(default=None, description="Impact scope: user-facing, internal, platform, ops")
+    stack: str | None = Field(default=None, description="Stack layer: frontend, backend, database, infra, mobile, fullstack")
+    initiative: str | None = Field(default=None, description="Initiative type: greenfield, migration, integration, scaling, incident-response, tech-debt")
+    complexity: str | None = Field(default=None, description="Complexity: quick-win, project, epic, architecture")
 
 
 # Template definitions
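A quick illustration of why the new fields are nullable: stories saved by older versions simply omit them, so a default of None keeps them loadable. The sketch below uses a local stand-in model rather than importing StoryOutput, and trims the field list for brevity.

from pydantic import BaseModel, Field

class StoryOutputSketch(BaseModel):
    summary: str
    content: str
    category: str | None = Field(default=None)
    scope: str | None = Field(default=None)

# An "old" story without category metadata still validates; the new fields read as None.
old_story = StoryOutputSketch(summary="Fix OAuth refresh", content="...")
print(old_story.category, old_story.scope)  # None None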
repr/tools.py
CHANGED
@@ -90,6 +90,10 @@ def get_recent_commits(
         # Stop if we've gone past the time window
         if commit.committed_date < cutoff_timestamp:
             break
+
+        # Skip merge commits (commits with more than one parent)
+        if len(commit.parents) > 1:
+            continue
 
         commits.append({
             "sha": commit.hexsha[:8],
@@ -160,6 +164,10 @@ def get_commits_with_diffs(
         if since_timestamp and commit.committed_date <= since_timestamp:
             break
 
+        # Skip merge commits (commits with more than one parent)
+        if len(commit.parents) > 1:
+            continue
+
         # Get files changed with diffs
         files = []
         parent = commit.parents[0] if commit.parents else None
@@ -495,6 +503,10 @@ def get_commits_by_shas(
             # Skip invalid SHAs
             continue
 
+        # Skip merge commits (commits with more than one parent)
+        if len(commit.parents) > 1:
+            continue
+
         # Get files changed with diffs
         files = []
         parent = commit.parents[0] if commit.parents else None
repr/ui.py
CHANGED
@@ -8,8 +8,11 @@ from rich.console import Console
 from rich.markdown import Markdown
 from rich.panel import Panel
 from rich.table import Table
-from rich.progress import Progress, SpinnerColumn, TextColumn
+from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn, TimeElapsedColumn
 from rich.prompt import Confirm
+from rich.live import Live
+from rich.spinner import Spinner
+from rich.text import Text
 
 # Brand colors
 BRAND_PRIMARY = "#6366f1" # Indigo
@@ -93,6 +96,62 @@ def create_spinner(message: str = "Working...") -> Progress:
     )
 
 
+def create_progress_bar(total: int, description: str = "Processing") -> Progress:
+    """Create a progress bar for batch processing."""
+    return Progress(
+        SpinnerColumn(),
+        TextColumn("[progress.description]{task.description}"),
+        BarColumn(bar_width=30),
+        TaskProgressColumn(),
+        TimeElapsedColumn(),
+        console=console,
+        transient=False,
+    )
+
+
+class BatchProgress:
+    """Progress tracker for batch processing with live updates."""
+
+    def __init__(self, total: int, description: str = "Processing"):
+        self.total = total
+        self.description = description
+        self.current = 0
+        self.current_detail = ""
+        self.progress = Progress(
+            SpinnerColumn(),
+            TextColumn("[progress.description]{task.description}"),
+            BarColumn(bar_width=30),
+            TaskProgressColumn(),
+            TimeElapsedColumn(),
+            console=console,
+            transient=False,
+        )
+        self.task_id = None
+
+    def __enter__(self):
+        self.progress.start()
+        self.task_id = self.progress.add_task(self.description, total=self.total)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.progress.stop()
+        return False
+
+    def update(self, advance: int = 1, detail: str = None):
+        """Update progress by advancing and optionally changing detail text."""
+        self.current += advance
+        if detail:
+            self.current_detail = detail
+            self.progress.update(self.task_id, advance=advance, description=f"{self.description} • {detail}")
+        else:
+            self.progress.update(self.task_id, advance=advance)
+
+    def set_detail(self, detail: str):
+        """Set the detail text without advancing."""
+        self.current_detail = detail
+        self.progress.update(self.task_id, description=f"{self.description} • {detail}")
+
+
 def create_table(title: str, columns: list[str]) -> Table:
     """Create a styled table."""
     table = Table(title=title, border_style=BRAND_MUTED)
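BatchProgress is an ordinary context manager around a rich Progress instance, so it can be exercised on its own. A minimal usage sketch (the sleep stands in for real per-batch work):

import time

from repr.ui import BatchProgress

num_batches = 3
with BatchProgress(num_batches, "Analyzing my-repo") as progress:
    for i in range(num_batches):
        time.sleep(0.1)  # placeholder for the actual batch analysis
        progress.update(1, f"batch {i + 1}/{num_batches}")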
{repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
 repr/__init__.py,sha256=jraImidqaPxv03Uy76zPtnAcNnOl5KLZSXYBzxI85BI,446
 repr/__main__.py,sha256=N7amYwdGB3yzk2ZJJbtH2hhESNkDuhDL11dDEm5Kl60,166
-repr/api.py,sha256=
-repr/auth.py,sha256
-repr/cli.py,sha256=
+repr/api.py,sha256=SfcoGysf_G5tXegM-zk_Rzsh5Zs59ptzUlStWgrHXP0,13812
+repr/auth.py,sha256=xFtDRtdecT0T8ToTEpGinJoLf53zRQSdKSoPWjeyvoQ,12252
+repr/cli.py,sha256=WUMdr2a48sYM3g5f9gP265b_XYmqWkkfUGTlPNnlXho,90874
 repr/config.py,sha256=GZf5ucrBFIfOo9UtKE-DAZ9Ns1suAKG0jvUAY64oGIc,30601
 repr/discovery.py,sha256=2RYmJleqV7TbxIMMYP2izkEBUeKH7U1F-U4KAUlUNww,14816
 repr/doctor.py,sha256=-ZyaRu_tb0vpT-ol7vLkgke68UQAxbpwqbubTJqbWsU,13443
@@ -10,17 +10,17 @@ repr/extractor.py,sha256=lGPN8gwTF_ZSezoQoPBMnf95nCJArGIteNiInfb39FM,10566
 repr/hooks.py,sha256=Af78HiuxhN7OEu4npiZWoxFktuMlNUtSTjYgYDEzzFQ,20011
 repr/keychain.py,sha256=CpKU3tjFZVEPgiHiplSAtBQFDPA6qOSovv4IXXgJXbY,6957
 repr/llm.py,sha256=inABX2kwEhPnON7sjCzcTMZZeCf0k3G08LyrKsi6Sko,14637
-repr/openai_analysis.py,sha256=
+repr/openai_analysis.py,sha256=PE3UBTXcmJe-uwZUKR00PAfghGHIgsxHi1SWA15uRKM,28883
 repr/privacy.py,sha256=sN1tkoZjCDSwAjkQWeH6rHaLrtv727yT1HNHQ54GRis,9834
 repr/storage.py,sha256=72nfFcR2Y98vpSjaO7zVHisq_Ln2UrHmGyDhEqEmDjU,14863
 repr/telemetry.py,sha256=M1NribTkiezpvweLrdbJxKDU2mlTe7frke6sUP0Yhiw,7000
-repr/templates.py,sha256=
-repr/tools.py,sha256=
-repr/ui.py,sha256=
+repr/templates.py,sha256=O9pONIkrfWR-JqIyRp6C7e_csnD50SgbJxNNTZSjWFg,7449
+repr/tools.py,sha256=opAWp34PY_8So9n_nYQQiF3OpokQsJebanhOdjX867Q,21130
+repr/ui.py,sha256=29pl_paJEWfCUu4GcDaePlmeWTQ0B3MFhD7de6LgJsM,6105
 repr/updater.py,sha256=rybVVIxDk6RmKaswyKogVun8egVaonyH9nh_q2Rr0Vk,7335
-repr_cli-0.2.
-repr_cli-0.2.
-repr_cli-0.2.
-repr_cli-0.2.
-repr_cli-0.2.
-repr_cli-0.2.
+repr_cli-0.2.14.dist-info/licenses/LICENSE,sha256=tI16Ry3IQhjsde6weJ_in6czzWW2EF4Chz1uicyDLAA,1061
+repr_cli-0.2.14.dist-info/METADATA,sha256=ak9KNCXxCYKC87u6hXuJFiZhA_g9lhGJkMJmeBzo_yM,11228
+repr_cli-0.2.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+repr_cli-0.2.14.dist-info/entry_points.txt,sha256=SJoKgNB-fRy6O2T_lztFr9T3ND_BQl0ijWxNW-J7dUU,38
+repr_cli-0.2.14.dist-info/top_level.txt,sha256=LNgPqdJPQnlicRve7uzI4a6rEUdcxHrNkUq_2w7eeiA,5
+repr_cli-0.2.14.dist-info/RECORD,,
{repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/WHEEL
File without changes
{repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/entry_points.txt
File without changes
{repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/licenses/LICENSE
File without changes
{repr_cli-0.2.12.dist-info → repr_cli-0.2.14.dist-info}/top_level.txt
File without changes