repr-cli 0.2.9__tar.gz → 0.2.12__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. {repr_cli-0.2.9/repr_cli.egg-info → repr_cli-0.2.12}/PKG-INFO +2 -1
  2. {repr_cli-0.2.9 → repr_cli-0.2.12}/pyproject.toml +2 -1
  3. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/__main__.py +2 -0
  4. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/cli.py +87 -36
  5. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/doctor.py +2 -0
  6. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/hooks.py +99 -1
  7. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/llm.py +2 -0
  8. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/openai_analysis.py +83 -47
  9. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/privacy.py +2 -0
  10. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/telemetry.py +2 -0
  11. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/templates.py +60 -56
  12. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/ui.py +2 -0
  13. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/updater.py +2 -0
  14. {repr_cli-0.2.9 → repr_cli-0.2.12/repr_cli.egg-info}/PKG-INFO +2 -1
  15. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr_cli.egg-info/requires.txt +1 -0
  16. {repr_cli-0.2.9 → repr_cli-0.2.12}/setup.py +2 -0
  17. {repr_cli-0.2.9 → repr_cli-0.2.12}/LICENSE +0 -0
  18. {repr_cli-0.2.9 → repr_cli-0.2.12}/README.md +0 -0
  19. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/__init__.py +0 -0
  20. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/api.py +0 -0
  21. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/auth.py +0 -0
  22. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/config.py +0 -0
  23. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/discovery.py +0 -0
  24. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/extractor.py +0 -0
  25. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/keychain.py +0 -0
  26. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/storage.py +0 -0
  27. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr/tools.py +0 -0
  28. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr_cli.egg-info/SOURCES.txt +0 -0
  29. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr_cli.egg-info/dependency_links.txt +0 -0
  30. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr_cli.egg-info/entry_points.txt +0 -0
  31. {repr_cli-0.2.9 → repr_cli-0.2.12}/repr_cli.egg-info/top_level.txt +0 -0
  32. {repr_cli-0.2.9 → repr_cli-0.2.12}/setup.cfg +0 -0
  33. {repr_cli-0.2.9 → repr_cli-0.2.12}/tests/test_environment_variables.py +0 -0
  34. {repr_cli-0.2.9 → repr_cli-0.2.12}/tests/test_network_sandboxing.py +0 -0
  35. {repr_cli-0.2.9 → repr_cli-0.2.12}/tests/test_privacy_guarantees.py +0 -0
  36. {repr_cli-0.2.9 → repr_cli-0.2.12}/tests/test_profile_export.py +0 -0
  37. {repr_cli-0.2.9 → repr_cli-0.2.12}/tests/test_repo_identity.py +0 -0
  38. {repr_cli-0.2.9 → repr_cli-0.2.12}/tests/test_stories_review.py +0 -0
  39. {repr_cli-0.2.9 → repr_cli-0.2.12}/tests/test_token_budget.py +0 -0
{repr_cli-0.2.9/repr_cli.egg-info → repr_cli-0.2.12}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: repr-cli
-Version: 0.2.9
+Version: 0.2.12
 Summary: A beautiful, privacy-first CLI that analyzes your code repositories and generates a compelling developer profile
 Author-email: Repr <hello@repr.dev>
 License: MIT License
@@ -48,6 +48,7 @@ Requires-Dist: pygments>=2.16.0
 Requires-Dist: httpx>=0.25.0
 Requires-Dist: openai>=1.0.0
 Requires-Dist: keyring>=24.0.0
+Requires-Dist: pydantic>=2.0.0
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0.0; extra == "dev"
 Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
{repr_cli-0.2.9 → repr_cli-0.2.12}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "repr-cli"
-version = "0.2.9"
+version = "0.2.12"
 description = "A beautiful, privacy-first CLI that analyzes your code repositories and generates a compelling developer profile"
 readme = "README.md"
 license = {file = "LICENSE"}
@@ -32,6 +32,7 @@ dependencies = [
     "httpx>=0.25.0",
     "openai>=1.0.0",
     "keyring>=24.0.0",
+    "pydantic>=2.0.0",
 ]
 
 [project.optional-dependencies]
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/__main__.py
@@ -38,6 +38,8 @@ if __name__ == "__main__":
(whitespace-only change: two blank lines added)
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/cli.py
@@ -610,7 +610,7 @@ async def _generate_stories_async(
 ) -> list[dict]:
     """Generate stories from commits using LLM (async implementation)."""
     from .openai_analysis import get_openai_client, extract_commit_batch
-    from .templates import build_generation_prompt
+    from .templates import build_generation_prompt, StoryOutput
 
     stories = []
 
@@ -643,8 +643,8 @@
                 custom_prompt=custom_prompt,
             )
 
-            # Extract story from batch
-            content = await extract_commit_batch(
+            # Extract story from batch using structured output
+            result = await extract_commit_batch(
                 client=client,
                 commits=batch,
                 batch_num=i + 1,
@@ -652,44 +652,60 @@
                 model=model,
                 system_prompt=system_prompt,
                 user_prompt=user_prompt,
+                structured=True,
            )
 
-            if not content or content.startswith("[Batch"):
-                continue
-
-            # Extract summary (first non-empty line)
-            lines = [l.strip() for l in content.split("\n") if l.strip()]
-            summary = lines[0][:100] if lines else "Story"
-            # Clean up summary
-            summary = summary.lstrip("#-•* ").strip()
-
-            # Build metadata
+            # Handle structured output - now returns list[StoryOutput]
+            story_outputs: list[StoryOutput] = []
+            if isinstance(result, list):
+                story_outputs = result
+            elif isinstance(result, StoryOutput):
+                story_outputs = [result]
+            else:
+                # Fallback for string response
+                content = result
+                if not content or content.startswith("[Batch"):
+                    continue
+                lines = [l.strip() for l in content.split("\n") if l.strip()]
+                summary = lines[0] if lines else "Story"
+                summary = summary.lstrip("#-•* ").strip()
+                story_outputs = [StoryOutput(summary=summary, content=content)]
+
+            # Build shared metadata for all stories from this batch
             commit_shas = [c["full_sha"] for c in batch]
             first_date = min(c["date"] for c in batch)
             last_date = max(c["date"] for c in batch)
             total_files = sum(len(c.get("files", [])) for c in batch)
             total_adds = sum(c.get("insertions", 0) for c in batch)
             total_dels = sum(c.get("deletions", 0) for c in batch)
-
-            metadata = {
-                "summary": summary,
-                "repo_name": repo_info.name,
-                "repo_path": str(repo_info.path),
-                "commit_shas": commit_shas,
-                "first_commit_at": first_date,
-                "last_commit_at": last_date,
-                "files_changed": total_files,
-                "lines_added": total_adds,
-                "lines_removed": total_dels,
-                "generated_locally": local,
-                "template": template,
-                "needs_review": False,
-            }
-
-            # Save story
-            story_id = save_story(content, metadata)
-            metadata["id"] = story_id
-            stories.append(metadata)
+
+            # Save each story from this batch
+            for story_output in story_outputs:
+                content = story_output.content
+                summary = story_output.summary
+
+                if not content or content.startswith("[Batch"):
+                    continue
+
+                metadata = {
+                    "summary": summary,
+                    "repo_name": repo_info.name,
+                    "repo_path": str(repo_info.path),
+                    "commit_shas": commit_shas,
+                    "first_commit_at": first_date,
+                    "last_commit_at": last_date,
+                    "files_changed": total_files,
+                    "lines_added": total_adds,
+                    "lines_removed": total_dels,
+                    "generated_locally": local,
+                    "template": template,
+                    "needs_review": False,
+                }
+
+                # Save story
+                story_id = save_story(content, metadata)
+                metadata["id"] = story_id
+                stories.append(metadata)
 
         except Exception as e:
             console.print(f" [{BRAND_MUTED}]Batch {i+1} failed: {e}[/]")
@@ -788,7 +804,8 @@ def stories(
 @app.command()
 def story(
     action: str = typer.Argument(..., help="Action: view, edit, delete, hide, feature, regenerate"),
-    story_id: str = typer.Argument(..., help="Story ID (ULID)"),
+    story_id: Optional[str] = typer.Argument(None, help="Story ID (ULID)"),
+    all_stories: bool = typer.Option(False, "--all", help="Apply to all stories (for delete)"),
 ):
     """
     Manage a single story.
@@ -796,7 +813,38 @@ def story(
     Examples:
         repr story view 01ARYZ6S41TSV4RRFFQ69G5FAV
         repr story delete 01ARYZ6S41TSV4RRFFQ69G5FAV
+        repr story delete --all
     """
+    # Handle --all flag for delete
+    if all_stories:
+        if action != "delete":
+            print_error("--all flag only works with 'delete' action")
+            raise typer.Exit(1)
+
+        story_list = list_stories()
+        if not story_list:
+            print_info("No stories to delete")
+            raise typer.Exit()
+
+        console.print(f"This will delete [bold]{len(story_list)}[/] stories.")
+        if confirm("Delete all stories?"):
+            deleted = 0
+            for s in story_list:
+                try:
+                    delete_story(s["id"])
+                    deleted += 1
+                except Exception:
+                    pass
+            print_success(f"Deleted {deleted} stories")
+        else:
+            print_info("Cancelled")
+        raise typer.Exit()
+
+    # Require story_id for single-story operations
+    if not story_id:
+        print_error("Story ID required (or use --all for delete)")
+        raise typer.Exit(1)
+
     result = load_story(story_id)
 
     if not result:
@@ -1000,7 +1048,9 @@ def push(
     for s in to_push:
         try:
            content, meta = load_story(s["id"])
-            asyncio.run(api_push_story({**meta, "content": content}))
+            # Use local story ID as client_id for sync
+            payload = {**meta, "content": content, "client_id": s["id"]}
+            asyncio.run(api_push_story(payload))
            mark_story_pushed(s["id"])
            console.print(f" [{BRAND_SUCCESS}]✓[/] {s.get('summary', s.get('id'))[:50]}")
            pushed += 1
@@ -1049,7 +1099,8 @@ def sync():
     for s in unpushed:
         try:
            content, meta = load_story(s["id"])
-            asyncio.run(api_push_story({**meta, "content": content}))
+            payload = {**meta, "content": content, "client_id": s["id"]}
+            asyncio.run(api_push_story(payload))
            mark_story_pushed(s["id"])
         except Exception:
            pass
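Note on the reworked batch handling above: `extract_commit_batch` can now hand back a list of `StoryOutput` objects (one batch may yield several stories), a single `StoryOutput`, or a plain string on the legacy path. A minimal sketch of that normalization step in isolation, outside the CLI plumbing — the sample input string is invented for illustration:

# Sketch only: mirrors how the new cli.py code normalizes extract_commit_batch
# results before building per-story metadata. StoryOutput comes from repr.templates.
from repr.templates import StoryOutput

def normalize_batch_result(result) -> list[StoryOutput]:
    if isinstance(result, list):         # structured=True: already a list of StoryOutput
        return result
    if isinstance(result, StoryOutput):  # single structured story
        return [result]
    content = result or ""               # legacy plain-string fallback
    if not content or content.startswith("[Batch"):
        return []                        # skipped batch, nothing to save
    first_line = next((l.strip() for l in content.splitlines() if l.strip()), "Story")
    summary = first_line.lstrip("#-•* ").strip()
    return [StoryOutput(summary=summary, content=content)]

# Invented example input:
print(normalize_batch_result("Wire up Redis caching for auth tokens\n\nDetails..."))

The practical effect is that one commit batch can now produce several saved stories, each sharing the batch's commit metadata (SHAs, dates, line counts).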
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/doctor.py
@@ -490,6 +490,8 @@ def run_all_checks() -> DoctorReport:
(whitespace-only change: two blank lines added)
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/hooks.py
@@ -423,6 +423,8 @@ def queue_commit(repo_path: Path, commit_sha: str, message: str | None = None) -
     """Add a commit to the queue.
 
     Uses file locking to handle concurrent commits safely.
+    If auto_generate_on_hook is enabled and queue size meets batch_size,
+    triggers background story generation.
 
     Args:
         repo_path: Path to repository root
@@ -435,6 +437,8 @@ def queue_commit(repo_path: Path, commit_sha: str, message: str | None = None) -
     queue_path = get_queue_path(repo_path)
     lock_path = queue_path.with_suffix(".lock")
 
+    queue_size = 0
+
     try:
         fd = _acquire_lock(lock_path)
         try:
@@ -452,7 +456,7 @@ def queue_commit(repo_path: Path, commit_sha: str, message: str | None = None) -
             })
 
             save_queue(repo_path, queue)
-            return True
+            queue_size = len(queue)
 
         finally:
             _release_lock(fd)
@@ -460,6 +464,100 @@ def queue_commit(repo_path: Path, commit_sha: str, message: str | None = None) -
     except QueueLockError:
         # Could not get lock, skip queuing
         return False
+
+    # Check if we should auto-generate stories
+    _maybe_auto_generate(repo_path, queue_size)
+
+    return True
+
+
+def _maybe_auto_generate(repo_path: Path, queue_size: int) -> None:
+    """Check if auto-generation should be triggered and spawn background process.
+
+    Args:
+        repo_path: Path to repository root
+        queue_size: Current number of commits in queue
+    """
+    from .config import load_config
+
+    config = load_config()
+    generation_config = config.get("generation", {})
+
+    # Check if auto-generation is enabled
+    if not generation_config.get("auto_generate_on_hook", False):
+        return
+
+    batch_size = generation_config.get("batch_size", 5)
+
+    # Only trigger if queue size meets batch threshold
+    if queue_size < batch_size:
+        return
+
+    # Spawn background generation process
+    _spawn_background_generate(repo_path)
+
+
+def _spawn_background_generate(repo_path: Path) -> None:
+    """Spawn a background process to generate stories for a repo.
+
+    Uses subprocess.Popen with detached process to not block the git hook.
+    Logs to ~/.repr/logs/auto_generate.log
+
+    Args:
+        repo_path: Path to repository to generate stories for
+    """
+    import subprocess
+    import sys
+    from .config import REPR_HOME
+
+    # Ensure log directory exists
+    log_dir = REPR_HOME / "logs"
+    log_dir.mkdir(parents=True, exist_ok=True)
+    log_file = log_dir / "auto_generate.log"
+
+    # Build command - use sys.executable to find repr command
+    # repr generate --repo <path> --local --json
+    cmd = [
+        sys.executable, "-m", "repr",
+        "generate",
+        "--repo", str(repo_path),
+        "--local",  # Always use local LLM for auto-generation
+        "--json",
+    ]
+
+    try:
+        # Open log file for appending
+        with open(log_file, "a") as log:
+            log.write(f"\n[{datetime.now().isoformat()}] Auto-generating for {repo_path}\n")
+            log.flush()
+
+            # Spawn detached background process
+            # On Unix, use start_new_session=True to fully detach
+            # On Windows, use DETACHED_PROCESS flag
+            if sys.platform == "win32":
+                DETACHED_PROCESS = 0x00000008
+                subprocess.Popen(
+                    cmd,
+                    stdout=log,
+                    stderr=log,
+                    creationflags=DETACHED_PROCESS,
+                    close_fds=True,
+                )
+            else:
+                subprocess.Popen(
+                    cmd,
+                    stdout=log,
+                    stderr=log,
+                    start_new_session=True,
+                    close_fds=True,
+                )
+    except Exception as e:
+        # Silently fail - don't block the git commit
+        try:
+            with open(log_file, "a") as log:
+                log.write(f"[{datetime.now().isoformat()}] Error spawning auto-generate: {e}\n")
+        except Exception:
+            pass
 
 
 def dequeue_commits(repo_path: Path, commit_shas: list[str]) -> int:
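The spawn helper above exists so the post-commit hook returns immediately; the child process is fully detached and only writes to a log. A self-contained sketch of the same pattern, assuming the ~/.repr/logs location mentioned in the docstring (the "--repo ." argument is a placeholder):

# Standalone sketch of the detached-process pattern used by _spawn_background_generate.
import subprocess
import sys
from pathlib import Path

log_file = Path.home() / ".repr" / "logs" / "auto_generate.log"
log_file.parent.mkdir(parents=True, exist_ok=True)

# Same shape of command the hook builds: python -m repr generate --repo <path> --local --json
cmd = [sys.executable, "-m", "repr", "generate", "--repo", ".", "--local", "--json"]

with open(log_file, "a") as log:
    if sys.platform == "win32":
        DETACHED_PROCESS = 0x00000008  # detach the child from the hook's console on Windows
        subprocess.Popen(cmd, stdout=log, stderr=log,
                         creationflags=DETACHED_PROCESS, close_fds=True)
    else:
        # start_new_session=True puts the child in its own session so the hook can exit
        subprocess.Popen(cmd, stdout=log, stderr=log,
                         start_new_session=True, close_fds=True)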
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/llm.py
@@ -538,6 +538,8 @@ def get_effective_llm_mode() -> tuple[str, dict[str, Any]]:
(whitespace-only change: two blank lines added)
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/openai_analysis.py
@@ -11,10 +11,23 @@ import asyncio
 from typing import Any
 
 from openai import AsyncOpenAI
+from pydantic import BaseModel, Field
 
 from .tools import get_commits_with_diffs
 from .discovery import RepoInfo
 from .config import get_litellm_config, get_llm_config, get_api_base
+from .templates import StoryOutput
+
+
+class ExtractedStory(BaseModel):
+    """A single coherent block of work."""
+    title: str = Field(description="One-line title, max 120 chars. Dev jargon welcome. e.g. 'Wire up Redis caching for auth tokens'")
+    summary: str = Field(description="Markdown - what was built, how it works, why it matters")
+
+
+class ExtractedCommitBatch(BaseModel):
+    """Schema for extraction phase output - one or more stories from a batch of commits."""
+    stories: list[ExtractedStory] = Field(description="List of distinct blocks of work found in the commits")
 
 
 # Model configuration (defaults for OpenAI)
@@ -124,10 +137,11 @@
     model: str = None,
     system_prompt: str = None,
     user_prompt: str = None,
-) -> str:
+    structured: bool = False,
+) -> str | list[StoryOutput]:
     """
     Extraction phase: Extract accomplishments from a batch of commits.
-
+
     Args:
         client: OpenAI client
         commits: List of commits with diffs
@@ -136,9 +150,10 @@
         model: Model name to use (defaults to stored config or DEFAULT_EXTRACTION_MODEL)
         system_prompt: Custom system prompt (optional, uses default if not provided)
         user_prompt: Custom user prompt (optional, uses default if not provided)
-
+        structured: If True, return list of StoryOutput with summary/content fields
+
     Returns:
-        Summary of technical accomplishments in this batch
+        Summary of technical accomplishments (str) or list[StoryOutput] if structured=True
     """
     if not model:
         llm_config = get_llm_config()
@@ -176,60 +191,81 @@ Files changed:"""
     commits_formatted = "\n\n---\n".join(commits_text)
 
     if not system_prompt:
-        system_prompt = """You are analyzing a developer's actual code commits to extract specific technical accomplishments WITH the reasoning behind them.
-
-Your job: Read the commit messages and diffs, then list CONCRETE technical accomplishments with SPECIFIC details AND infer WHY those decisions were made.
-
-For each accomplishment, capture:
-1. WHAT was built (the technical implementation)
-2. WHY it was needed (the problem being solved, the user/business need, or the technical constraint)
-
-Rules:
-- Use EXACT technology names from the code (FastAPI, React, SQLAlchemy, not "web framework")
-- Describe SPECIFIC features built (e.g., "JWT authentication with refresh tokens", not "auth system")
-- INFER the motivation when possible:
-  - Performance changes → what latency/throughput problem was being solved?
-  - New features → what user capability was being enabled?
-  - Refactors → what maintainability or scalability issue was being addressed?
-  - Error handling → what failure mode was being prevented?
-- Mention architectural patterns when evident (microservices, event-driven, REST API, etc.)
-- Include scale indicators (number of endpoints, integrations, etc.)
-- Be concise but specific - bullet points are fine
-
-What NOT to do:
-- Don't write vague statements like "worked on backend"
-- Don't guess technologies not shown in the diffs
-- Don't include process/methodology unless there's evidence
-- Don't fabricate motivations that aren't supported by the code/commits"""
-
-    if not user_prompt:
-        user_prompt = f"""Analyze commits batch {batch_num}/{total_batches} and extract technical accomplishments:
-
-{commits_formatted}
-
-List the specific technical work done in this batch. For each item:
-1. What was BUILT (the concrete implementation)
-2. Why it was needed (infer from context: what problem was solved? what user need? what constraint?)
-
-Focus on substance, not process."""
+        system_prompt = """Read the commits and diffs. Understand what the dev actually shipped.
+
+Write it up like one dev explaining to another what got done. Use real dev jargon - talk about wiring up endpoints, spinning up services, hooking into APIs, plumbing data through, etc.
+
+Group related commits into one story. Split unrelated work into separate stories.
+
+Per story:
+- title: One punchy line, max 120 chars. Say what was built. Tech details when relevant.
+  Good: "Wire up WebSocket streaming for chat responses"
+  Good: "Plumb user prefs through to the settings modal"
+  Good: "Fix race condition in token refresh flow"
+  Bad: "Improved authentication system" (too vague)
+  Bad: "Enhanced user experience" (meaningless)
+- summary: Markdown. What was built, how it works, any interesting decisions.
+
+No corporate fluff. No "enhanced", "improved", "robust". Just say what happened."""
+
+    if not user_prompt:
+        user_prompt = f"""Commits batch {batch_num}/{total_batches}:
+
+{commits_formatted}"""
 
     try:
-        response = await client.chat.completions.create(
-            model=model,
-            messages=[
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": user_prompt},
-            ],
-            temperature=EXTRACTION_TEMPERATURE,
-            max_tokens=16000,  # Increased for reasoning models that use tokens for thinking
-        )
-
-        return response.choices[0].message.content or ""
+        if structured:
+            # Use structured output with Pydantic model
+            response = await client.beta.chat.completions.parse(
+                model=model,
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": user_prompt},
+                ],
+                temperature=EXTRACTION_TEMPERATURE,
+                max_tokens=16000,
+                response_format=ExtractedCommitBatch,
+            )
+
+            parsed = response.choices[0].message.parsed
+            if parsed and parsed.stories:
+                # Convert each story to StoryOutput
+                return [
+                    StoryOutput(summary=story.title, content=story.summary)
+                    for story in parsed.stories
+                ]
+            # Fallback if parsing failed (e.g., refusal)
+            content = response.choices[0].message.content or ""
+            return [
+                StoryOutput(
+                    summary=f"Batch {batch_num} analysis",
+                    content=content if content else "[No content extracted]",
+                )
+            ]
+        else:
+            response = await client.chat.completions.create(
+                model=model,
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": user_prompt},
+                ],
+                temperature=EXTRACTION_TEMPERATURE,
+                max_tokens=16000,
+            )
+
+            return response.choices[0].message.content or ""
     except Exception as e:
         error_msg = str(e).lower()
         # Handle content moderation blocks gracefully
         if "blocked" in error_msg or "content" in error_msg or "moderation" in error_msg:
             # Skip this batch but continue with others
+            if structured:
+                return [
+                    StoryOutput(
+                        summary=f"Batch {batch_num} skipped",
+                        content=f"[Batch {batch_num} skipped - content filter triggered]",
+                    )
+                ]
             return f"[Batch {batch_num} skipped - content filter triggered]"
         # Re-raise other errors
         raise
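The structured branch above leans on the OpenAI SDK's Pydantic-aware parse helper. A minimal async sketch of that call in isolation — note it needs a newer openai release than the declared >=1.0.0 floor (the parse helper arrived around 1.40), and the model name and prompt below are placeholders:

# Minimal sketch of structured output via client.beta.chat.completions.parse.
import asyncio
from openai import AsyncOpenAI
from pydantic import BaseModel, Field

class ExtractedStory(BaseModel):
    title: str = Field(description="One-line title")
    summary: str = Field(description="Markdown summary")

class ExtractedCommitBatch(BaseModel):
    stories: list[ExtractedStory]

async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    response = await client.beta.chat.completions.parse(
        model="gpt-4o-mini",  # placeholder model
        messages=[{"role": "user", "content": "Commits batch 1/1:\n\nadd JWT refresh tokens"}],
        response_format=ExtractedCommitBatch,
    )
    parsed = response.choices[0].message.parsed  # ExtractedCommitBatch, or None on refusal
    for story in (parsed.stories if parsed else []):
        print(story.title)

asyncio.run(main())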
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/privacy.py
@@ -365,6 +365,8 @@ def clear_audit_log() -> int:
(whitespace-only change: two blank lines added)
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/telemetry.py
@@ -311,6 +311,8 @@ def get_pending_events() -> list[dict[str, Any]]:
(whitespace-only change: two blank lines added)
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/templates.py
@@ -9,103 +9,107 @@ Provides different prompts for generating stories based on use case:
 """
 
 from typing import Any
+from pydantic import BaseModel, Field
+
+
+class StoryOutput(BaseModel):
+    """Structured output for a generated story."""
+    summary: str = Field(description="One-line technical summary of the work (max 120 chars, no fluff)")
+    content: str = Field(description="Full technical description in markdown")
 
 
 # Template definitions
 TEMPLATES = {
     "resume": {
         "name": "Resume",
-        "description": "Professional accomplishment summaries for resumes and portfolios",
-        "system_prompt": """You are helping a developer document their work accomplishments for a professional resume or portfolio.
-
-Focus on:
-- Quantifiable impact where possible (performance improvements, user metrics, etc.)
-- Technical complexity and problem-solving
-- Leadership and collaboration
-- Technologies and skills demonstrated
-
-Write in first person, using action verbs. Keep it concise and impactful.
-Format: 2-3 bullet points per accomplishment, each 1-2 sentences.""",
-        "user_prompt_template": """Based on these commits, write professional accomplishment summaries:
+        "description": "Technical work log for resumes and portfolios",
+        "system_prompt": """Extract technical work from commits. Be direct and specific.
+
+Output JSON with:
+- summary: One line, max 120 chars. State what was done technically. No adjectives, no fluff.
+  Good: "Added JWT refresh token rotation with Redis session store"
+  Bad: "Enhanced authentication system with improved security"
+- content: Markdown with technical details. What was built, how, what tech.
+
+Rules:
+- Name specific technologies, libraries, patterns
+- Describe the implementation, not the benefit
+- No marketing language (enhanced, streamlined, robust, seamless)
+- No resume verbs (spearheaded, leveraged, drove)
+- If there's a metric, include it. If not, don't invent one.""",
+        "user_prompt_template": """Repository: {repo_name}
 
-Repository: {repo_name}
 Commits:
 {commits_summary}
 
-Generate 1-3 accomplishment summaries suitable for a resume.""",
+Output JSON with summary and content.""",
     },
 
     "changelog": {
         "name": "Changelog",
         "description": "Technical change documentation for release notes",
-        "system_prompt": """You are writing technical changelog entries for a software project.
-
-Focus on:
-- What changed (features, fixes, improvements)
-- Why it matters (user impact, developer experience)
-- Breaking changes or migration notes
-- Technical details relevant to other developers
-
-Use conventional changelog format with categories:
-- Added: New features
-- Changed: Changes to existing functionality
-- Fixed: Bug fixes
-- Removed: Removed features
-- Security: Security improvements""",
-        "user_prompt_template": """Generate changelog entries from these commits:
-
-Repository: {repo_name}
+        "system_prompt": """Extract changes from commits for a changelog. Be specific.
+
+Output JSON with:
+- summary: One line describing the main change (max 120 chars)
+- content: Markdown changelog with categories (Added/Changed/Fixed/Removed)
+
+Rules:
+- List actual changes, not benefits
+- Include file/module names when relevant
+- No fluff words (improved, enhanced, better)""",
+        "user_prompt_template": """Repository: {repo_name}
+
 Commits:
 {commits_summary}
 
-Write changelog entries grouped by category.""",
+Output JSON with summary and content.""",
     },
 
     "narrative": {
         "name": "Narrative",
-        "description": "Storytelling format for blogs or case studies",
-        "system_prompt": """You are helping a developer tell the story of their work in an engaging narrative format.
+        "description": "Technical narrative for blogs or case studies",
+        "system_prompt": """Write a technical narrative from commits.
+
+Output JSON with:
+- summary: One-line description of what was built (max 120 chars)
+- content: Markdown narrative explaining the technical work
 
 Focus on:
-- The challenge or problem being solved
-- The approach and decision-making process
-- Obstacles encountered and how they were overcome
-- Results and lessons learned
+- What problem was solved
+- How it was implemented technically
+- What decisions were made and why
 
-Write in a conversational, engaging tone suitable for a blog post or case study.
-Use present tense for engagement. Include technical details but make it accessible.""",
-        "user_prompt_template": """Tell the story of this development work:
+No marketing language. Write like you're explaining to another engineer.""",
+        "user_prompt_template": """Repository: {repo_name}
 
-Repository: {repo_name}
 Commits:
 {commits_summary}
 
-Write a narrative (2-3 paragraphs) that would work as a blog post section.""",
+Output JSON with summary and content.""",
     },
 
     "interview": {
         "name": "Interview Prep",
-        "description": "Behavioral interview preparation with STAR format",
-        "system_prompt": """You are helping a developer prepare for behavioral interviews using the STAR method.
+        "description": "Technical interview preparation",
+        "system_prompt": """Extract technical work for interview prep.
 
-Format each accomplishment as:
-- Situation: Context and background
-- Task: What needed to be done
-- Action: What you did specifically
-- Result: The outcome and impact
+Output JSON with:
+- summary: One-line technical summary (max 120 chars)
+- content: Markdown with situation/task/action/result format
 
 Focus on:
-- Technical decision-making
-- Problem-solving approach
-- Collaboration and communication
-- Quantifiable results""",
-        "user_prompt_template": """Create interview-ready stories from these commits:
+- Specific technical decisions made
+- Problems encountered and solutions
+- Technologies and patterns used
+
+No resume language. Be specific about what you actually did.""",
+        "user_prompt_template": """Repository: {repo_name}
 
-Repository: {repo_name}
 Commits:
 {commits_summary}
 
-Generate 1-2 STAR-format stories for behavioral interviews.""",
+Output JSON with summary and content.""",
     },
 }
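For reference, the rewritten templates keep the same {repo_name} and {commits_summary} placeholders, and StoryOutput is an ordinary pydantic v2 model. A small illustrative sketch — the values are invented, and the real CLI goes through build_generation_prompt rather than formatting templates directly:

# Illustrative only: fill a template's user prompt and build a StoryOutput by hand.
from repr.templates import TEMPLATES, StoryOutput

prompt = TEMPLATES["resume"]["user_prompt_template"].format(
    repo_name="example-repo",
    commits_summary="- abc1234: add JWT refresh token rotation",
)
print(prompt)

story = StoryOutput(
    summary="Added JWT refresh token rotation with Redis session store",
    content="## Auth changes\n\nRotated refresh tokens on every use; sessions live in Redis.",
)
print(story.model_dump())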
 
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/ui.py
@@ -177,6 +177,8 @@ def confirm(message: str, default: bool = False) -> bool:
(whitespace-only change: two blank lines added)
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr/updater.py
@@ -277,6 +277,8 @@ def perform_update(force: bool = False) -> bool:
(whitespace-only change: two blank lines added)
{repr_cli-0.2.9 → repr_cli-0.2.12/repr_cli.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: repr-cli
-Version: 0.2.9
+Version: 0.2.12
 Summary: A beautiful, privacy-first CLI that analyzes your code repositories and generates a compelling developer profile
 Author-email: Repr <hello@repr.dev>
 License: MIT License
@@ -48,6 +48,7 @@ Requires-Dist: pygments>=2.16.0
 Requires-Dist: httpx>=0.25.0
 Requires-Dist: openai>=1.0.0
 Requires-Dist: keyring>=24.0.0
+Requires-Dist: pydantic>=2.0.0
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0.0; extra == "dev"
 Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
{repr_cli-0.2.9 → repr_cli-0.2.12}/repr_cli.egg-info/requires.txt
@@ -5,6 +5,7 @@ pygments>=2.16.0
 httpx>=0.25.0
 openai>=1.0.0
 keyring>=24.0.0
+pydantic>=2.0.0
 
 [dev]
 pytest>=7.0.0
{repr_cli-0.2.9 → repr_cli-0.2.12}/setup.py
@@ -39,6 +39,8 @@ if __name__ == "__main__":
(whitespace-only change: two blank lines added)