repr-cli 0.2.9__tar.gz → 0.2.11__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. {repr_cli-0.2.9/repr_cli.egg-info → repr_cli-0.2.11}/PKG-INFO +2 -1
  2. {repr_cli-0.2.9 → repr_cli-0.2.11}/pyproject.toml +2 -1
  3. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/cli.py +87 -36
  4. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/openai_analysis.py +83 -47
  5. {repr_cli-0.2.9 → repr_cli-0.2.11/repr_cli.egg-info}/PKG-INFO +2 -1
  6. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr_cli.egg-info/requires.txt +1 -0
  7. {repr_cli-0.2.9 → repr_cli-0.2.11}/LICENSE +0 -0
  8. {repr_cli-0.2.9 → repr_cli-0.2.11}/README.md +0 -0
  9. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/__init__.py +0 -0
  10. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/__main__.py +0 -0
  11. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/api.py +0 -0
  12. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/auth.py +0 -0
  13. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/config.py +0 -0
  14. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/discovery.py +0 -0
  15. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/doctor.py +0 -0
  16. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/extractor.py +0 -0
  17. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/hooks.py +0 -0
  18. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/keychain.py +0 -0
  19. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/llm.py +0 -0
  20. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/privacy.py +0 -0
  21. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/storage.py +0 -0
  22. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/telemetry.py +0 -0
  23. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/templates.py +0 -0
  24. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/tools.py +0 -0
  25. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/ui.py +0 -0
  26. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr/updater.py +0 -0
  27. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr_cli.egg-info/SOURCES.txt +0 -0
  28. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr_cli.egg-info/dependency_links.txt +0 -0
  29. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr_cli.egg-info/entry_points.txt +0 -0
  30. {repr_cli-0.2.9 → repr_cli-0.2.11}/repr_cli.egg-info/top_level.txt +0 -0
  31. {repr_cli-0.2.9 → repr_cli-0.2.11}/setup.cfg +0 -0
  32. {repr_cli-0.2.9 → repr_cli-0.2.11}/setup.py +0 -0
  33. {repr_cli-0.2.9 → repr_cli-0.2.11}/tests/test_environment_variables.py +0 -0
  34. {repr_cli-0.2.9 → repr_cli-0.2.11}/tests/test_network_sandboxing.py +0 -0
  35. {repr_cli-0.2.9 → repr_cli-0.2.11}/tests/test_privacy_guarantees.py +0 -0
  36. {repr_cli-0.2.9 → repr_cli-0.2.11}/tests/test_profile_export.py +0 -0
  37. {repr_cli-0.2.9 → repr_cli-0.2.11}/tests/test_repo_identity.py +0 -0
  38. {repr_cli-0.2.9 → repr_cli-0.2.11}/tests/test_stories_review.py +0 -0
  39. {repr_cli-0.2.9 → repr_cli-0.2.11}/tests/test_token_budget.py +0 -0

{repr_cli-0.2.9/repr_cli.egg-info → repr_cli-0.2.11}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: repr-cli
-Version: 0.2.9
+Version: 0.2.11
 Summary: A beautiful, privacy-first CLI that analyzes your code repositories and generates a compelling developer profile
 Author-email: Repr <hello@repr.dev>
 License: MIT License
@@ -48,6 +48,7 @@ Requires-Dist: pygments>=2.16.0
 Requires-Dist: httpx>=0.25.0
 Requires-Dist: openai>=1.0.0
 Requires-Dist: keyring>=24.0.0
+Requires-Dist: pydantic>=2.0.0
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0.0; extra == "dev"
 Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"

{repr_cli-0.2.9 → repr_cli-0.2.11}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "repr-cli"
-version = "0.2.9"
+version = "0.2.11"
 description = "A beautiful, privacy-first CLI that analyzes your code repositories and generates a compelling developer profile"
 readme = "README.md"
 license = {file = "LICENSE"}
@@ -32,6 +32,7 @@ dependencies = [
     "httpx>=0.25.0",
     "openai>=1.0.0",
     "keyring>=24.0.0",
+    "pydantic>=2.0.0",
 ]
 
 [project.optional-dependencies]
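
The new pydantic>=2.0.0 runtime dependency backs the structured-output models added to repr/openai_analysis.py later in this diff. A minimal standalone sketch of the pattern (class and field names here are illustrative, not the package's):

    from pydantic import BaseModel, Field

    class Story(BaseModel):
        # Illustrative stand-in for the ExtractedStory model defined further down
        title: str = Field(description="One-line title")
        summary: str = Field(description="Markdown body")

    # Pydantic v2 models can emit the JSON schema that OpenAI's structured
    # outputs feature uses to constrain the model's reply.
    schema = Story.model_json_schema()
    print(schema["required"])  # ['title', 'summary']
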

{repr_cli-0.2.9 → repr_cli-0.2.11}/repr/cli.py

@@ -610,7 +610,7 @@ async def _generate_stories_async(
 ) -> list[dict]:
     """Generate stories from commits using LLM (async implementation)."""
     from .openai_analysis import get_openai_client, extract_commit_batch
-    from .templates import build_generation_prompt
+    from .templates import build_generation_prompt, StoryOutput
 
     stories = []
 
@@ -643,8 +643,8 @@ async def _generate_stories_async(
                 custom_prompt=custom_prompt,
             )
 
-            # Extract story from batch
-            content = await extract_commit_batch(
+            # Extract story from batch using structured output
+            result = await extract_commit_batch(
                 client=client,
                 commits=batch,
                 batch_num=i + 1,
@@ -652,44 +652,60 @@ async def _generate_stories_async(
                 model=model,
                 system_prompt=system_prompt,
                 user_prompt=user_prompt,
+                structured=True,
             )
 
-            if not content or content.startswith("[Batch"):
-                continue
-
-            # Extract summary (first non-empty line)
-            lines = [l.strip() for l in content.split("\n") if l.strip()]
-            summary = lines[0][:100] if lines else "Story"
-            # Clean up summary
-            summary = summary.lstrip("#-•* ").strip()
-
-            # Build metadata
+            # Handle structured output - now returns list[StoryOutput]
+            story_outputs: list[StoryOutput] = []
+            if isinstance(result, list):
+                story_outputs = result
+            elif isinstance(result, StoryOutput):
+                story_outputs = [result]
+            else:
+                # Fallback for string response
+                content = result
+                if not content or content.startswith("[Batch"):
+                    continue
+                lines = [l.strip() for l in content.split("\n") if l.strip()]
+                summary = lines[0] if lines else "Story"
+                summary = summary.lstrip("#-•* ").strip()
+                story_outputs = [StoryOutput(summary=summary, content=content)]
+
+            # Build shared metadata for all stories from this batch
             commit_shas = [c["full_sha"] for c in batch]
             first_date = min(c["date"] for c in batch)
             last_date = max(c["date"] for c in batch)
             total_files = sum(len(c.get("files", [])) for c in batch)
             total_adds = sum(c.get("insertions", 0) for c in batch)
             total_dels = sum(c.get("deletions", 0) for c in batch)
-
-            metadata = {
-                "summary": summary,
-                "repo_name": repo_info.name,
-                "repo_path": str(repo_info.path),
-                "commit_shas": commit_shas,
-                "first_commit_at": first_date,
-                "last_commit_at": last_date,
-                "files_changed": total_files,
-                "lines_added": total_adds,
-                "lines_removed": total_dels,
-                "generated_locally": local,
-                "template": template,
-                "needs_review": False,
-            }
-
-            # Save story
-            story_id = save_story(content, metadata)
-            metadata["id"] = story_id
-            stories.append(metadata)
+
+            # Save each story from this batch
+            for story_output in story_outputs:
+                content = story_output.content
+                summary = story_output.summary
+
+                if not content or content.startswith("[Batch"):
+                    continue
+
+                metadata = {
+                    "summary": summary,
+                    "repo_name": repo_info.name,
+                    "repo_path": str(repo_info.path),
+                    "commit_shas": commit_shas,
+                    "first_commit_at": first_date,
+                    "last_commit_at": last_date,
+                    "files_changed": total_files,
+                    "lines_added": total_adds,
+                    "lines_removed": total_dels,
+                    "generated_locally": local,
+                    "template": template,
+                    "needs_review": False,
+                }
+
+                # Save story
+                story_id = save_story(content, metadata)
+                metadata["id"] = story_id
+                stories.append(metadata)
 
         except Exception as e:
             console.print(f" [{BRAND_MUTED}]Batch {i+1} failed: {e}[/]")
@@ -788,7 +804,8 @@ def stories(
 @app.command()
 def story(
     action: str = typer.Argument(..., help="Action: view, edit, delete, hide, feature, regenerate"),
-    story_id: str = typer.Argument(..., help="Story ID (ULID)"),
+    story_id: Optional[str] = typer.Argument(None, help="Story ID (ULID)"),
+    all_stories: bool = typer.Option(False, "--all", help="Apply to all stories (for delete)"),
 ):
     """
     Manage a single story.
@@ -796,7 +813,38 @@ def story(
     Examples:
         repr story view 01ARYZ6S41TSV4RRFFQ69G5FAV
         repr story delete 01ARYZ6S41TSV4RRFFQ69G5FAV
+        repr story delete --all
     """
+    # Handle --all flag for delete
+    if all_stories:
+        if action != "delete":
+            print_error("--all flag only works with 'delete' action")
+            raise typer.Exit(1)
+
+        story_list = list_stories()
+        if not story_list:
+            print_info("No stories to delete")
+            raise typer.Exit()
+
+        console.print(f"This will delete [bold]{len(story_list)}[/] stories.")
+        if confirm("Delete all stories?"):
+            deleted = 0
+            for s in story_list:
+                try:
+                    delete_story(s["id"])
+                    deleted += 1
+                except Exception:
+                    pass
+            print_success(f"Deleted {deleted} stories")
+        else:
+            print_info("Cancelled")
+        raise typer.Exit()
+
+    # Require story_id for single-story operations
+    if not story_id:
+        print_error("Story ID required (or use --all for delete)")
+        raise typer.Exit(1)
+
     result = load_story(story_id)
 
     if not result:
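
The change above relies on a common Typer pattern: make the positional argument optional so a bulk flag can stand in for it, then validate the combination by hand. A stripped-down sketch of just that pattern (command and message text are placeholders, not the package's):

    import typer
    from typing import Optional

    app = typer.Typer()

    @app.command()
    def story(
        action: str = typer.Argument(..., help="Action: view, edit, delete, ..."),
        story_id: Optional[str] = typer.Argument(None, help="Story ID (ULID)"),
        all_stories: bool = typer.Option(False, "--all", help="Apply to all stories"),
    ):
        # A bulk flag only makes sense for delete; otherwise a story ID is required.
        if all_stories and action != "delete":
            typer.echo("--all only works with 'delete'")
            raise typer.Exit(1)
        if not all_stories and not story_id:
            typer.echo("Story ID required (or use --all for delete)")
            raise typer.Exit(1)
        typer.echo(f"{action} {'ALL' if all_stories else story_id}")
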
@@ -1000,7 +1048,9 @@ def push(
     for s in to_push:
         try:
             content, meta = load_story(s["id"])
-            asyncio.run(api_push_story({**meta, "content": content}))
+            # Use local story ID as client_id for sync
+            payload = {**meta, "content": content, "client_id": s["id"]}
+            asyncio.run(api_push_story(payload))
             mark_story_pushed(s["id"])
             console.print(f" [{BRAND_SUCCESS}]✓[/] {s.get('summary', s.get('id'))[:50]}")
             pushed += 1
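
The comment in the hunk suggests the local ULID now travels with the push so the backend can correlate repeated pushes of the same story; the exact server-side behaviour is not visible in this diff. The payload shape, roughly (values are made up):

    # Assumed shape only - meta is whatever load_story() returns for the story.
    meta = {"summary": "Fix race condition in token refresh flow", "repo_name": "repr-cli"}
    content = "## What happened\n..."
    local_id = "01ARYZ6S41TSV4RRFFQ69G5FAV"

    payload = {**meta, "content": content, "client_id": local_id}
    # asyncio.run(api_push_story(payload))  # as in the hunk above
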
@@ -1049,7 +1099,8 @@ def sync():
     for s in unpushed:
         try:
             content, meta = load_story(s["id"])
-            asyncio.run(api_push_story({**meta, "content": content}))
+            payload = {**meta, "content": content, "client_id": s["id"]}
+            asyncio.run(api_push_story(payload))
             mark_story_pushed(s["id"])
         except Exception:
             pass

{repr_cli-0.2.9 → repr_cli-0.2.11}/repr/openai_analysis.py

@@ -11,10 +11,23 @@ import asyncio
 from typing import Any
 
 from openai import AsyncOpenAI
+from pydantic import BaseModel, Field
 
 from .tools import get_commits_with_diffs
 from .discovery import RepoInfo
 from .config import get_litellm_config, get_llm_config, get_api_base
+from .templates import StoryOutput
+
+
+class ExtractedStory(BaseModel):
+    """A single coherent block of work."""
+    title: str = Field(description="One-line title, max 120 chars. Dev jargon welcome. e.g. 'Wire up Redis caching for auth tokens'")
+    summary: str = Field(description="Markdown - what was built, how it works, why it matters")
+
+
+class ExtractedCommitBatch(BaseModel):
+    """Schema for extraction phase output - one or more stories from a batch of commits."""
+    stories: list[ExtractedStory] = Field(description="List of distinct blocks of work found in the commits")
 
 
 # Model configuration (defaults for OpenAI)
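
Roughly what a populated ExtractedCommitBatch looks like once parsed, assuming the two models defined in the hunk above (example values only):

    batch = ExtractedCommitBatch.model_validate({
        "stories": [
            {
                "title": "Wire up Redis caching for auth tokens",
                "summary": "Added a cache in front of token lookups to cut per-request latency.",
            }
        ]
    })
    # Each story carries a one-line title plus a markdown summary.
    assert len(batch.stories) == 1 and batch.stories[0].title
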
@@ -124,10 +137,11 @@ async def extract_commit_batch(
     model: str = None,
     system_prompt: str = None,
     user_prompt: str = None,
-) -> str:
+    structured: bool = False,
+) -> str | list[StoryOutput]:
     """
     Extraction phase: Extract accomplishments from a batch of commits.
-
+
     Args:
         client: OpenAI client
         commits: List of commits with diffs
@@ -136,9 +150,10 @@ async def extract_commit_batch(
         model: Model name to use (defaults to stored config or DEFAULT_EXTRACTION_MODEL)
         system_prompt: Custom system prompt (optional, uses default if not provided)
         user_prompt: Custom user prompt (optional, uses default if not provided)
-
+        structured: If True, return list of StoryOutput with summary/content fields
+
     Returns:
-        Summary of technical accomplishments in this batch
+        Summary of technical accomplishments (str) or list[StoryOutput] if structured=True
     """
     if not model:
         llm_config = get_llm_config()
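
A hedged sketch of a caller handling the new union return type. Only the keyword names visible in this diff (client, commits, batch_num, structured) are taken from the package; total_batches is inferred from the prompt template further down and the placeholder values are assumptions:

    async def run_one_batch(client, batch):
        # model / prompt kwargs are left at their defaults here.
        result = await extract_commit_batch(
            client=client,
            commits=batch,
            batch_num=1,
            total_batches=1,
            structured=True,
        )
        if isinstance(result, str):
            return result  # structured=False / legacy plain-text path
        return [(s.summary, s.content) for s in result]  # list[StoryOutput]
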
@@ -176,60 +191,81 @@ Files changed:"""
     commits_formatted = "\n\n---\n".join(commits_text)
 
     if not system_prompt:
-        system_prompt = """You are analyzing a developer's actual code commits to extract specific technical accomplishments WITH the reasoning behind them.
-
-Your job: Read the commit messages and diffs, then list CONCRETE technical accomplishments with SPECIFIC details AND infer WHY those decisions were made.
-
-For each accomplishment, capture:
-1. WHAT was built (the technical implementation)
-2. WHY it was needed (the problem being solved, the user/business need, or the technical constraint)
-
-Rules:
-- Use EXACT technology names from the code (FastAPI, React, SQLAlchemy, not "web framework")
-- Describe SPECIFIC features built (e.g., "JWT authentication with refresh tokens", not "auth system")
-- INFER the motivation when possible:
-  - Performance changes → what latency/throughput problem was being solved?
-  - New features → what user capability was being enabled?
-  - Refactors → what maintainability or scalability issue was being addressed?
-  - Error handling → what failure mode was being prevented?
-- Mention architectural patterns when evident (microservices, event-driven, REST API, etc.)
-- Include scale indicators (number of endpoints, integrations, etc.)
-- Be concise but specific - bullet points are fine
-
-What NOT to do:
-- Don't write vague statements like "worked on backend"
-- Don't guess technologies not shown in the diffs
-- Don't include process/methodology unless there's evidence
-- Don't fabricate motivations that aren't supported by the code/commits"""
+        system_prompt = """Read the commits and diffs. Understand what the dev actually shipped.
 
-    if not user_prompt:
-        user_prompt = f"""Analyze commits batch {batch_num}/{total_batches} and extract technical accomplishments:
+Write it up like one dev explaining to another what got done. Use real dev jargon - talk about wiring up endpoints, spinning up services, hooking into APIs, plumbing data through, etc.
 
-{commits_formatted}
+Group related commits into one story. Split unrelated work into separate stories.
 
-List the specific technical work done in this batch. For each item:
-1. What was BUILT (the concrete implementation)
-2. Why it was needed (infer from context: what problem was solved? what user need? what constraint?)
+Per story:
+- title: One punchy line, max 120 chars. Say what was built. Tech details when relevant.
+  Good: "Wire up WebSocket streaming for chat responses"
+  Good: "Plumb user prefs through to the settings modal"
+  Good: "Fix race condition in token refresh flow"
+  Bad: "Improved authentication system" (too vague)
+  Bad: "Enhanced user experience" (meaningless)
+- summary: Markdown. What was built, how it works, any interesting decisions.
 
-Focus on substance, not process."""
+No corporate fluff. No "enhanced", "improved", "robust". Just say what happened."""
+
+    if not user_prompt:
+        user_prompt = f"""Commits batch {batch_num}/{total_batches}:
+
+{commits_formatted}"""
 
     try:
-        response = await client.chat.completions.create(
-            model=model,
-            messages=[
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": user_prompt},
-            ],
-            temperature=EXTRACTION_TEMPERATURE,
-            max_tokens=16000,  # Increased for reasoning models that use tokens for thinking
-        )
-
-        return response.choices[0].message.content or ""
+        if structured:
+            # Use structured output with Pydantic model
+            response = await client.beta.chat.completions.parse(
+                model=model,
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": user_prompt},
+                ],
+                temperature=EXTRACTION_TEMPERATURE,
+                max_tokens=16000,
+                response_format=ExtractedCommitBatch,
+            )
+
+            parsed = response.choices[0].message.parsed
+            if parsed and parsed.stories:
+                # Convert each story to StoryOutput
+                return [
+                    StoryOutput(summary=story.title, content=story.summary)
+                    for story in parsed.stories
+                ]
+            # Fallback if parsing failed (e.g., refusal)
+            content = response.choices[0].message.content or ""
+            return [
+                StoryOutput(
+                    summary=f"Batch {batch_num} analysis",
+                    content=content if content else "[No content extracted]",
+                )
+            ]
+        else:
+            response = await client.chat.completions.create(
+                model=model,
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": user_prompt},
+                ],
+                temperature=EXTRACTION_TEMPERATURE,
+                max_tokens=16000,
+            )
+
+            return response.choices[0].message.content or ""
     except Exception as e:
        error_msg = str(e).lower()
        # Handle content moderation blocks gracefully
        if "blocked" in error_msg or "content" in error_msg or "moderation" in error_msg:
            # Skip this batch but continue with others
+           if structured:
+               return [
+                   StoryOutput(
+                       summary=f"Batch {batch_num} skipped",
+                       content=f"[Batch {batch_num} skipped - content filter triggered]",
+                   )
+               ]
            return f"[Batch {batch_num} skipped - content filter triggered]"
        # Re-raise other errors
        raise
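
For context on the structured branch above: the OpenAI Python SDK's parse() helper accepts a Pydantic model as response_format and exposes the validated instance on message.parsed. A minimal sketch (the model name is a placeholder; assumes the ExtractedCommitBatch model from this diff):

    import asyncio
    from openai import AsyncOpenAI

    async def demo():
        client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
        response = await client.beta.chat.completions.parse(
            model="gpt-4o-mini",  # placeholder model name
            messages=[{"role": "user", "content": "Summarize these commits: ..."}],
            response_format=ExtractedCommitBatch,
        )
        parsed = response.choices[0].message.parsed  # ExtractedCommitBatch | None
        if parsed is None:
            # e.g. refusal or schema mismatch - fall back to the raw text
            return response.choices[0].message.content or ""
        return parsed.stories

    # asyncio.run(demo())
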

{repr_cli-0.2.9 → repr_cli-0.2.11/repr_cli.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: repr-cli
-Version: 0.2.9
+Version: 0.2.11
 Summary: A beautiful, privacy-first CLI that analyzes your code repositories and generates a compelling developer profile
 Author-email: Repr <hello@repr.dev>
 License: MIT License
@@ -48,6 +48,7 @@ Requires-Dist: pygments>=2.16.0
 Requires-Dist: httpx>=0.25.0
 Requires-Dist: openai>=1.0.0
 Requires-Dist: keyring>=24.0.0
+Requires-Dist: pydantic>=2.0.0
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0.0; extra == "dev"
 Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"

{repr_cli-0.2.9 → repr_cli-0.2.11}/repr_cli.egg-info/requires.txt

@@ -5,6 +5,7 @@ pygments>=2.16.0
 httpx>=0.25.0
 openai>=1.0.0
 keyring>=24.0.0
+pydantic>=2.0.0
 
 [dev]
 pytest>=7.0.0