repr-cli 0.2.8-py3-none-any.whl → 0.2.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
repr/doctor.py CHANGED
repr/llm.py CHANGED
@@ -504,3 +504,40 @@ def get_effective_llm_mode() -> tuple[str, dict[str, Any]]:
          "model": llm_config.get("local_model"),
      }
 
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
repr/openai_analysis.py CHANGED
@@ -11,10 +11,23 @@ import asyncio
  from typing import Any
 
  from openai import AsyncOpenAI
+ from pydantic import BaseModel, Field
 
  from .tools import get_commits_with_diffs
  from .discovery import RepoInfo
  from .config import get_litellm_config, get_llm_config, get_api_base
+ from .templates import StoryOutput
+
+
+ class ExtractedStory(BaseModel):
+     """A single coherent block of work."""
+     title: str = Field(description="One-line title, max 120 chars. Dev jargon welcome. e.g. 'Wire up Redis caching for auth tokens'")
+     summary: str = Field(description="Markdown - what was built, how it works, why it matters")
+
+
+ class ExtractedCommitBatch(BaseModel):
+     """Schema for extraction phase output - one or more stories from a batch of commits."""
+     stories: list[ExtractedStory] = Field(description="List of distinct blocks of work found in the commits")
 
 
  # Model configuration (defaults for OpenAI)
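A note on the schema above: the `Field(description=...)` strings are not just documentation. When a Pydantic model is handed to the SDK as `response_format`, it is converted to a JSON schema and those descriptions travel with it, steering the model's structured output. A quick way to inspect what gets sent (standard Pydantic v2; the import path assumes the package as laid out in this diff):

```python
import json

from repr.openai_analysis import ExtractedCommitBatch  # assumes repr-cli >= 0.2.11 installed

# Each Field description lands under the matching property's "description"
# key in the generated JSON schema, which is what the model sees.
print(json.dumps(ExtractedCommitBatch.model_json_schema(), indent=2))
```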
@@ -122,105 +135,137 @@ async def extract_commit_batch(
      batch_num: int,
      total_batches: int,
      model: str = None,
- ) -> str:
+     system_prompt: str = None,
+     user_prompt: str = None,
+     structured: bool = False,
+ ) -> str | list[StoryOutput]:
      """
      Extraction phase: Extract accomplishments from a batch of commits.
-
+
      Args:
          client: OpenAI client
          commits: List of commits with diffs
          batch_num: Current batch number (for context)
          total_batches: Total number of batches
          model: Model name to use (defaults to stored config or DEFAULT_EXTRACTION_MODEL)
-
+         system_prompt: Custom system prompt (optional, uses default if not provided)
+         user_prompt: Custom user prompt (optional, uses default if not provided)
+         structured: If True, return list of StoryOutput with summary/content fields
+
      Returns:
-         Summary of technical accomplishments in this batch
+         Summary of technical accomplishments (str) or list[StoryOutput] if structured=True
      """
      if not model:
          llm_config = get_llm_config()
          model = llm_config.get("extraction_model") or DEFAULT_EXTRACTION_MODEL
-     # Format commits for the prompt
-     commits_text = []
-     for commit in commits:
-         commit_text = f"""
+
+     # Use provided prompts or build defaults
+     if not system_prompt or not user_prompt:
+         # Format commits for the prompt
+         commits_text = []
+         for commit in commits:
+             commit_text = f"""
  Commit: {commit['sha']}
  Date: {commit['date']}
  Message: {commit['message']}
 
  Files changed:"""
-
-         for file_info in commit['files'][:10]:  # Limit files per commit
-             change_type = {
-                 'A': 'Added',
-                 'D': 'Deleted',
-                 'M': 'Modified',
-                 'R': 'Renamed'
-             }.get(file_info['change_type'], 'Changed')
 
-             commit_text += f"\n {change_type}: {file_info['path']}"
+             for file_info in commit['files'][:10]:  # Limit files per commit
+                 change_type = {
+                     'A': 'Added',
+                     'D': 'Deleted',
+                     'M': 'Modified',
+                     'R': 'Renamed'
+                 }.get(file_info['change_type'], 'Changed')
+
+                 commit_text += f"\n {change_type}: {file_info['path']}"
+
+                 if file_info['diff']:
+                     # Truncate diff if too long (for token management)
+                     diff = file_info['diff'][:2000]
+                     commit_text += f"\n```diff\n{diff}\n```"
 
-             if file_info['diff']:
-                 # Truncate diff if too long (for token management)
-                 diff = file_info['diff'][:2000]
-                 commit_text += f"\n```diff\n{diff}\n```"
+             commits_text.append(commit_text)
 
-         commits_text.append(commit_text)
-
-     commits_formatted = "\n\n---\n".join(commits_text)
-
-     system_prompt = """You are analyzing a developer's actual code commits to extract specific technical accomplishments WITH the reasoning behind them.
-
- Your job: Read the commit messages and diffs, then list CONCRETE technical accomplishments with SPECIFIC details AND infer WHY those decisions were made.
-
- For each accomplishment, capture:
- 1. WHAT was built (the technical implementation)
- 2. WHY it was needed (the problem being solved, the user/business need, or the technical constraint)
+         commits_formatted = "\n\n---\n".join(commits_text)
+
+     if not system_prompt:
+         system_prompt = """Read the commits and diffs. Understand what the dev actually shipped.
 
- Rules:
- - Use EXACT technology names from the code (FastAPI, React, SQLAlchemy, not "web framework")
- - Describe SPECIFIC features built (e.g., "JWT authentication with refresh tokens", not "auth system")
- - INFER the motivation when possible:
-   - Performance changes → what latency/throughput problem was being solved?
-   - New features → what user capability was being enabled?
-   - Refactors → what maintainability or scalability issue was being addressed?
-   - Error handling → what failure mode was being prevented?
- - Mention architectural patterns when evident (microservices, event-driven, REST API, etc.)
- - Include scale indicators (number of endpoints, integrations, etc.)
- - Be concise but specific - bullet points are fine
+ Write it up like one dev explaining to another what got done. Use real dev jargon - talk about wiring up endpoints, spinning up services, hooking into APIs, plumbing data through, etc.
 
- What NOT to do:
- - Don't write vague statements like "worked on backend"
- - Don't guess technologies not shown in the diffs
- - Don't include process/methodology unless there's evidence
- - Don't fabricate motivations that aren't supported by the code/commits"""
+ Group related commits into one story. Split unrelated work into separate stories.
 
-     user_prompt = f"""Analyze commits batch {batch_num}/{total_batches} and extract technical accomplishments:
+ Per story:
+ - title: One punchy line, max 120 chars. Say what was built. Tech details when relevant.
+   Good: "Wire up WebSocket streaming for chat responses"
+   Good: "Plumb user prefs through to the settings modal"
+   Good: "Fix race condition in token refresh flow"
+   Bad: "Improved authentication system" (too vague)
+   Bad: "Enhanced user experience" (meaningless)
+ - summary: Markdown. What was built, how it works, any interesting decisions.
 
- {commits_formatted}
+ No corporate fluff. No "enhanced", "improved", "robust". Just say what happened."""
 
- List the specific technical work done in this batch. For each item:
- 1. What was BUILT (the concrete implementation)
- 2. Why it was needed (infer from context: what problem was solved? what user need? what constraint?)
+     if not user_prompt:
+         user_prompt = f"""Commits batch {batch_num}/{total_batches}:
 
- Focus on substance, not process."""
+ {commits_formatted}"""
 
      try:
-         response = await client.chat.completions.create(
-             model=model,
-             messages=[
-                 {"role": "system", "content": system_prompt},
-                 {"role": "user", "content": user_prompt},
-             ],
-             temperature=EXTRACTION_TEMPERATURE,
-             max_tokens=16000,  # Increased for reasoning models that use tokens for thinking
-         )
-
-         return response.choices[0].message.content or ""
+         if structured:
+             # Use structured output with Pydantic model
+             response = await client.beta.chat.completions.parse(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": system_prompt},
+                     {"role": "user", "content": user_prompt},
+                 ],
+                 temperature=EXTRACTION_TEMPERATURE,
+                 max_tokens=16000,
+                 response_format=ExtractedCommitBatch,
+             )
+
+             parsed = response.choices[0].message.parsed
+             if parsed and parsed.stories:
+                 # Convert each story to StoryOutput
+                 return [
+                     StoryOutput(summary=story.title, content=story.summary)
+                     for story in parsed.stories
+                 ]
+             # Fallback if parsing failed (e.g., refusal)
+             content = response.choices[0].message.content or ""
+             return [
+                 StoryOutput(
+                     summary=f"Batch {batch_num} analysis",
+                     content=content if content else "[No content extracted]",
+                 )
+             ]
+         else:
+             response = await client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": system_prompt},
+                     {"role": "user", "content": user_prompt},
+                 ],
+                 temperature=EXTRACTION_TEMPERATURE,
+                 max_tokens=16000,
+             )
+
+             return response.choices[0].message.content or ""
      except Exception as e:
          error_msg = str(e).lower()
          # Handle content moderation blocks gracefully
          if "blocked" in error_msg or "content" in error_msg or "moderation" in error_msg:
              # Skip this batch but continue with others
+             if structured:
+                 return [
+                     StoryOutput(
+                         summary=f"Batch {batch_num} skipped",
+                         content=f"[Batch {batch_num} skipped - content filter triggered]",
+                     )
+                 ]
              return f"[Batch {batch_num} skipped - content filter triggered]"
          # Re-raise other errors
          raise
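Taken together, the `structured=True` branch asks the SDK to parse the completion straight into `ExtractedCommitBatch` via `client.beta.chat.completions.parse`, then flattens each story into a `StoryOutput`. A minimal end-to-end sketch of that flow, assuming `OPENAI_API_KEY` is set, with a hypothetical one-commit batch and `gpt-4o-mini` standing in for the configured extraction model:

```python
import asyncio

from openai import AsyncOpenAI
from pydantic import BaseModel, Field


class ExtractedStory(BaseModel):
    """Mirrors the schema added in this release."""
    title: str = Field(description="One-line title, max 120 chars")
    summary: str = Field(description="Markdown - what was built, how it works, why it matters")


class ExtractedCommitBatch(BaseModel):
    stories: list[ExtractedStory] = Field(description="Distinct blocks of work found in the commits")


async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    response = await client.beta.chat.completions.parse(
        model="gpt-4o-mini",  # assumption: any structured-output-capable model works here
        messages=[
            {"role": "system", "content": "Group these commits into distinct stories."},
            {"role": "user", "content": "Commit: abc123\nMessage: Wire up Redis caching for auth tokens"},
        ],
        response_format=ExtractedCommitBatch,
    )
    parsed = response.choices[0].message.parsed
    if parsed is None:
        # Same situation the diff's fallback branch handles: a refusal or a
        # response that failed schema validation leaves .parsed unset.
        print(response.choices[0].message.content or "[No content extracted]")
        return
    for story in parsed.stories:
        print(f"{story.title}\n{story.summary}\n")


asyncio.run(main())
```

The fallback branches in the diff exist because `.parsed` comes back `None` whenever the model refuses or emits JSON that fails validation, so structured mode can always hand the caller a `list[StoryOutput]`.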
repr/telemetry.py CHANGED
repr/templates.py CHANGED
@@ -162,7 +162,12 @@ def format_commits_for_prompt(commits: list[dict[str, Any]]) -> str:
 
          lines.append(f"- [{sha}] {msg}")
          if files:
-             lines.append(f" Files: {', '.join(files[:5])}")
+             # Handle files as either list of dicts or list of strings
+             file_names = [
+                 f["path"] if isinstance(f, dict) else f
+                 for f in files[:5]
+             ]
+             lines.append(f" Files: {', '.join(file_names)}")
          if len(files) > 5:
              lines.append(f" ... and {len(files) - 5} more files")
          if c.get("insertions") or c.get("deletions"):
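The templates.py change makes `format_commits_for_prompt` tolerant of both shapes the `files` field can take, presumably dict entries from the diff extractor and bare strings from older stored commits. A standalone sketch of the normalization, with hypothetical input data:

```python
# Hypothetical commit record: "files" mixes dict entries and plain strings.
files = [{"path": "repr/llm.py"}, "repr/ui.py", {"path": "README.md"}]

# Same comprehension as the diff: pull "path" out of dicts, pass strings through.
file_names = [
    f["path"] if isinstance(f, dict) else f
    for f in files[:5]
]
print(f"Files: {', '.join(file_names)}")  # Files: repr/llm.py, repr/ui.py, README.md
```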
repr/ui.py CHANGED
@@ -143,3 +143,40 @@ def confirm(message: str, default: bool = False) -> bool:
      """Prompt for confirmation."""
      return Confirm.ask(message, default=default)
 
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
repr_cli-0.2.8.dist-info/METADATA → repr_cli-0.2.11.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: repr-cli
- Version: 0.2.8
+ Version: 0.2.11
  Summary: A beautiful, privacy-first CLI that analyzes your code repositories and generates a compelling developer profile
  Author-email: Repr <hello@repr.dev>
  License: MIT License
@@ -48,6 +48,7 @@ Requires-Dist: pygments>=2.16.0
  Requires-Dist: httpx>=0.25.0
  Requires-Dist: openai>=1.0.0
  Requires-Dist: keyring>=24.0.0
+ Requires-Dist: pydantic>=2.0.0
  Provides-Extra: dev
  Requires-Dist: pytest>=7.0.0; extra == "dev"
  Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
@@ -71,7 +72,7 @@ Turn commits into professional narratives for interviews, performance reviews, a
  > *"I used repr to prep for my Meta interview in 30 minutes. Turned 2 years of commits into 8 STAR-format stories. Nailed every behavioral question."*
  > **— Sarah, Senior Backend Engineer**
 
- > *"Our sprint demos went from chaos to polished in 5 minutes. Just run `repr since '2 weeks ago'` and export. Stakeholders love it."*
+ > *"Our sprint demos went from chaos to polished in 5 minutes. Just run `repr commits --days 14` and export. Stakeholders love it."*
  > **— Marcus, Engineering Manager**
 
  > *"I run repr in a fully air-gapped environment. Zero network calls, 100% local. It's the only tool I trust for this."*
@@ -167,7 +168,7 @@ For full step-by-step guides, see the [documentation](https://repr.dev/docs/cli/
 
  ```bash
  repr init ~/code
- repr week
+ repr commits --days 7
  repr generate --local
  ```
 
@@ -186,7 +187,7 @@ repr review
  ### Weekly reflection
 
  ```bash
- repr week
+ repr commits --days 7
  repr generate --local
  repr story edit <id>
  repr story feature <id>
@@ -204,6 +205,20 @@ repr story view <id>
 
  [Full guide →](https://repr.dev/docs/cli/workflows/interview-prep)
 
+ ### Generate from a specific timeframe
+
+ ```bash
+ # Last 30 days
+ repr generate --days 30 --local
+
+ # Since a specific date
+ repr generate --since 2024-01-01 --local
+
+ # Natural language dates
+ repr generate --since "2 weeks ago" --local
+ repr generate --since monday --local
+ ```
+
  ### Publish your profile (optional)
 
  ```bash
@@ -274,7 +289,7 @@ repr llm use byok:openai
  | **Local LLM** | `repr generate --local` | Talks only to your local endpoint. |
  | **BYOK** | `repr llm add <provider>` | Calls your provider directly with your key. |
  | **Cloud** | `repr generate --cloud` | Requires login; you initiate all network calls. |
- | **Offline** | `repr week` / `repr stories` | Pure local operations. |
+ | **Offline** | `repr commits` / `repr stories` | Pure local operations. |
 
  ## Command help
 
repr_cli-0.2.11.dist-info/RECORD ADDED
@@ -0,0 +1,26 @@
+ repr/__init__.py,sha256=jraImidqaPxv03Uy76zPtnAcNnOl5KLZSXYBzxI85BI,446
+ repr/__main__.py,sha256=edYQ5TsuidoAKR1DSuTNcRKudj4lijHZGURb_CSem1M,164
+ repr/api.py,sha256=Rr6MEUkjf7LJ6TcxbdVstfpUM_mDpTKhllbFwy9jK2w,11893
+ repr/auth.py,sha256=-tqd2MMgFlowbhAqLHeSnVpDBkintkZ4kmPDZmczQFU,11682
+ repr/cli.py,sha256=2MwmF5-giuETiCfzN-udIUuf6GtXTSK9k-DcFduHs9U,85938
+ repr/config.py,sha256=GZf5ucrBFIfOo9UtKE-DAZ9Ns1suAKG0jvUAY64oGIc,30601
+ repr/discovery.py,sha256=2RYmJleqV7TbxIMMYP2izkEBUeKH7U1F-U4KAUlUNww,14816
+ repr/doctor.py,sha256=cD-XLCVXfME0DsgOWU4VUOv28O4avjSQmMK6X_Kddyk,13441
+ repr/extractor.py,sha256=lGPN8gwTF_ZSezoQoPBMnf95nCJArGIteNiInfb39FM,10566
+ repr/hooks.py,sha256=DRpVXVv5Lesn9ARKHr-I91bUScab2It2TPjdwM38bT4,16864
+ repr/keychain.py,sha256=CpKU3tjFZVEPgiHiplSAtBQFDPA6qOSovv4IXXgJXbY,6957
+ repr/llm.py,sha256=kLcRq2wZTiGJKmkzUIBCpybD4S4efTGc87f_GL4IlDU,14635
+ repr/openai_analysis.py,sha256=IJeg-8IhTbNLdCrxDBUyGrcLA8fFEGiIwz77v1KSkqM,27575
+ repr/privacy.py,sha256=r-HvQ4C56whI9Cbp4AHMwULvueBdYaO0pu3U1AoqB9M,9832
+ repr/storage.py,sha256=72nfFcR2Y98vpSjaO7zVHisq_Ln2UrHmGyDhEqEmDjU,14863
+ repr/telemetry.py,sha256=OR9w3v4VrVYu3C4Q0GQsX3-2JXSQvUhiYnkC6S5YOGc,6998
+ repr/templates.py,sha256=3YlhiUtnmsj6eQw3BPvvDty0-lf7oLxuYlbISE0KQrI,6733
+ repr/tools.py,sha256=QoGeti5Sye2wVuE-7UPxd_TDNXoen-xYfsFoT9rYRPs,20737
+ repr/ui.py,sha256=3Fc1ugwtfySIuB5LSE5R18tMHbz211TZNolVDlwKDAU,4031
+ repr/updater.py,sha256=sn3VEwtPkn1LLla2hGO53EmrY_ToRfMlDbFQawKhSZ4,7333
+ repr_cli-0.2.11.dist-info/licenses/LICENSE,sha256=tI16Ry3IQhjsde6weJ_in6czzWW2EF4Chz1uicyDLAA,1061
+ repr_cli-0.2.11.dist-info/METADATA,sha256=Cd-nSan4kRv9vXc6B7Lghc49MqTBj4GFFSEJw1c8v6w,11228
+ repr_cli-0.2.11.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ repr_cli-0.2.11.dist-info/entry_points.txt,sha256=SJoKgNB-fRy6O2T_lztFr9T3ND_BQl0ijWxNW-J7dUU,38
+ repr_cli-0.2.11.dist-info/top_level.txt,sha256=LNgPqdJPQnlicRve7uzI4a6rEUdcxHrNkUq_2w7eeiA,5
+ repr_cli-0.2.11.dist-info/RECORD,,
repr_cli-0.2.8.dist-info/RECORD REMOVED
@@ -1,26 +0,0 @@
- repr/__init__.py,sha256=jraImidqaPxv03Uy76zPtnAcNnOl5KLZSXYBzxI85BI,446
- repr/__main__.py,sha256=M0ECtxOrmmYoYrYV5XI9UhDnOjWThxn0-PPysKs3RT0,127
- repr/api.py,sha256=Rr6MEUkjf7LJ6TcxbdVstfpUM_mDpTKhllbFwy9jK2w,11893
- repr/auth.py,sha256=-tqd2MMgFlowbhAqLHeSnVpDBkintkZ4kmPDZmczQFU,11682
- repr/cli.py,sha256=xCz40ZRx_sOznTxS7GP4m0aArN7d7c4QCpFLh2t_FYU,78218
- repr/config.py,sha256=GZf5ucrBFIfOo9UtKE-DAZ9Ns1suAKG0jvUAY64oGIc,30601
- repr/discovery.py,sha256=2RYmJleqV7TbxIMMYP2izkEBUeKH7U1F-U4KAUlUNww,14816
- repr/doctor.py,sha256=6cI21xXIlTNRzHi2fRfHpm__erO8jBZc6vge8-29ip4,13404
- repr/extractor.py,sha256=lGPN8gwTF_ZSezoQoPBMnf95nCJArGIteNiInfb39FM,10566
- repr/hooks.py,sha256=DRpVXVv5Lesn9ARKHr-I91bUScab2It2TPjdwM38bT4,16864
- repr/keychain.py,sha256=CpKU3tjFZVEPgiHiplSAtBQFDPA6qOSovv4IXXgJXbY,6957
- repr/llm.py,sha256=gTYloz7ONTpFQm73YFIVGOrjsk0iyocMTM4YkF4s4xI,14598
- repr/openai_analysis.py,sha256=-9POoLF6B15_oBKJw_CjKH2DuWEIgIlOmtyjS4Gjbck,25764
- repr/privacy.py,sha256=HITso2pzwN8R0Izh3SjUsrzcpjVw5bJEhbippAGeMiY,9795
- repr/storage.py,sha256=72nfFcR2Y98vpSjaO7zVHisq_Ln2UrHmGyDhEqEmDjU,14863
- repr/telemetry.py,sha256=7ANJJUB4Dd7A_HFVPqc92Gy77ruREzlmgayFQkwuC9s,6961
- repr/templates.py,sha256=RQl7nUfy8IK6QFKzgpcebkBbQH0E_brbYh83pzym1TM,6530
- repr/tools.py,sha256=QoGeti5Sye2wVuE-7UPxd_TDNXoen-xYfsFoT9rYRPs,20737
- repr/ui.py,sha256=5jycUT-5Q0az4FFUzgarI8CfVAEEUPSEsT24Fad2kG8,3994
- repr/updater.py,sha256=E2ZRfeQxA4_UrWmIphJkafrUHU2UGUpfINNVLuTIcfI,7296
- repr_cli-0.2.8.dist-info/licenses/LICENSE,sha256=tI16Ry3IQhjsde6weJ_in6czzWW2EF4Chz1uicyDLAA,1061
- repr_cli-0.2.8.dist-info/METADATA,sha256=5YVmEcJf4Dt2OfAxaIND95w-wkZ-XBhddndijKFjTfw,10898
- repr_cli-0.2.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- repr_cli-0.2.8.dist-info/entry_points.txt,sha256=SJoKgNB-fRy6O2T_lztFr9T3ND_BQl0ijWxNW-J7dUU,38
- repr_cli-0.2.8.dist-info/top_level.txt,sha256=LNgPqdJPQnlicRve7uzI4a6rEUdcxHrNkUq_2w7eeiA,5
- repr_cli-0.2.8.dist-info/RECORD,,