repr-cli 0.2.22__py3-none-any.whl → 0.2.24__py3-none-any.whl

This diff compares the contents of two publicly released package versions from a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as published.
repr/change_synthesis.py CHANGED
@@ -418,14 +418,25 @@ async def explain_group(
         changes=changes_str,
     )
 
-    response = await client.chat.completions.create(
-        model=model,
-        messages=[
-            {"role": "system", "content": GROUP_EXPLAIN_SYSTEM},
-            {"role": "user", "content": prompt},
-        ],
-        temperature=0.7,
-    )
+    async def make_request(use_temperature: bool = True):
+        kwargs = {
+            "model": model,
+            "messages": [
+                {"role": "system", "content": GROUP_EXPLAIN_SYSTEM},
+                {"role": "user", "content": prompt},
+            ],
+        }
+        if use_temperature:
+            kwargs["temperature"] = 0.7
+        return await client.chat.completions.create(**kwargs)
+
+    try:
+        response = await make_request(use_temperature=True)
+    except Exception as e:
+        if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
+            response = await make_request(use_temperature=False)
+        else:
+            raise
 
     return response.choices[0].message.content.strip()
 
@@ -455,15 +466,26 @@ async def synthesize_changes(
         unpushed=format_commit_changes(report.unpushed),
     )
 
-    response = await client.chat.completions.create(
-        model=model,
-        messages=[
-            {"role": "system", "content": CHANGE_SYNTHESIS_SYSTEM},
-            {"role": "user", "content": prompt},
-        ],
-        response_format={"type": "json_object"},
-        temperature=0.7,
-    )
+    async def make_synthesis_request(use_temperature: bool = True):
+        kwargs = {
+            "model": model,
+            "messages": [
+                {"role": "system", "content": CHANGE_SYNTHESIS_SYSTEM},
+                {"role": "user", "content": prompt},
+            ],
+            "response_format": {"type": "json_object"},
+        }
+        if use_temperature:
+            kwargs["temperature"] = 0.7
+        return await client.chat.completions.create(**kwargs)
+
+    try:
+        response = await make_synthesis_request(use_temperature=True)
+    except Exception as e:
+        if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
+            response = await make_synthesis_request(use_temperature=False)
+        else:
+            raise
 
     content = response.choices[0].message.content
     data = json.loads(content)
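
The same fallback appears at every LLM call site touched by this release: build the request kwargs, try once with `temperature`, and retry without it when the provider rejects the parameter (newer OpenAI reasoning models, for example, only accept the default). A minimal standalone sketch of the pattern, assuming an OpenAI-compatible `AsyncOpenAI` client; the function name and arguments are illustrative, not part of the package:

```python
from openai import AsyncOpenAI

client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment

async def create_with_temperature_fallback(model: str, messages: list, temperature: float):
    kwargs = {"model": model, "messages": messages, "temperature": temperature}
    try:
        return await client.chat.completions.create(**kwargs)
    except Exception as e:
        # Same heuristic as the diff: look for "temperature" and "unsupported"
        # in the error text, then retry once with the parameter omitted.
        msg = str(e).lower()
        if "temperature" in msg and "unsupported" in msg:
            kwargs.pop("temperature")
            return await client.chat.completions.create(**kwargs)
        raise
```

A shared helper like this would avoid the four near-identical copies below, but the release keeps each call site self-contained.
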
repr/cli.py CHANGED
@@ -6075,16 +6075,29 @@ def create_branch(
 
     prompt = COMMIT_MESSAGE_USER.format(changes=changes_str)
 
-    with create_spinner("Generating branch name..."):
-        response = asyncio.run(client.chat.completions.create(
-            model="gpt-4o-mini",
-            messages=[
+    async def make_branch_request(use_temperature: bool = True):
+        kwargs = {
+            "model": "gpt-4o-mini",
+            "messages": [
                 {"role": "system", "content": COMMIT_MESSAGE_SYSTEM},
                 {"role": "user", "content": prompt},
             ],
-            response_format={"type": "json_object"},
-            temperature=0.3,
-        ))
+            "response_format": {"type": "json_object"},
+        }
+        if use_temperature:
+            kwargs["temperature"] = 0.3
+        return await client.chat.completions.create(**kwargs)
+
+    async def get_branch_response():
+        try:
+            return await make_branch_request(use_temperature=True)
+        except Exception as e:
+            if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
+                return await make_branch_request(use_temperature=False)
+            raise
+
+    with create_spinner("Generating branch name..."):
+        response = asyncio.run(get_branch_response())
     data = json.loads(response.choices[0].message.content)
     branch_name = data.get("branch", "")
     commit_msg = data.get("message", "")
@@ -6227,16 +6240,29 @@ def commit_staged(
     changes_str = format_file_changes(staged)
     prompt = COMMIT_MESSAGE_USER.format(changes=changes_str)
 
-    with create_spinner("Generating commit message..."):
-        response = asyncio.run(client.chat.completions.create(
-            model="gpt-4o-mini",
-            messages=[
+    async def make_commit_request(use_temperature: bool = True):
+        kwargs = {
+            "model": "gpt-4o-mini",
+            "messages": [
                 {"role": "system", "content": COMMIT_MESSAGE_SYSTEM},
                 {"role": "user", "content": prompt},
             ],
-            response_format={"type": "json_object"},
-            temperature=0.3,
-        ))
+            "response_format": {"type": "json_object"},
+        }
+        if use_temperature:
+            kwargs["temperature"] = 0.3
+        return await client.chat.completions.create(**kwargs)
+
+    async def get_commit_response():
+        try:
+            return await make_commit_request(use_temperature=True)
+        except Exception as e:
+            if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
+                return await make_commit_request(use_temperature=False)
+            raise
+
+    with create_spinner("Generating commit message..."):
+        response = asyncio.run(get_commit_response())
     data = json.loads(response.choices[0].message.content)
     branch_name = data.get("branch", "")
     commit_msg = data.get("message", "")
@@ -6665,16 +6691,29 @@ def create_pr(
 
     prompt = PR_USER.format(commits=commits_text)
 
-    with create_spinner("Generating PR..."):
-        response = asyncio.run(client.chat.completions.create(
-            model="gpt-4o-mini",
-            messages=[
+    async def make_pr_request(use_temperature: bool = True):
+        kwargs = {
+            "model": "gpt-4o-mini",
+            "messages": [
                 {"role": "system", "content": PR_SYSTEM},
                 {"role": "user", "content": prompt},
             ],
-            response_format={"type": "json_object"},
-            temperature=0.3,
-        ))
+            "response_format": {"type": "json_object"},
+        }
+        if use_temperature:
+            kwargs["temperature"] = 0.3
+        return await client.chat.completions.create(**kwargs)
+
+    async def get_pr_response():
+        try:
+            return await make_pr_request(use_temperature=True)
+        except Exception as e:
+            if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
+                return await make_pr_request(use_temperature=False)
+            raise
+
+    with create_spinner("Generating PR..."):
+        response = asyncio.run(get_pr_response())
     data = json.loads(response.choices[0].message.content)
     pr_title = data.get("title", current_branch)
     pr_body = data.get("body", "")
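
cli.py's commands are synchronous, so each call site pairs the kwargs builder with a second async wrapper and drives it with a single `asyncio.run()` inside the spinner. A condensed sketch of that shape under the same assumptions as above (`client`, the prompt, and the JSON fields are stand-ins):

```python
import asyncio
import json

def generate_branch_and_message(client, prompt: str) -> tuple[str, str]:
    async def make_request(use_temperature: bool = True):
        kwargs = {
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": prompt}],
            "response_format": {"type": "json_object"},
        }
        if use_temperature:
            kwargs["temperature"] = 0.3
        return await client.chat.completions.create(**kwargs)

    async def get_response():
        try:
            return await make_request(use_temperature=True)
        except Exception as e:
            if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
                return await make_request(use_temperature=False)
            raise

    # One short-lived event loop per command, as in the diff.
    response = asyncio.run(get_response())
    data = json.loads(response.choices[0].message.content)
    return data.get("branch", ""), data.get("message", "")
```
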
repr/session_extractor.py CHANGED
@@ -319,18 +319,29 @@ class SessionExtractor:
         # Call LLM (using sync client to avoid event loop cleanup issues)
         client = self._get_client()
 
-        try:
-            response = client.chat.completions.create(
-                model=self.model.split("/")[-1] if "/" in self.model else self.model,
-                messages=[
+        def make_extraction_request(use_temperature: bool = True):
+            kwargs = {
+                "model": self.model.split("/")[-1] if "/" in self.model else self.model,
+                "messages": [
                     {"role": "system", "content": EXTRACTION_SYSTEM_PROMPT},
                     {"role": "user", "content": EXTRACTION_USER_PROMPT.format(
                         session_transcript=transcript
                     )},
                 ],
-                response_format={"type": "json_object"},
-                temperature=0.3,
-            )
+                "response_format": {"type": "json_object"},
+            }
+            if use_temperature:
+                kwargs["temperature"] = 0.3
+            return client.chat.completions.create(**kwargs)
+
+        try:
+            try:
+                response = make_extraction_request(use_temperature=True)
+            except Exception as e:
+                if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
+                    response = make_extraction_request(use_temperature=False)
+                else:
+                    raise
 
             # Parse response
             content = response.choices[0].message.content
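
Here the client is synchronous and the fallback nests inside the pre-existing outer `try`, so extraction errors other than the temperature rejection still reach the original handler (which sits below this hunk). A sketch of that nesting, with a hypothetical outer handler standing in for the real one:

```python
def run_extraction(client, model: str, messages: list):
    def make_request(use_temperature: bool = True):
        kwargs = {
            "model": model,
            "messages": messages,
            "response_format": {"type": "json_object"},
        }
        if use_temperature:
            kwargs["temperature"] = 0.3
        return client.chat.completions.create(**kwargs)

    try:
        # Inner try handles only the temperature fallback; anything else
        # re-raises into the outer handler unchanged.
        try:
            response = make_request(use_temperature=True)
        except Exception as e:
            if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
                response = make_request(use_temperature=False)
            else:
                raise
        return response.choices[0].message.content
    except Exception:
        return None  # hypothetical outer handler; the real one is outside the hunk
```
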
repr/story_synthesis.py CHANGED
@@ -559,10 +559,20 @@ class StorySynthesizer:
                 return llm_config["local_model"]
             elif default_mode == "cloud" and llm_config.get("cloud_model"):
                 return llm_config["cloud_model"]
-
-            # Fallback to any configured model
-            if llm_config.get("local_model"):
-                return llm_config["local_model"]
+            elif default_mode.startswith("byok:"):
+                # BYOK mode - use the provider's configured model
+                provider = default_mode.split(":", 1)[1]
+                byok_config = llm_config.get("byok", {}).get(provider, {})
+                if byok_config.get("model"):
+                    return byok_config["model"]
+
+            # Fallback to BYOK model if configured
+            byok = llm_config.get("byok", {})
+            for provider_config in byok.values():
+                if provider_config.get("model"):
+                    return provider_config["model"]
+
+            # Fallback to cloud model
             if llm_config.get("cloud_model"):
                 return llm_config["cloud_model"]
         except Exception:
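
This hunk also changes model resolution: `default_mode` can now be `byok:<provider>`, resolved against a `byok` table in the LLM config. A sketch of the config shape this lookup implies (the concrete keys and values here are guesses beyond what the diff shows):

```python
# Hypothetical config illustrating the new resolution order:
llm_config = {
    "local_model": None,
    "cloud_model": "gpt-4o-mini",
    "byok": {
        "openrouter": {"model": "anthropic/claude-3.5-sonnet"},
    },
}

default_mode = "byok:openrouter"
if default_mode.startswith("byok:"):
    provider = default_mode.split(":", 1)[1]                   # "openrouter"
    byok_config = llm_config.get("byok", {}).get(provider, {})
    model = byok_config.get("model")                           # provider's model wins
# Failing that: the first BYOK provider with a model, then cloud_model.
```

Note that the old unconditional `local_model` fallback is dropped: when no BYOK provider matches, resolution now falls through to any configured BYOK model and then to `cloud_model` only.
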
@@ -648,18 +658,29 @@ class StorySynthesizer:
         client = self._get_client()
         commits_text = self._format_commits_for_prompt(commits)
 
-        try:
-            response = client.chat.completions.create(
-                model=self.model.split("/")[-1] if "/" in self.model else self.model,
-                messages=[
+        def make_request(use_temperature: bool = True):
+            kwargs = {
+                "model": self.model.split("/")[-1] if "/" in self.model else self.model,
+                "messages": [
                     {"role": "system", "content": STORY_SYNTHESIS_SYSTEM},
                     {"role": "user", "content": STORY_SYNTHESIS_USER.format(
                         commits_text=commits_text
                     )},
                 ],
-                response_format={"type": "json_object"},
-                temperature=0.3,
-            )
+                "response_format": {"type": "json_object"},
+            }
+            if use_temperature:
+                kwargs["temperature"] = 0.3
+            return client.chat.completions.create(**kwargs)
+
+        try:
+            try:
+                response = make_request(use_temperature=True)
+            except Exception as e:
+                if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
+                    response = make_request(use_temperature=False)
+                else:
+                    raise
 
             content = response.choices[0].message.content
 
@@ -1061,17 +1082,28 @@ async def transform_story_for_feed(
     )
     response_model = InternalStory
 
-    try:
-        # Use sync client to avoid event loop cleanup issues
-        response = client.chat.completions.create(
-            model=model_name.split("/")[-1] if "/" in model_name else model_name,
-            messages=[
+    def make_story_request(use_temperature: bool = True):
+        kwargs = {
+            "model": model_name.split("/")[-1] if "/" in model_name else model_name,
+            "messages": [
                 {"role": "system", "content": system_prompt},
                 {"role": "user", "content": user_prompt},
             ],
-            response_format={"type": "json_object"},
-            temperature=0.7,
-        )
+            "response_format": {"type": "json_object"},
+        }
+        if use_temperature:
+            kwargs["temperature"] = 0.7
+        return client.chat.completions.create(**kwargs)
+
+    try:
+        # Use sync client to avoid event loop cleanup issues
+        try:
+            response = make_story_request(use_temperature=True)
+        except Exception as e:
+            if "temperature" in str(e).lower() and "unsupported" in str(e).lower():
+                response = make_story_request(use_temperature=False)
+            else:
+                raise
 
     content = response.choices[0].message.content.strip()
 
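One property of the detection is worth noting: it matches on the error text rather than an exception type or error code, so it fires on any message containing both words. A quick check of the heuristic's behavior (the sample messages are illustrative, not captured API output):

```python
def is_temperature_unsupported(e: Exception) -> bool:
    # Same substring heuristic used at every call site in this release.
    msg = str(e).lower()
    return "temperature" in msg and "unsupported" in msg

assert is_temperature_unsupported(
    Exception("Unsupported value: 'temperature' does not support 0.7 with this model.")
)
assert not is_temperature_unsupported(Exception("Rate limit exceeded"))
assert not is_temperature_unsupported(Exception("temperature must be between 0 and 2"))
```
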
{repr_cli-0.2.22.dist-info → repr_cli-0.2.24.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: repr-cli
-Version: 0.2.22
+Version: 0.2.24
 Summary: A beautiful, privacy-first CLI that analyzes your code repositories and generates a compelling developer profile
 Author-email: Repr <hello@repr.dev>
 License: MIT License
{repr_cli-0.2.22.dist-info → repr_cli-0.2.24.dist-info}/RECORD RENAMED
@@ -2,8 +2,8 @@ repr/__init__.py,sha256=_YVX4f3_NtxGhDquXGSiaxkfAG2BdWZlB4MbobLsO74,447
 repr/__main__.py,sha256=N7amYwdGB3yzk2ZJJbtH2hhESNkDuhDL11dDEm5Kl60,166
 repr/api.py,sha256=rJRn_4xZXipdBFMrsZbQPWfZKfPLWJpTI0uYUyvjFhw,22814
 repr/auth.py,sha256=TpqwqwZ3tAEolcSYu-zD8oHhzfwHALkauPP1xg5VTiY,12208
-repr/change_synthesis.py,sha256=z7GmCeEHQFlnqLtKDGDvlM7p9MAWl_ByeIJstEVAhbU,15223
-repr/cli.py,sha256=Dy466jKddASU6OvCgtWbik4X3p-xIymQ1R2CzMBfedQ,226157
+repr/change_synthesis.py,sha256=0Dy16cALop3BwElZe9QAwy3R9ZpY1yBKTKhRJJ4M3Ik,16126
+repr/cli.py,sha256=V0qoAYLzPDtbynWyQGJ5ixtrTk1DsL8vA6szeRRGr_8,227881
 repr/config.py,sha256=S69hdgFdvcHoIO2zihuvsSAQf2Gp41JtC5GGlE4Cy78,34233
 repr/configure.py,sha256=GnwjOC64F0uDD90IjA6LJNev8FlHHAHARuSLwBqI6k0,26860
 repr/cron.py,sha256=Hvo9ssVmGn09dLIHKWqzorKkW7eXdLQnQlBzagTX2Ko,11402
@@ -18,9 +18,9 @@ repr/mcp_server.py,sha256=IhQM35bMD5c-6ASYIAa9VfnrvxzvFlZntUqkBm08Xqk,39752
 repr/models.py,sha256=mQAkP1bBiAFPweC0OxU-UwKNLZkinvVYHB0KjItHt3Q,20093
 repr/openai_analysis.py,sha256=Pz3KMT5B91dcHOKPUQlFoMpTKlzjDO5idnmhkyAb6y8,31965
 repr/privacy.py,sha256=sN1tkoZjCDSwAjkQWeH6rHaLrtv727yT1HNHQ54GRis,9834
-repr/session_extractor.py,sha256=t1rEyhndjxMREt3gfmcGBYzFGEwzt1kAYbmXPq-QbU8,17104
+repr/session_extractor.py,sha256=7rTtce0lnQSHqW92pl5VRY479JW_NKkM_q4Q_vvJ5sk,17591
 repr/storage.py,sha256=y_EYYKrUD2qNRKK2_vdjsIlPIq-IzfaNMUyj9aHofpQ,24223
-repr/story_synthesis.py,sha256=iRrW23egiLdXyzgdI9WsnOmhhWwQEk8-43elL7l-f-E,46609
+repr/story_synthesis.py,sha256=-X9y5rzVFjujLfsjY7DdFTS7r0dLGfiLkbXBoozLYA0,47982
 repr/telemetry.py,sha256=M1NribTkiezpvweLrdbJxKDU2mlTe7frke6sUP0Yhiw,7000
 repr/templates.py,sha256=5Z3vftQMn87ufvEVt0uWx_gagmvdZGoNxjD1Q9ZbS0w,11029
 repr/timeline.py,sha256=z84PL_CfYikiNkz0oN4_glLxOQIQCeCUIGwXYvS6Dfk,22527
@@ -50,9 +50,9 @@ repr/loaders/base.py,sha256=AE9lFr8ZvPYt6KDwBTkNv3JF5A2QakVn9gA_ha77GLU,4308
 repr/loaders/claude_code.py,sha256=sWAiQgNVWsdw9qUDcfHDBi5k6jBL7D8_SI3NAEsL-io,11106
 repr/loaders/clawdbot.py,sha256=daKfTjI16tZrlwGUNaVOnLwxKyV6eW102CgIOu4mwAw,12064
 repr/loaders/gemini_antigravity.py,sha256=_0HhtC1TwB2gSu20Bcco_W-V3Bt6v9O2iqOL6kIHQLU,13766
-repr_cli-0.2.22.dist-info/licenses/LICENSE,sha256=tI16Ry3IQhjsde6weJ_in6czzWW2EF4Chz1uicyDLAA,1061
-repr_cli-0.2.22.dist-info/METADATA,sha256=hj5gXRgLB7yBxBKAs7L5K7HidSCwOVPH8EpWJ8CWSE0,13387
-repr_cli-0.2.22.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-repr_cli-0.2.22.dist-info/entry_points.txt,sha256=dlI-TCeDTW2rBC_nvOvMhwLihU4qsgD5r4Ot5BuVqSw,56
-repr_cli-0.2.22.dist-info/top_level.txt,sha256=LNgPqdJPQnlicRve7uzI4a6rEUdcxHrNkUq_2w7eeiA,5
-repr_cli-0.2.22.dist-info/RECORD,,
+repr_cli-0.2.24.dist-info/licenses/LICENSE,sha256=tI16Ry3IQhjsde6weJ_in6czzWW2EF4Chz1uicyDLAA,1061
+repr_cli-0.2.24.dist-info/METADATA,sha256=1SVhA2nkk0Du5bikdJKxph1pe-3MQj2TXnbUmkjM12k,13387
+repr_cli-0.2.24.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+repr_cli-0.2.24.dist-info/entry_points.txt,sha256=dlI-TCeDTW2rBC_nvOvMhwLihU4qsgD5r4Ot5BuVqSw,56
+repr_cli-0.2.24.dist-info/top_level.txt,sha256=LNgPqdJPQnlicRve7uzI4a6rEUdcxHrNkUq_2w7eeiA,5
+repr_cli-0.2.24.dist-info/RECORD,,