git-copilot-commit 0.5.7__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- git_copilot_commit/cli.py +158 -62
- git_copilot_commit/llms/__init__.py +0 -0
- git_copilot_commit/llms/copilot.py +799 -0
- git_copilot_commit/llms/core.py +802 -0
- git_copilot_commit/llms/openai_api.py +219 -0
- git_copilot_commit/llms/providers.py +373 -0
- git_copilot_commit/prompts/commit-message-generator-prompt.md +4 -3
- git_copilot_commit/prompts/split-commit-planner-prompt.md +1 -0
- {git_copilot_commit-0.5.7.dist-info → git_copilot_commit-0.6.0.dist-info}/METADATA +51 -7
- git_copilot_commit-0.6.0.dist-info/RECORD +19 -0
- git_copilot_commit/github_copilot.py +0 -1579
- git_copilot_commit-0.5.7.dist-info/RECORD +0 -15
- {git_copilot_commit-0.5.7.dist-info → git_copilot_commit-0.6.0.dist-info}/WHEEL +0 -0
- {git_copilot_commit-0.5.7.dist-info → git_copilot_commit-0.6.0.dist-info}/entry_points.txt +0 -0
- {git_copilot_commit-0.5.7.dist-info → git_copilot_commit-0.6.0.dist-info}/licenses/LICENSE +0 -0
git_copilot_commit/cli.py (CHANGED)

Removed lines truncated by the registry's diff viewer are marked with `…`.
```diff
@@ -29,7 +29,9 @@ from .split_commits import (
 )
 from .settings import Settings
 from .version import __version__
-from . import …
+from .llms import copilot
+from .llms import core as llm
+from .llms import providers
 
 console = Console()
 app = typer.Typer(help=__doc__, add_completion=False)
@@ -65,6 +67,31 @@ NativeTlsOption = Annotated[
     bool,
     typer.Option("--native-tls/--no-native-tls", help=NATIVE_TLS_HELP),
 ]
+ProviderOption = Annotated[
+    str | None,
+    typer.Option(
+        "--provider",
+        help="LLM provider to use: copilot or openai.",
+    ),
+]
+BaseUrlOption = Annotated[
+    str | None,
+    typer.Option(
+        "--base-url",
+        metavar="URL",
+        help=(
+            "Base URL for an OpenAI-compatible provider, for example "
+            "http://127.0.0.1:11434/v1."
+        ),
+    ),
+]
+ApiKeyOption = Annotated[
+    str | None,
+    typer.Option(
+        "--api-key",
+        help="API key for an OpenAI-compatible provider. Omit when the server does not require one.",
+    ),
+]
 
 
 SplitOption = Annotated[
```
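The three `Annotated` aliases above are how every command picks up the new flags without repeating the option definitions. A minimal sketch of that Typer pattern, reusing the `ProviderOption` definition from the hunk (the `demo` command itself is hypothetical):

```python
from typing import Annotated

import typer

app = typer.Typer()

# Defined once, reused by any command that wants a --provider flag.
ProviderOption = Annotated[
    str | None,
    typer.Option("--provider", help="LLM provider to use: copilot or openai."),
]

@app.command()
def demo(provider: ProviderOption = None) -> None:
    # Declaring `provider: ProviderOption = None` is all a command needs;
    # the flag name and help text come from the shared alias.
    typer.echo(f"provider={provider!r}")

if __name__ == "__main__":
    app()
```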
```diff
@@ -293,29 +320,29 @@ def build_http_client_config(
     ca_bundle: str | None,
     insecure: bool,
     native_tls: bool,
-) -> …
+) -> llm.HttpClientConfig:
     if ca_bundle is not None:
         ca_bundle = os.path.expanduser(ca_bundle)
-    return …
+    return llm.HttpClientConfig(
         native_tls=native_tls,
         insecure=insecure,
         ca_bundle=ca_bundle,
     )
 
 
-def …
-    """Render …
-    if isinstance(exc, …
+def print_llm_error(message: str, exc: llm.LLMError) -> None:
+    """Render LLM errors, with rich formatting for model selection issues."""
+    if isinstance(exc, llm.ModelSelectionError):
         console.print(f"[red]{message}[/red]")
-        …
+        llm.print_model_selection_error(exc)
         return
 
     console.print(f"[red]{message}: {exc}[/red]")
 
 
-def display_selected_model(model: …
-    """Show the resolved …
-    details = [ …
+def display_selected_model(model: llm.Model) -> None:
+    """Show the resolved model for the current command."""
+    details = [llm.infer_api_surface(model)]
     if model.vendor:
         details.insert(0, model.vendor)
     console.print(f"[green]Using model:[/green] {model.id} ({', '.join(details)})")
```
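One detail worth noting above: `build_http_client_config` expands a leading `~` in the CA bundle path, so `--ca-bundle ~/certs/corp.pem` works as typed. A quick standalone check, with `llm.HttpClientConfig` stubbed as a dict and the path an example value:

```python
import os

def build_http_client_config(ca_bundle, insecure, native_tls):
    # Mirrors the hunk above; the real code returns llm.HttpClientConfig.
    if ca_bundle is not None:
        ca_bundle = os.path.expanduser(ca_bundle)
    return {"native_tls": native_tls, "insecure": insecure, "ca_bundle": ca_bundle}

cfg = build_http_client_config("~/certs/corp.pem", insecure=False, native_tls=True)
print(cfg["ca_bundle"])  # e.g. /home/me/certs/corp.pem
```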
```diff
@@ -352,20 +379,27 @@ def build_commit_message_prompt(
 
 
 def normalize_model_name(model: str | None) -> str | None:
-    """Normalize model names accepted by the CLI to …
-    if model is not None …
-    …
+    """Normalize model names accepted by the CLI to provider model ids."""
+    if model is not None:
+        for prefix in (
+            "copilot/",
+            "openai/",
+            "openai-compatible/",
+        ):
+            if model.startswith(prefix):
+                return model.replace(prefix, "", 1)
     return model
 
 
-def …
+def ask_llm_with_system_prompt(
     system_prompt: str,
     prompt: str,
     model: str | None = None,
-    …
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> str:
-    """Send a prepared prompt to …
-    return …
+    """Send a prepared prompt to the selected LLM provider."""
+    return providers.ask(
         f"""
 # System Prompt
 
@@ -375,6 +409,7 @@ def ask_copilot_with_system_prompt(
 
 {prompt}
 """,
+        provider_config=provider_config,
         model=normalize_model_name(model),
         http_client_config=http_client_config,
     )
```
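The new `normalize_model_name` strips a single known provider prefix and leaves everything else untouched. A quick sanity check using the function body exactly as it appears in the hunk above:

```python
def normalize_model_name(model: str | None) -> str | None:
    # Copied from the hunk above for a quick sanity check.
    if model is not None:
        for prefix in ("copilot/", "openai/", "openai-compatible/"):
            if model.startswith(prefix):
                return model.replace(prefix, "", 1)
    return model

assert normalize_model_name("copilot/gpt-4o") == "gpt-4o"
assert normalize_model_name("openai-compatible/llama3") == "llama3"
assert normalize_model_name("gpt-4o") == "gpt-4o"   # bare ids pass through
assert normalize_model_name(None) is None           # unset stays unset
```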
```diff
@@ -383,20 +418,22 @@ def ask_copilot_with_system_prompt(
 def generate_commit_message_for_prompt(
     prompt: str,
     model: str | None = None,
-    …
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> str:
     """Generate a conventional commit message from a prepared prompt."""
-    return …
+    return ask_llm_with_system_prompt(
         load_system_prompt(),
         prompt,
         model=model,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )
 
 
-def should_retry_with_compact_prompt(exc: …
+def should_retry_with_compact_prompt(exc: llm.LLMError) -> bool:
     message_parts = [str(exc)]
-    if isinstance(exc, …
+    if isinstance(exc, llm.LLMHttpError) and exc.detail:
         message_parts.append(exc.detail)
 
     haystack = " ".join(part.strip() for part in message_parts if part).lower()
```
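`should_retry_with_compact_prompt` decides whether a failure is worth retrying with a smaller prompt by scanning the error text, plus the HTTP detail body when one is present. A self-contained sketch of that heuristic; the marker strings below are placeholders, since the list the real function checks is truncated out of this diff:

```python
def looks_like_prompt_too_large(message: str, detail: str | None = None) -> bool:
    # Same haystack construction as the hunk above: join, strip, lowercase.
    haystack = " ".join(part.strip() for part in (message, detail or "") if part).lower()
    # Placeholder markers; the real function's marker list is not visible here.
    markers = ("prompt is too long", "context length", "too many tokens")
    return any(marker in haystack for marker in markers)

assert looks_like_prompt_too_large("400: prompt is too long for this model")
assert not looks_like_prompt_too_large("401: unauthorized")
```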
```diff
@@ -422,7 +459,8 @@ def generate_commit_message_for_status(
     status: GitStatus,
     model: str | None = None,
     context: str = "",
-    …
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> str:
     """Generate a commit message for a staged status snapshot."""
     full_prompt = build_commit_message_prompt(status, context=context)
@@ -430,9 +468,10 @@ def generate_commit_message_for_status(
         return generate_commit_message_for_prompt(
             full_prompt,
             model=model,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
-    except …
+    except llm.LLMError as exc:
         if not should_retry_with_compact_prompt(exc):
             raise
 
@@ -447,6 +486,7 @@ def generate_commit_message_for_status(
     return generate_commit_message_for_prompt(
         fallback_prompt,
         model=model,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )
 
@@ -476,24 +516,24 @@ def commit_with_retry_no_verify(
 
 
 def ensure_copilot_authentication(
-    http_client_config: …
+    http_client_config: llm.HttpClientConfig,
 ) -> None:
     """Authenticate if no cached Copilot credentials are available."""
     try:
-        existing_credentials = …
-    except …
+        existing_credentials = copilot.load_credentials()
+    except copilot.LLMError:
         existing_credentials = None
 
     if existing_credentials is not None:
         return
 
     try:
-        …
+        copilot.login(
             force=True,
             http_client_config=http_client_config,
         )
-    except …
-        …
+    except copilot.LLMError as exc:
+        print_llm_error("Authentication failed", exc)
         raise typer.Exit(1)
 
 
```
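The Copilot login above is lazy: readable cached credentials short-circuit the flow, and any failure while reading the cache is treated like an empty cache. A sketch of the same pattern with the `copilot` calls injected as plain callables (the names here are stand-ins, not the package's API):

```python
from typing import Any, Callable

def ensure_authenticated(
    load_credentials: Callable[[], Any],
    login: Callable[..., None],
) -> None:
    try:
        credentials = load_credentials()
    except Exception:
        # A corrupt or unreadable cache counts as "not logged in".
        credentials = None
    if credentials is not None:
        return  # cached credentials found: no interactive login needed
    login(force=True)  # otherwise force a fresh login
```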
```diff
@@ -533,7 +573,8 @@ def request_commit_message(
     status: GitStatus,
     model: str | None = None,
     context: str = "",
-    …
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> str:
     """Request a commit message for the provided staged state."""
     try:
@@ -544,10 +585,11 @@ def request_commit_message(
             status,
             model=model,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
-    except …
-        …
+    except llm.LLMError as exc:
+        print_llm_error("Could not generate a commit message", exc)
         raise typer.Exit(1)
 
 
@@ -558,7 +600,8 @@ def request_split_commit_plan(
     preferred_commits: int | None = None,
     model: str | None = None,
     context: str = "",
-    …
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> SplitCommitPlan:
     """Request and validate a split-commit plan for the staged patch units."""
     planner_system_prompt = load_named_prompt(SPLIT_COMMIT_PLANNER_PROMPT_FILENAME)
@@ -573,15 +616,16 @@ def request_split_commit_plan(
         with console.status(
             "[yellow]Planning split commits from [bold]staged hunks[/] ...[/yellow]"
         ):
-            response = …
+            response = ask_llm_with_system_prompt(
                 planner_system_prompt,
                 planner_prompt,
                 model=model,
+                provider_config=provider_config,
                 http_client_config=http_client_config,
             )
-    except …
+    except llm.LLMError as exc:
         if not should_retry_with_compact_prompt(exc):
-            …
+            print_llm_error("Could not generate a split commit plan", exc)
             raise typer.Exit(1)
 
     console.print(
@@ -605,14 +649,15 @@ def request_split_commit_plan(
         with console.status(
             "[yellow]Planning split commits from [bold]patch summaries[/] ...[/yellow]"
         ):
-            response = …
+            response = ask_llm_with_system_prompt(
                 planner_system_prompt,
                 compact_planner_prompt,
                 model=model,
+                provider_config=provider_config,
                 http_client_config=http_client_config,
             )
-    except …
-        …
+    except llm.LLMError as exc:
+        print_llm_error("Could not generate a split commit plan", exc)
         raise typer.Exit(1)
 
     return parse_split_plan_response(
@@ -627,7 +672,8 @@ def request_split_commit_messages(
     *,
     model: str | None = None,
     context: str = "",
-    …
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> list[PreparedSplitCommit]:
     """Generate commit messages for each planned split-commit group."""
     try:
@@ -643,6 +689,7 @@ def request_split_commit_messages(
                 build_status_for_patch_units(unit_group),
                 model=model,
                 context=context,
+                provider_config=provider_config,
                 http_client_config=http_client_config,
             )
 
@@ -651,8 +698,8 @@ def request_split_commit_messages(
         )
 
         return prepared_commits
-    except …
-        …
+    except llm.LLMError as exc:
+        print_llm_error("Could not generate split commit messages", exc)
         raise typer.Exit(1)
 
 
@@ -832,13 +879,15 @@ def handle_single_commit_flow(
     model: str | None = None,
     yes: bool = False,
     context: str = "",
-    …
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> None:
     """Generate, display, and execute the single-commit flow."""
     commit_message = request_commit_message(
         status,
         model=model,
         context=context,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )
     display_commit_message(commit_message)
@@ -855,7 +904,8 @@ def handle_split_commit_flow(
     model: str | None = None,
     yes: bool = False,
     context: str = "",
-    …
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> None:
     """Generate, display, and execute the split-commit flow."""
     patch_units = tuple(
@@ -872,6 +922,7 @@ def handle_split_commit_flow(
             model=model,
             yes=yes,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
         return
@@ -886,6 +937,7 @@ def handle_split_commit_flow(
             model=model,
             yes=yes,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
         return
@@ -907,6 +959,7 @@ def handle_split_commit_flow(
             preferred_commits=preferred_commits,
             model=model,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
     except SplitPlanningError as exc:
@@ -920,6 +973,7 @@ def handle_split_commit_flow(
             model=model,
             yes=yes,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
         return
@@ -936,6 +990,7 @@ def handle_split_commit_flow(
         patch_units,
         model=model,
         context=context,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )
     prepared_commits = order_prepared_split_commits(prepared_commits)
@@ -979,37 +1034,51 @@ def authenticate(
         native_tls=native_tls,
     )
     try:
-        …
+        copilot.login(
             enterprise_domain=enterprise_domain,
             force=force,
             http_client_config=http_client_config,
         )
-    except …
-        …
+    except copilot.LLMError as exc:
+        print_llm_error("Authentication failed", exc)
         raise typer.Exit(1)
 
 
 @app.command("summary")
 def summary(
+    provider: ProviderOption = None,
+    base_url: BaseUrlOption = None,
+    api_key: ApiKeyOption = None,
     ca_bundle: CaBundleOption = None,
     insecure: InsecureOption = False,
     native_tls: NativeTlsOption = False,
 ):
-    """Show the …
+    """Show the configured LLM provider summary."""
     http_client_config = build_http_client_config(
         ca_bundle=ca_bundle,
         insecure=insecure,
         native_tls=native_tls,
     )
     try:
-        …
-        …
-        …
+        provider_config = providers.resolve_provider_config(
+            provider=provider,
+            base_url=base_url,
+            api_key=api_key,
+        )
+        providers.show_summary(
+            provider_config=provider_config,
+            http_client_config=http_client_config,
+        )
+    except llm.LLMError as exc:
+        print_llm_error("Could not load provider summary", exc)
         raise typer.Exit(1)
 
 
 @app.command("models")
 def models_command(
+    provider: ProviderOption = None,
+    base_url: BaseUrlOption = None,
+    api_key: ApiKeyOption = None,
     vendor: str | None = typer.Option(
         None,
         "--vendor",
@@ -1019,7 +1088,7 @@ def models_command(
     insecure: InsecureOption = False,
     native_tls: NativeTlsOption = False,
 ):
-    """List available …
+    """List available models for the configured LLM provider."""
    http_client_config = build_http_client_config(
         ca_bundle=ca_bundle,
         insecure=insecure,
@@ -1027,16 +1096,26 @@ def models_command(
     )
 
     try:
-        …
+        provider_config = providers.resolve_provider_config(
+            provider=provider,
+            base_url=base_url,
+            api_key=api_key,
+        )
+        inventory = providers.get_available_models(
+            provider_config=provider_config,
             vendor=vendor,
             http_client_config=http_client_config,
         )
 
-        console.print(f"[green] …
-        console.print(f"[green] …
-        …
-        …
-        …
+        console.print(f"[green]LLM provider:[/green] {provider_config.display_name}")
+        console.print(f"[green]Base URL:[/green] {inventory.base_url}")
+        console.print(f"[green]Model count:[/green] {len(inventory.models)}")
+        llm.print_model_table(
+            inventory.models,
+            title=f"Available {provider_config.display_name} Models",
+        )
+    except llm.LLMError as exc:
+        print_llm_error("Could not load models", exc)
         raise typer.Exit(1)
 
 
```
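Both reworked commands accept the new flags, so a local OpenAI-compatible server can be inspected without touching Copilot. A hedged invocation sketch using Typer's test runner; the base URL is an example value, and the only assumption is that `app` is importable from `git_copilot_commit.cli` as shown at the top of this file:

```python
from typer.testing import CliRunner

from git_copilot_commit.cli import app

runner = CliRunner()

# List models from a local OpenAI-compatible server; --api-key is omitted
# because, per the option help above, some local servers do not need one.
result = runner.invoke(
    app,
    ["models", "--provider", "openai", "--base-url", "http://127.0.0.1:11434/v1"],
)
print(result.output)
```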
```diff
@@ -1063,6 +1142,9 @@ def commit(
         "-c",
         help="Optional user-provided context to guide commit message",
     ),
+    provider: ProviderOption = None,
+    base_url: BaseUrlOption = None,
+    api_key: ApiKeyOption = None,
     ca_bundle: CaBundleOption = None,
     insecure: InsecureOption = False,
     native_tls: NativeTlsOption = False,
@@ -1081,7 +1163,18 @@ def commit(
         insecure=insecure,
         native_tls=native_tls,
     )
-    …
+    try:
+        provider_config = providers.resolve_provider_config(
+            provider=provider,
+            base_url=base_url,
+            api_key=api_key,
+        )
+    except llm.LLMError as exc:
+        print_llm_error("Could not resolve the LLM provider", exc)
+        raise typer.Exit(1)
+
+    if provider_config.provider == "copilot":
+        ensure_copilot_authentication(http_client_config)
 
     # Get initial status
     status = repo.get_status()
@@ -1099,12 +1192,13 @@ def commit(
 
     normalized_model = normalize_model_name(model)
     try:
-        selected_model = …
+        selected_model = providers.ensure_model_ready(
+            provider_config=provider_config,
             model=normalized_model,
             http_client_config=http_client_config,
         )
-    except …
-        …
+    except llm.LLMError as exc:
+        print_llm_error("Could not select a model", exc)
         raise typer.Exit(1)
 
     display_selected_model(selected_model)
@@ -1118,6 +1212,7 @@ def commit(
         model=model,
         yes=yes,
         context=context,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )
     return
@@ -1128,6 +1223,7 @@ def commit(
         model=model,
         yes=yes,
         context=context,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )
 
```
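Taken together, the change threads two small config objects through every flow: `llm.HttpClientConfig` for TLS behavior and `providers.ProviderConfig` for which backend answers. A hedged reconstruction of their shapes, with field names taken from how this diff uses them; defaults, and anything beyond these fields, are assumptions rather than the package's actual definitions:

```python
from dataclasses import dataclass

@dataclass
class HttpClientConfig:
    # Fields match the llm.HttpClientConfig(...) call in build_http_client_config.
    native_tls: bool = False      # --native-tls: use the OS trust store
    insecure: bool = False        # --insecure: skip TLS verification
    ca_bundle: str | None = None  # --ca-bundle: custom CA path (~-expanded)

@dataclass
class ProviderConfig:
    # .provider and .display_name are read directly in the hunks above;
    # storing base_url/api_key here is an assumption based on
    # resolve_provider_config's arguments.
    provider: str                 # "copilot" or "openai"
    display_name: str             # e.g. printed by the models command
    base_url: str | None = None   # --base-url for OpenAI-compatible servers
    api_key: str | None = None    # --api-key, optional for local servers
```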