git-copilot-commit 0.5.6__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- git_copilot_commit/cli.py +214 -87
- git_copilot_commit/git.py +26 -0
- git_copilot_commit/llms/__init__.py +0 -0
- git_copilot_commit/llms/copilot.py +799 -0
- git_copilot_commit/llms/core.py +802 -0
- git_copilot_commit/llms/openai_api.py +219 -0
- git_copilot_commit/llms/providers.py +373 -0
- git_copilot_commit/prompts/commit-message-generator-prompt.md +4 -3
- git_copilot_commit/prompts/split-commit-planner-prompt.md +1 -0
- {git_copilot_commit-0.5.6.dist-info → git_copilot_commit-0.6.0.dist-info}/METADATA +51 -7
- git_copilot_commit-0.6.0.dist-info/RECORD +19 -0
- git_copilot_commit/github_copilot.py +0 -1579
- git_copilot_commit-0.5.6.dist-info/RECORD +0 -15
- {git_copilot_commit-0.5.6.dist-info → git_copilot_commit-0.6.0.dist-info}/WHEEL +0 -0
- {git_copilot_commit-0.5.6.dist-info → git_copilot_commit-0.6.0.dist-info}/entry_points.txt +0 -0
- {git_copilot_commit-0.5.6.dist-info → git_copilot_commit-0.6.0.dist-info}/licenses/LICENSE +0 -0
git_copilot_commit/cli.py
CHANGED
@@ -29,7 +29,9 @@ from .split_commits import (
 )
 from .settings import Settings
 from .version import __version__
-from . import
+from .llms import copilot
+from .llms import core as llm
+from .llms import providers

 console = Console()
 app = typer.Typer(help=__doc__, add_completion=False)
@@ -65,6 +67,31 @@ NativeTlsOption = Annotated[
     bool,
     typer.Option("--native-tls/--no-native-tls", help=NATIVE_TLS_HELP),
 ]
+ProviderOption = Annotated[
+    str | None,
+    typer.Option(
+        "--provider",
+        help="LLM provider to use: copilot or openai.",
+    ),
+]
+BaseUrlOption = Annotated[
+    str | None,
+    typer.Option(
+        "--base-url",
+        metavar="URL",
+        help=(
+            "Base URL for an OpenAI-compatible provider, for example "
+            "http://127.0.0.1:11434/v1."
+        ),
+    ),
+]
+ApiKeyOption = Annotated[
+    str | None,
+    typer.Option(
+        "--api-key",
+        help="API key for an OpenAI-compatible provider. Omit when the server does not require one.",
+    ),
+]


 SplitOption = Annotated[
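The three new option types are the CLI surface for provider selection in 0.6.0. The sketch below pieces together how they appear to flow into the new `llms` package, based only on the call sites visible later in this diff (`providers.resolve_provider_config`, `providers.ask`, `llm.HttpClientConfig`); the package internals are not part of this diff, so defaults and return types here are assumptions.

```python
# Sketch only: signatures are inferred from call sites in this diff; the llms
# package added in 0.6.0 (providers.py, core.py, copilot.py) is not shown here.
from git_copilot_commit.llms import core as llm
from git_copilot_commit.llms import providers

http_client_config = llm.HttpClientConfig(
    native_tls=False,
    insecure=False,
    ca_bundle=None,
)
provider_config = providers.resolve_provider_config(
    provider="openai",                     # or "copilot"; omitting it presumably keeps the old behaviour
    base_url="http://127.0.0.1:11434/v1",  # example URL taken from the --base-url help text
    api_key=None,                          # optional, per the --api-key help text
)
reply = providers.ask(
    "Write a conventional commit message for this diff.",
    provider_config=provider_config,
    model=None,                            # None presumably falls back to a provider default
    http_client_config=http_client_config,
)
print(reply)
```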
@@ -95,6 +122,14 @@ class PreparedSplitCommit:
     patch_units: tuple[PatchUnit, ...]


+@dataclass(frozen=True, slots=True)
+class SplitCommitExecutionState:
+    """Original HEAD state used to roll back partial split-commit execution."""
+
+    original_head_sha: str | None
+    original_head_ref: str | None
+
+
 CORE_CHANGE_COMMIT_TYPES = frozenset({"feat", "fix", "perf", "refactor", "revert"})
 FOLLOW_UP_COMMIT_TYPE_PRIORITY = {
     "test": 2,
@@ -285,29 +320,29 @@ def build_http_client_config(
     ca_bundle: str | None,
     insecure: bool,
     native_tls: bool,
-) ->
+) -> llm.HttpClientConfig:
     if ca_bundle is not None:
         ca_bundle = os.path.expanduser(ca_bundle)
-    return
+    return llm.HttpClientConfig(
         native_tls=native_tls,
         insecure=insecure,
         ca_bundle=ca_bundle,
     )


-def
-    """Render
-    if isinstance(exc,
+def print_llm_error(message: str, exc: llm.LLMError) -> None:
+    """Render LLM errors, with rich formatting for model selection issues."""
+    if isinstance(exc, llm.ModelSelectionError):
         console.print(f"[red]{message}[/red]")
-
+        llm.print_model_selection_error(exc)
         return

     console.print(f"[red]{message}: {exc}[/red]")


-def display_selected_model(model:
-    """Show the resolved
-    details = [
+def display_selected_model(model: llm.Model) -> None:
+    """Show the resolved model for the current command."""
+    details = [llm.infer_api_surface(model)]
     if model.vendor:
         details.insert(0, model.vendor)
     console.print(f"[green]Using model:[/green] {model.id} ({', '.join(details)})")
@@ -344,20 +379,27 @@ def build_commit_message_prompt(


 def normalize_model_name(model: str | None) -> str | None:
-    """Normalize model names accepted by the CLI to
-    if model is not None
-
+    """Normalize model names accepted by the CLI to provider model ids."""
+    if model is not None:
+        for prefix in (
+            "copilot/",
+            "openai/",
+            "openai-compatible/",
+        ):
+            if model.startswith(prefix):
+                return model.replace(prefix, "", 1)
     return model


-def
+def ask_llm_with_system_prompt(
     system_prompt: str,
     prompt: str,
     model: str | None = None,
-
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> str:
-    """Send a prepared prompt to
-    return
+    """Send a prepared prompt to the selected LLM provider."""
+    return providers.ask(
         f"""
 # System Prompt

@@ -367,6 +409,7 @@ def ask_copilot_with_system_prompt(

 {prompt}
 """,
+        provider_config=provider_config,
         model=normalize_model_name(model),
         http_client_config=http_client_config,
     )
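`normalize_model_name` now also accepts provider-prefixed ids on the command line and strips the prefix before the name reaches the provider layer. A quick illustration of the behaviour added above (the model ids are only examples):

```python
# Assumes normalize_model_name is importable from the CLI module shown in this diff.
from git_copilot_commit.cli import normalize_model_name

# A recognised provider prefix is stripped once, from the left.
assert normalize_model_name("copilot/gpt-4o") == "gpt-4o"
assert normalize_model_name("openai-compatible/llama3.1") == "llama3.1"
# Ids without a known prefix, and None, pass through unchanged.
assert normalize_model_name("gpt-4o") == "gpt-4o"
assert normalize_model_name(None) is None
```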
@@ -375,20 +418,22 @@ def ask_copilot_with_system_prompt(
 def generate_commit_message_for_prompt(
     prompt: str,
     model: str | None = None,
-
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> str:
     """Generate a conventional commit message from a prepared prompt."""
-    return
+    return ask_llm_with_system_prompt(
         load_system_prompt(),
         prompt,
         model=model,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )


-def should_retry_with_compact_prompt(exc:
+def should_retry_with_compact_prompt(exc: llm.LLMError) -> bool:
     message_parts = [str(exc)]
-    if isinstance(exc,
+    if isinstance(exc, llm.LLMHttpError) and exc.detail:
         message_parts.append(exc.detail)

     haystack = " ".join(part.strip() for part in message_parts if part).lower()
@@ -414,7 +459,8 @@ def generate_commit_message_for_status(
     status: GitStatus,
     model: str | None = None,
     context: str = "",
-
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> str:
     """Generate a commit message for a staged status snapshot."""
     full_prompt = build_commit_message_prompt(status, context=context)
@@ -422,9 +468,10 @@ def generate_commit_message_for_status(
         return generate_commit_message_for_prompt(
             full_prompt,
             model=model,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
-    except
+    except llm.LLMError as exc:
         if not should_retry_with_compact_prompt(exc):
             raise

@@ -439,6 +486,7 @@ def generate_commit_message_for_status(
         return generate_commit_message_for_prompt(
             fallback_prompt,
             model=model,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )

@@ -468,24 +516,24 @@ def commit_with_retry_no_verify(


 def ensure_copilot_authentication(
-    http_client_config:
+    http_client_config: llm.HttpClientConfig,
 ) -> None:
     """Authenticate if no cached Copilot credentials are available."""
     try:
-        existing_credentials =
-    except
+        existing_credentials = copilot.load_credentials()
+    except copilot.LLMError:
         existing_credentials = None

     if existing_credentials is not None:
         return

     try:
-
+        copilot.login(
             force=True,
             http_client_config=http_client_config,
         )
-    except
-
+    except copilot.LLMError as exc:
+        print_llm_error("Authentication failed", exc)
         raise typer.Exit(1)


@@ -525,7 +573,8 @@ def request_commit_message(
     status: GitStatus,
     model: str | None = None,
     context: str = "",
-
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> str:
     """Request a commit message for the provided staged state."""
     try:
@@ -536,10 +585,11 @@ def request_commit_message(
             status,
             model=model,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
-    except
-
+    except llm.LLMError as exc:
+        print_llm_error("Could not generate a commit message", exc)
         raise typer.Exit(1)


@@ -550,7 +600,8 @@ def request_split_commit_plan(
     preferred_commits: int | None = None,
     model: str | None = None,
     context: str = "",
-
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> SplitCommitPlan:
     """Request and validate a split-commit plan for the staged patch units."""
     planner_system_prompt = load_named_prompt(SPLIT_COMMIT_PLANNER_PROMPT_FILENAME)
@@ -565,15 +616,16 @@ def request_split_commit_plan(
         with console.status(
             "[yellow]Planning split commits from [bold]staged hunks[/] ...[/yellow]"
         ):
-            response =
+            response = ask_llm_with_system_prompt(
                 planner_system_prompt,
                 planner_prompt,
                 model=model,
+                provider_config=provider_config,
                 http_client_config=http_client_config,
             )
-    except
+    except llm.LLMError as exc:
         if not should_retry_with_compact_prompt(exc):
-
+            print_llm_error("Could not generate a split commit plan", exc)
             raise typer.Exit(1)

         console.print(
@@ -597,14 +649,15 @@ def request_split_commit_plan(
         with console.status(
             "[yellow]Planning split commits from [bold]patch summaries[/] ...[/yellow]"
         ):
-            response =
+            response = ask_llm_with_system_prompt(
                 planner_system_prompt,
                 compact_planner_prompt,
                 model=model,
+                provider_config=provider_config,
                 http_client_config=http_client_config,
             )
-    except
-
+    except llm.LLMError as exc:
+        print_llm_error("Could not generate a split commit plan", exc)
         raise typer.Exit(1)

     return parse_split_plan_response(
@@ -619,7 +672,8 @@ def request_split_commit_messages(
     *,
     model: str | None = None,
     context: str = "",
-
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> list[PreparedSplitCommit]:
     """Generate commit messages for each planned split-commit group."""
     try:
@@ -635,6 +689,7 @@ def request_split_commit_messages(
                 build_status_for_patch_units(unit_group),
                 model=model,
                 context=context,
+                provider_config=provider_config,
                 http_client_config=http_client_config,
             )

@@ -643,8 +698,8 @@ def request_split_commit_messages(
             )

         return prepared_commits
-    except
-
+    except llm.LLMError as exc:
+        print_llm_error("Could not generate split commit messages", exc)
         raise typer.Exit(1)


@@ -757,39 +812,62 @@ def execute_split_commit_plan(
         console.print("Invalid choice. Commit cancelled.")
         raise typer.Exit()

+    execution_state = SplitCommitExecutionState(
+        original_head_sha=repo.get_head_sha() if repo.has_commit("HEAD") else None,
+        original_head_ref=repo.get_symbolic_head_ref(),
+    )
     commit_shas: list[str] = []
     total_commits = len(prepared_commits)

-
-
-
-
+    try:
+        for index, prepared_commit in enumerate(prepared_commits, start=1):
+            console.print(
+                f"[cyan]Creating commit {index}/{total_commits}:[/cyan] {prepared_commit.message}"
+            )

-
-
-
-
-
-
+            with repo.temporary_alternate_index() as alternate_index:
+                try:
+                    for patch_unit in prepared_commit.patch_units:
+                        repo.check_patch_for_alternate_index(
+                            patch_unit.patch,
+                            index=alternate_index,
+                        )
+                        repo.apply_patch_to_alternate_index(
+                            patch_unit.patch,
+                            index=alternate_index,
+                        )
+                except GitError as exc:
+                    console.print(
+                        f"[red]Failed to apply the planned changes for commit {index}: {exc}[/red]"
                     )
-
-
-
+                    raise typer.Exit(1)
+
+                commit_shas.append(
+                    commit_with_retry_no_verify(
+                        repo,
+                        prepared_commit.message,
+                        use_editor=use_editor,
+                        env=alternate_index.env,
                     )
-        except GitError as exc:
-            console.print(
-                f"[red]Failed to apply the planned changes for commit {index}: {exc}[/red]"
-            )
-            raise typer.Exit(1)
-
-        commit_shas.append(
-            commit_with_retry_no_verify(
-                repo,
-                prepared_commit.message,
-                use_editor=use_editor,
-                env=alternate_index.env,
                 )
+    except BaseException:
+        try:
+            if execution_state.original_head_sha is not None:
+                repo.soft_reset(execution_state.original_head_sha)
+            elif execution_state.original_head_ref is not None and repo.has_commit(
+                "HEAD"
+            ):
+                repo.delete_ref(execution_state.original_head_ref)
+        except GitError as exc:
+            console.print(
+                "[red]Failed to restore the original staged changes after split commit creation stopped early: "
+                f"{exc}[/red]"
             )
+        else:
+            console.print(
+                "[yellow]Split commit creation did not complete; restored the original staged changes.[/yellow]"
+            )
+        raise

     return commit_shas

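The `try`/`except BaseException` wrapper above is the behavioural core of this hunk: if any split commit fails, or the user aborts mid-run, the HEAD state captured in `SplitCommitExecutionState` is restored and the original staged changes stay staged. A condensed restatement of that rollback branch, assuming the `GitRepository` helpers added in `git.py` further down:

```python
# Condensed restatement of the rollback handler above; not new behaviour.
def restore_original_head(repo, state) -> None:
    if state.original_head_sha is not None:
        # A commit existed before the run: move HEAD back to it while keeping
        # the index and working tree (git reset --soft <sha>).
        repo.soft_reset(state.original_head_sha)
    elif state.original_head_ref is not None and repo.has_commit("HEAD"):
        # The run started on an unborn branch and partial commits were created:
        # delete the branch ref so HEAD is unborn again (git update-ref -d <ref>).
        repo.delete_ref(state.original_head_ref)
```

Either way the partially created commits become unreachable, which matches the "restored the original staged changes" message printed above.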
@@ -801,13 +879,15 @@ def handle_single_commit_flow(
     model: str | None = None,
     yes: bool = False,
     context: str = "",
-
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> None:
     """Generate, display, and execute the single-commit flow."""
     commit_message = request_commit_message(
         status,
         model=model,
         context=context,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )
     display_commit_message(commit_message)
@@ -824,7 +904,8 @@ def handle_split_commit_flow(
     model: str | None = None,
     yes: bool = False,
     context: str = "",
-
+    provider_config: providers.ProviderConfig | None = None,
+    http_client_config: llm.HttpClientConfig | None = None,
 ) -> None:
     """Generate, display, and execute the split-commit flow."""
     patch_units = tuple(
@@ -841,6 +922,7 @@ def handle_split_commit_flow(
             model=model,
             yes=yes,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
         return
@@ -855,6 +937,7 @@ def handle_split_commit_flow(
             model=model,
             yes=yes,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
         return
@@ -876,6 +959,7 @@ def handle_split_commit_flow(
             preferred_commits=preferred_commits,
             model=model,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
     except SplitPlanningError as exc:
@@ -889,6 +973,7 @@ def handle_split_commit_flow(
             model=model,
             yes=yes,
             context=context,
+            provider_config=provider_config,
             http_client_config=http_client_config,
         )
         return
@@ -905,6 +990,7 @@ def handle_split_commit_flow(
         patch_units,
         model=model,
         context=context,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )
     prepared_commits = order_prepared_split_commits(prepared_commits)
@@ -948,37 +1034,51 @@ def authenticate(
         native_tls=native_tls,
     )
     try:
-
+        copilot.login(
             enterprise_domain=enterprise_domain,
             force=force,
             http_client_config=http_client_config,
         )
-    except
-
+    except copilot.LLMError as exc:
+        print_llm_error("Authentication failed", exc)
         raise typer.Exit(1)


 @app.command("summary")
 def summary(
+    provider: ProviderOption = None,
+    base_url: BaseUrlOption = None,
+    api_key: ApiKeyOption = None,
     ca_bundle: CaBundleOption = None,
     insecure: InsecureOption = False,
     native_tls: NativeTlsOption = False,
 ):
-    """Show the
+    """Show the configured LLM provider summary."""
     http_client_config = build_http_client_config(
         ca_bundle=ca_bundle,
         insecure=insecure,
         native_tls=native_tls,
     )
     try:
-
-
-
+        provider_config = providers.resolve_provider_config(
+            provider=provider,
+            base_url=base_url,
+            api_key=api_key,
+        )
+        providers.show_summary(
+            provider_config=provider_config,
+            http_client_config=http_client_config,
+        )
+    except llm.LLMError as exc:
+        print_llm_error("Could not load provider summary", exc)
         raise typer.Exit(1)


 @app.command("models")
 def models_command(
+    provider: ProviderOption = None,
+    base_url: BaseUrlOption = None,
+    api_key: ApiKeyOption = None,
     vendor: str | None = typer.Option(
         None,
         "--vendor",
@@ -988,7 +1088,7 @@ def models_command(
     insecure: InsecureOption = False,
     native_tls: NativeTlsOption = False,
 ):
-    """List available
+    """List available models for the configured LLM provider."""
     http_client_config = build_http_client_config(
         ca_bundle=ca_bundle,
         insecure=insecure,
@@ -996,16 +1096,26 @@ def models_command(
     )

     try:
-
+        provider_config = providers.resolve_provider_config(
+            provider=provider,
+            base_url=base_url,
+            api_key=api_key,
+        )
+        inventory = providers.get_available_models(
+            provider_config=provider_config,
             vendor=vendor,
             http_client_config=http_client_config,
         )

-        console.print(f"[green]
-        console.print(f"[green]
-
-
-
+        console.print(f"[green]LLM provider:[/green] {provider_config.display_name}")
+        console.print(f"[green]Base URL:[/green] {inventory.base_url}")
+        console.print(f"[green]Model count:[/green] {len(inventory.models)}")
+        llm.print_model_table(
+            inventory.models,
+            title=f"Available {provider_config.display_name} Models",
+        )
+    except llm.LLMError as exc:
+        print_llm_error("Could not load models", exc)
         raise typer.Exit(1)

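For completeness, the reworked `models` command now resolves a provider before asking it for an inventory, and the same calls look usable outside Typer. A sketch under the same assumptions as earlier (the `inventory.base_url`/`inventory.models` and per-model `id`/`vendor` attributes are taken from the call sites in this diff):

```python
# Sketch: list models for the configured provider without the Typer command.
from git_copilot_commit.llms import core as llm
from git_copilot_commit.llms import providers

provider_config = providers.resolve_provider_config(provider=None, base_url=None, api_key=None)
inventory = providers.get_available_models(
    provider_config=provider_config,
    vendor=None,  # the --vendor filter is optional
    http_client_config=llm.HttpClientConfig(native_tls=False, insecure=False, ca_bundle=None),
)
print(inventory.base_url, len(inventory.models))
for model in inventory.models:
    print(model.id, model.vendor or "")
```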
@@ -1032,6 +1142,9 @@ def commit(
         "-c",
         help="Optional user-provided context to guide commit message",
     ),
+    provider: ProviderOption = None,
+    base_url: BaseUrlOption = None,
+    api_key: ApiKeyOption = None,
     ca_bundle: CaBundleOption = None,
     insecure: InsecureOption = False,
     native_tls: NativeTlsOption = False,
@@ -1050,7 +1163,18 @@ def commit(
         insecure=insecure,
         native_tls=native_tls,
     )
-
+    try:
+        provider_config = providers.resolve_provider_config(
+            provider=provider,
+            base_url=base_url,
+            api_key=api_key,
+        )
+    except llm.LLMError as exc:
+        print_llm_error("Could not resolve the LLM provider", exc)
+        raise typer.Exit(1)
+
+    if provider_config.provider == "copilot":
+        ensure_copilot_authentication(http_client_config)

     # Get initial status
     status = repo.get_status()
@@ -1068,12 +1192,13 @@ def commit(

     normalized_model = normalize_model_name(model)
     try:
-        selected_model =
+        selected_model = providers.ensure_model_ready(
+            provider_config=provider_config,
             model=normalized_model,
             http_client_config=http_client_config,
         )
-    except
-
+    except llm.LLMError as exc:
+        print_llm_error("Could not select a model", exc)
         raise typer.Exit(1)

     display_selected_model(selected_model)
@@ -1087,6 +1212,7 @@ def commit(
         model=model,
         yes=yes,
         context=context,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )
     return
@@ -1097,6 +1223,7 @@ def commit(
         model=model,
         yes=yes,
         context=context,
+        provider_config=provider_config,
         http_client_config=http_client_config,
     )

git_copilot_commit/git.py
CHANGED
@@ -274,6 +274,15 @@ class GitRepository:
         )
         return result.returncode == 0

+    def get_symbolic_head_ref(self) -> str | None:
+        """Return the symbolic ref for HEAD when attached to a branch."""
+        result = self._run_git_command(["symbolic-ref", "-q", "HEAD"], check=False)
+        if result.returncode != 0:
+            return None
+
+        ref = result.stdout.strip()
+        return ref or None
+
     def _parse_status_output(self, status_output: str) -> list[GitFile]:
         """Parse git status --porcelain output into GitFile objects."""
         files = []
@@ -326,6 +335,23 @@ class GitRepository:
         else:
             self._run_git_command(["reset", "HEAD"] + self._normalize_paths(paths))

+    def soft_reset(self, ref: str) -> None:
+        """Move HEAD to ref while preserving the working tree and index."""
+        self._run_git_command(["reset", "--soft", ref])
+
+    def delete_ref(self, ref: str, *, missing_ok: bool = False) -> None:
+        """Delete a ref, optionally ignoring missing refs."""
+        result = self._run_git_command(["update-ref", "-d", ref], check=False)
+        if result.returncode == 0 or missing_ok:
+            return
+
+        error_output = result.stderr or result.stdout or ""
+        if error_output:
+            raise GitCommandError(
+                f"Git command failed: git update-ref -d {ref}\n{error_output}"
+            )
+        raise GitCommandError(f"Git command failed: git update-ref -d {ref}")
+
     def create_alternate_index(self, from_ref: str = "HEAD") -> AlternateGitIndex:
         """Create a temporary git index initialized from the provided ref."""
         fd, index_path = tempfile.mkstemp(prefix="git-copilot-commit-", suffix=".index")
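The three new `GitRepository` helpers are thin wrappers over git plumbing (`symbolic-ref -q HEAD`, `reset --soft`, `update-ref -d`) and exist to support the split-commit rollback in `cli.py` above. A small usage sketch; the repository path, ref name, and constructor signature are assumptions:

```python
# Illustrative use of the new helpers; path, ref name, and constructor are assumed.
from git_copilot_commit.git import GitRepository

repo = GitRepository(".")                     # assumed: constructor takes a working-tree path
ref = repo.get_symbolic_head_ref()            # e.g. "refs/heads/main", or None when HEAD is detached
if repo.has_commit("HEAD"):
    repo.soft_reset("HEAD")                   # soft reset to the current commit is a no-op
repo.delete_ref("refs/heads/scratch", missing_ok=True)  # does not raise if the ref is absent
```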
File without changes