gac 0.17.2__py3-none-any.whl → 3.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__version__.py +1 -1
- gac/ai.py +69 -123
- gac/ai_utils.py +227 -0
- gac/auth_cli.py +69 -0
- gac/cli.py +87 -19
- gac/config.py +13 -7
- gac/config_cli.py +26 -5
- gac/constants.py +176 -5
- gac/errors.py +14 -0
- gac/git.py +207 -11
- gac/init_cli.py +52 -29
- gac/language_cli.py +378 -0
- gac/main.py +922 -189
- gac/model_cli.py +374 -0
- gac/oauth/__init__.py +1 -0
- gac/oauth/claude_code.py +397 -0
- gac/preprocess.py +5 -5
- gac/prompt.py +656 -219
- gac/providers/__init__.py +88 -0
- gac/providers/anthropic.py +51 -0
- gac/providers/azure_openai.py +97 -0
- gac/providers/cerebras.py +38 -0
- gac/providers/chutes.py +71 -0
- gac/providers/claude_code.py +102 -0
- gac/providers/custom_anthropic.py +133 -0
- gac/providers/custom_openai.py +98 -0
- gac/providers/deepseek.py +38 -0
- gac/providers/fireworks.py +38 -0
- gac/providers/gemini.py +87 -0
- gac/providers/groq.py +63 -0
- gac/providers/kimi_coding.py +63 -0
- gac/providers/lmstudio.py +59 -0
- gac/providers/minimax.py +38 -0
- gac/providers/mistral.py +38 -0
- gac/providers/moonshot.py +38 -0
- gac/providers/ollama.py +50 -0
- gac/providers/openai.py +38 -0
- gac/providers/openrouter.py +58 -0
- gac/providers/replicate.py +98 -0
- gac/providers/streamlake.py +51 -0
- gac/providers/synthetic.py +42 -0
- gac/providers/together.py +38 -0
- gac/providers/zai.py +59 -0
- gac/security.py +293 -0
- gac/utils.py +243 -4
- gac/workflow_utils.py +222 -0
- gac-3.6.0.dist-info/METADATA +281 -0
- gac-3.6.0.dist-info/RECORD +53 -0
- {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/WHEEL +1 -1
- {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/licenses/LICENSE +1 -1
- gac-0.17.2.dist-info/METADATA +0 -221
- gac-0.17.2.dist-info/RECORD +0 -20
- {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/entry_points.txt +0 -0
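The headline change is the rewritten gac/main.py, shown in full below. Two conventions recur throughout it: model identifiers must be fully qualified as provider:model (enforced by the new _parse_model_identifier helper), and grouped mode validates the model's JSON output against the staged file set. As a minimal sketch of the identifier convention — a hypothetical standalone helper mirroring the in-tree validation, raising instead of calling sys.exit:

    def split_model_identifier(model: str) -> tuple[str, str]:
        # Mirrors _parse_model_identifier in the new gac/main.py:
        # identifiers must look like "provider:model", e.g. "openai:gpt-4o-mini".
        normalized = model.strip()
        if ":" not in normalized:
            raise ValueError(f"Invalid model format: {model!r}; expected 'provider:model'")
        provider, model_name = normalized.split(":", 1)
        if not provider or not model_name:
            raise ValueError(f"Invalid model format: {model!r}; both parts are required")
        return provider, model_name

    assert split_model_identifier("anthropic:claude-haiku-4-5") == ("anthropic", "claude-haiku-4-5")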
gac/main.py CHANGED

@@ -10,36 +10,747 @@ import click
 from rich.console import Console
 from rich.panel import Panel
 
-from gac.ai import
+from gac.ai import generate_commit_message
+from gac.ai_utils import count_tokens
 from gac.config import load_config
 from gac.constants import EnvDefaults, Utility
 from gac.errors import AIError, GitError, handle_error
 from gac.git import (
+    detect_rename_mappings,
     get_staged_files,
+    get_staged_status,
     push_changes,
     run_git_command,
+    run_lefthook_hooks,
     run_pre_commit_hooks,
 )
 from gac.preprocess import preprocess_diff
 from gac.prompt import build_prompt, clean_commit_message
+from gac.security import get_affected_files, scan_staged_diff
+from gac.workflow_utils import (
+    check_token_warning,
+    collect_interactive_answers,
+    display_commit_message,
+    execute_commit,
+    format_answers_for_prompt,
+    handle_confirmation_loop,
+    restore_staging,
+)
 
 logger = logging.getLogger(__name__)
 
 config = load_config()
+console = Console()  # Initialize console globally to prevent undefined access
+
+
+def _validate_grouped_files_or_feedback(staged: set[str], grouped_result: dict) -> tuple[bool, str, str]:
+    from collections import Counter
+
+    commits = grouped_result.get("commits", []) if isinstance(grouped_result, dict) else []
+    all_files: list[str] = []
+    for commit in commits:
+        files = commit.get("files", []) if isinstance(commit, dict) else []
+        all_files.extend([str(p) for p in files])
+
+    counts = Counter(all_files)
+    union_set = set(all_files)
+
+    duplicates = sorted([f for f, c in counts.items() if c > 1])
+    missing = sorted(staged - union_set)
+    unexpected = sorted(union_set - staged)
+
+    if not duplicates and not missing and not unexpected:
+        return True, "", ""
+
+    problems: list[str] = []
+    if missing:
+        problems.append(f"Missing: {', '.join(missing)}")
+    if unexpected:
+        problems.append(f"Not staged: {', '.join(unexpected)}")
+    if duplicates:
+        problems.append(f"Duplicates: {', '.join(duplicates)}")
+
+    feedback = f"{'; '.join(problems)}. Required files: {', '.join(sorted(staged))}. Respond with ONLY valid JSON."
+    return False, feedback, "; ".join(problems)
+
+
+def _parse_model_identifier(model: str) -> tuple[str, str]:
+    """Validate and split model identifier into provider and model name."""
+    normalized = model.strip()
+    if ":" not in normalized:
+        message = (
+            f"Invalid model format: '{model}'. Expected 'provider:model', e.g. 'openai:gpt-4o-mini'. "
+            "Use 'gac config set model <provider:model>' to update your configuration."
+        )
+        logger.error(message)
+        console.print(f"[red]{message}[/red]")
+        sys.exit(1)
+
+    provider, model_name = normalized.split(":", 1)
+    if not provider or not model_name:
+        message = (
+            f"Invalid model format: '{model}'. Both provider and model name are required "
+            "(example: 'anthropic:claude-haiku-4-5')."
+        )
+        logger.error(message)
+        console.print(f"[red]{message}[/red]")
+        sys.exit(1)
+
+    return provider, model_name
+
+
+def _handle_validation_retry(
+    attempts: int,
+    content_retry_budget: int,
+    raw_response: str,
+    feedback_message: str,
+    error_message: str,
+    conversation_messages: list[dict[str, str]],
+    quiet: bool,
+    retry_context: str,
+) -> bool:
+    """Handle validation retry logic. Returns True if should exit, False if should retry."""
+    conversation_messages.append({"role": "assistant", "content": raw_response})
+    conversation_messages.append({"role": "user", "content": feedback_message})
+    if attempts >= content_retry_budget:
+        logger.error(error_message)
+        console.print(f"\n[red]{error_message}[/red]")
+        console.print("\n[yellow]Raw model output:[/yellow]")
+        console.print(Panel(raw_response, title="Model Output", border_style="yellow"))
+        return True
+    if not quiet:
+        console.print(f"[yellow]Retry {attempts} of {content_retry_budget - 1}: {retry_context}[/yellow]")
+    return False
+
+
+def execute_grouped_commits_workflow(
+    *,
+    system_prompt: str,
+    user_prompt: str,
+    model: str,
+    temperature: float,
+    max_output_tokens: int,
+    max_retries: int,
+    require_confirmation: bool,
+    quiet: bool,
+    no_verify: bool,
+    dry_run: bool,
+    push: bool,
+    show_prompt: bool,
+    interactive: bool,
+    message_only: bool,
+    hook_timeout: int = 120,
+) -> None:
+    """Execute the grouped commits workflow."""
+    import json
+
+    from gac.ai import generate_grouped_commits
+
+    provider, model_name = _parse_model_identifier(model)
+
+    if show_prompt:
+        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
+        console.print(Panel(full_prompt, title="Prompt for LLM", border_style="bright_blue"))
+
+    conversation_messages: list[dict[str, str]] = []
+    if system_prompt:
+        conversation_messages.append({"role": "system", "content": system_prompt})
+    conversation_messages.append({"role": "user", "content": user_prompt})
+
+    _parse_model_identifier(model)
+
+    # Generate interactive questions if enabled
+    if interactive and not message_only:
+        try:
+            # Extract git data from the user prompt for question generation
+            status_match = None
+            diff_match = None
+            diff_stat_match = None
+
+            import re
+
+            status_match = re.search(r"<git_status>\n(.*?)\n</git_status>", user_prompt, re.DOTALL)
+            diff_match = re.search(r"<git_diff>\n(.*?)\n</git_diff>", user_prompt, re.DOTALL)
+            diff_stat_match = re.search(r"<git_diff_stat>\n(.*?)\n</git_diff_stat>", user_prompt, re.DOTALL)
+
+            status = status_match.group(1) if status_match else ""
+            diff = diff_match.group(1) if diff_match else ""
+            diff_stat = diff_stat_match.group(1) if diff_stat_match else ""
+
+            # Extract hint text if present
+            hint_match = re.search(r"<hint_text>(.*?)</hint_text>", user_prompt, re.DOTALL)
+            hint = hint_match.group(1) if hint_match else ""
+
+            questions = generate_contextual_questions(
+                model=model,
+                status=status,
+                processed_diff=diff,
+                diff_stat=diff_stat,
+                hint=hint,
+                temperature=temperature,
+                max_tokens=max_output_tokens,
+                max_retries=max_retries,
+                quiet=quiet,
+            )
+
+            if questions:
+                # Collect answers interactively
+                answers = collect_interactive_answers(questions)
+
+                if answers is None:
+                    # User aborted interactive mode
+                    if not quiet:
+                        console.print("[yellow]Proceeding with commit without additional context[/yellow]\n")
+                elif answers:
+                    # User provided some answers, format them for the prompt
+                    answers_context = format_answers_for_prompt(answers)
+                    enhanced_user_prompt = user_prompt + answers_context
+
+                    # Update the conversation messages with the enhanced prompt
+                    if conversation_messages and conversation_messages[-1]["role"] == "user":
+                        conversation_messages[-1]["content"] = enhanced_user_prompt
+
+                    logger.info(f"Collected answers for {len(answers)} questions")
+                else:
+                    # User skipped all questions
+                    if not quiet:
+                        console.print("[dim]No answers provided, proceeding with original context[/dim]\n")
+
+        except Exception as e:
+            logger.warning(f"Failed to generate contextual questions, proceeding without them: {e}")
+            if not quiet:
+                console.print("[yellow]⚠️ Could not generate contextual questions, proceeding normally[/yellow]\n")
+
+    first_iteration = True
+    content_retry_budget = max(3, int(max_retries))
+    attempts = 0
+
+    grouped_result: dict | None = None
+    raw_response: str = ""
+
+    while True:
+        prompt_tokens = count_tokens(conversation_messages, model)
+
+        if first_iteration:
+            warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
+            assert warning_limit_val is not None
+            warning_limit = int(warning_limit_val)
+            if not check_token_warning(prompt_tokens, warning_limit, require_confirmation):
+                sys.exit(0)
+            first_iteration = False
+
+        raw_response = generate_grouped_commits(
+            model=model,
+            prompt=conversation_messages,
+            temperature=temperature,
+            max_tokens=max_output_tokens,
+            max_retries=max_retries,
+            quiet=quiet,
+            skip_success_message=True,
+        )
+
+        parsed: dict | None = None
+        extract = raw_response
+        first_brace = raw_response.find("{")
+        last_brace = raw_response.rfind("}")
+        if first_brace != -1 and last_brace != -1 and first_brace < last_brace:
+            extract = raw_response[first_brace : last_brace + 1]
+
+        try:
+            parsed = json.loads(extract)
+        except json.JSONDecodeError as e:
+            parsed = None
+            logger.debug(
+                f"JSON parsing failed: {e}. Extract length: {len(extract)}, Response length: {len(raw_response)}"
+            )
+
+        if parsed is None:
+            attempts += 1
+            feedback = "Your response was not valid JSON. Respond with ONLY valid JSON matching the expected schema. Do not include any commentary or code fences."
+            error_msg = f"Failed to parse LLM response as JSON after {attempts} retries."
+            if _handle_validation_retry(
+                attempts,
+                content_retry_budget,
+                raw_response,
+                feedback,
+                error_msg,
+                conversation_messages,
+                quiet,
+                "JSON parsing failed, asking model to fix...",
+            ):
+                sys.exit(1)
+            continue
+
+        try:
+            if "commits" not in parsed or not isinstance(parsed["commits"], list):
+                raise ValueError("Response missing 'commits' array")
+            if len(parsed["commits"]) == 0:
+                raise ValueError("No commits in response")
+            for idx, commit in enumerate(parsed["commits"]):
+                if "files" not in commit or not isinstance(commit["files"], list):
+                    raise ValueError(f"Commit {idx + 1} missing 'files' array")
+                if "message" not in commit or not isinstance(commit["message"], str):
+                    raise ValueError(f"Commit {idx + 1} missing 'message' string")
+                if len(commit["files"]) == 0:
+                    raise ValueError(f"Commit {idx + 1} has empty files list")
+                if not commit["message"].strip():
+                    raise ValueError(f"Commit {idx + 1} has empty message")
+        except (ValueError, TypeError) as e:
+            attempts += 1
+            feedback = f"Invalid response structure: {e}. Please return ONLY valid JSON following the schema with a non-empty 'commits' array of objects containing 'files' and 'message'."
+            error_msg = f"Invalid grouped commits structure after {attempts} retries: {e}"
+            if _handle_validation_retry(
+                attempts,
+                content_retry_budget,
+                raw_response,
+                feedback,
+                error_msg,
+                conversation_messages,
+                quiet,
+                "Structure validation failed, asking model to fix...",
+            ):
+                sys.exit(1)
+            continue
+
+        staged_set = set(get_staged_files(existing_only=False))
+        ok, feedback, detail_msg = _validate_grouped_files_or_feedback(staged_set, parsed)
+        if not ok:
+            attempts += 1
+            error_msg = (
+                f"Grouped commits file set mismatch after {attempts} retries{': ' + detail_msg if detail_msg else ''}"
+            )
+            if _handle_validation_retry(
+                attempts,
+                content_retry_budget,
+                raw_response,
+                feedback,
+                error_msg,
+                conversation_messages,
+                quiet,
+                "File coverage mismatch, asking model to fix...",
+            ):
+                sys.exit(1)
+            continue
+
+        grouped_result = parsed
+        conversation_messages.append({"role": "assistant", "content": raw_response})
+
+        if not quiet:
+            console.print(f"[green]✔ Generated commit messages with {provider} {model_name}[/green]")
+            num_commits = len(grouped_result["commits"])
+            console.print(f"[bold green]Proposed Commits ({num_commits}):[/bold green]\n")
+            for idx, commit in enumerate(grouped_result["commits"], 1):
+                files = commit["files"]
+                files_display = ", ".join(files)
+                console.print(f"[dim]{files_display}[/dim]")
+                commit_msg = commit["message"]
+                console.print(Panel(commit_msg, title=f"Commit Message {idx}/{num_commits}", border_style="cyan"))
+                console.print()
+
+            completion_tokens = count_tokens(raw_response, model)
+            total_tokens = prompt_tokens + completion_tokens
+            console.print(
+                f"[dim]Token usage: {prompt_tokens} prompt + {completion_tokens} completion = {total_tokens} total[/dim]"
+            )
+
+        if require_confirmation:
+            accepted = False
+            num_commits = len(grouped_result["commits"]) if grouped_result else 0
+            while True:
+                response = click.prompt(
+                    f"Proceed with {num_commits} commits above? [y/n/r/<feedback>]",
+                    type=str,
+                    show_default=False,
+                ).strip()
+                response_lower = response.lower()
+
+                if response_lower in ["y", "yes"]:
+                    accepted = True
+                    break
+                if response_lower in ["n", "no"]:
+                    console.print("[yellow]Commits not accepted. Exiting...[/yellow]")
+                    sys.exit(0)
+                if response == "":
+                    continue
+                if response_lower in ["r", "reroll"]:
+                    feedback_message = "Please provide alternative commit groupings using the same repository context."
+                    console.print("[cyan]Regenerating commit groups...[/cyan]")
+                    conversation_messages.append({"role": "user", "content": feedback_message})
+                    console.print()
+                    attempts = 0
+                    break
+
+                feedback_message = f"Please revise the commit groupings based on this feedback: {response}"
+                console.print(f"[cyan]Regenerating commit groups with feedback: {response}[/cyan]")
+                conversation_messages.append({"role": "user", "content": feedback_message})
+                console.print()
+                attempts = 0
+                break
+
+            if not accepted:
+                continue
+
+        num_commits = len(grouped_result["commits"]) if grouped_result else 0
+        if dry_run:
+            console.print(f"[yellow]Dry run: Would create {num_commits} commits[/yellow]")
+            for idx, commit in enumerate(grouped_result["commits"], 1):
+                console.print(f"\n[cyan]Commit {idx}/{num_commits}:[/cyan]")
+                console.print(f" Files: {', '.join(commit['files'])}")
+                console.print(f" Message: {commit['message'][:50]}...")
+        else:
+            original_staged_files = get_staged_files(existing_only=False)
+            original_staged_diff = run_git_command(["diff", "--cached", "--binary"], silent=True)
+            run_git_command(["reset", "HEAD"])
+
+            try:
+                # Detect file renames to handle them properly
+                rename_mappings = detect_rename_mappings(original_staged_diff)
+
+                for idx, commit in enumerate(grouped_result["commits"], 1):
+                    try:
+                        for file_path in commit["files"]:
+                            # Check if this file is the destination of a rename
+                            if file_path in rename_mappings:
+                                old_file = rename_mappings[file_path]
+                                # For renames, stage both the old file (for deletion) and new file
+                                # This ensures the complete rename operation is preserved
+                                run_git_command(["add", "-A", old_file])
+                                run_git_command(["add", "-A", file_path])
+                            else:
+                                run_git_command(["add", "-A", file_path])
+                        execute_commit(commit["message"], no_verify, hook_timeout)
+                        console.print(f"[green]✓ Commit {idx}/{num_commits} created[/green]")
+                    except Exception as e:
+                        console.print(f"[red]✗ Failed at commit {idx}/{num_commits}: {e}[/red]")
+                        console.print(f"[yellow]Completed {idx - 1}/{num_commits} commits.[/yellow]")
+                        if idx == 1:
+                            console.print("[yellow]Restoring original staging area...[/yellow]")
+                            restore_staging(original_staged_files, original_staged_diff)
+                            console.print("[green]Original staging area restored.[/green]")
+                        sys.exit(1)
+            except KeyboardInterrupt:
+                console.print("\n[yellow]Interrupted by user. Restoring original staging area...[/yellow]")
+                restore_staging(original_staged_files, original_staged_diff)
+                console.print("[green]Original staging area restored.[/green]")
+                sys.exit(1)
+
+        if push:
+            try:
+                if dry_run:
+                    console.print("[yellow]Dry run: Would push changes[/yellow]")
+                    sys.exit(0)
+                if push_changes():
+                    logger.info("Changes pushed successfully")
+                    console.print("[green]Changes pushed successfully[/green]")
+                else:
+                    console.print(
+                        "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
+                    )
+                    sys.exit(1)
+            except Exception as e:
+                console.print(f"[red]Error pushing changes: {e}[/red]")
+                sys.exit(1)
+
+        sys.exit(0)
+
+
+def execute_single_commit_workflow(
+    *,
+    system_prompt: str,
+    user_prompt: str,
+    model: str,
+    temperature: float,
+    max_output_tokens: int,
+    max_retries: int,
+    require_confirmation: bool,
+    quiet: bool,
+    no_verify: bool,
+    dry_run: bool,
+    message_only: bool = False,
+    push: bool,
+    show_prompt: bool,
+    hook_timeout: int = 120,
+    interactive: bool = False,
+) -> None:
+    if show_prompt:
+        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
+        console.print(Panel(full_prompt, title="Prompt for LLM", border_style="bright_blue"))
+
+    conversation_messages: list[dict[str, str]] = []
+    if system_prompt:
+        conversation_messages.append({"role": "system", "content": system_prompt})
+    conversation_messages.append({"role": "user", "content": user_prompt})
+
+    _parse_model_identifier(model)
+
+    # Generate interactive questions if enabled
+    if interactive and not message_only:
+        try:
+            # Extract git data from the user prompt for question generation
+            status_match = None
+            diff_match = None
+            diff_stat_match = None
+
+            import re
+
+            status_match = re.search(r"<git_status>\n(.*?)\n</git_status>", user_prompt, re.DOTALL)
+            diff_match = re.search(r"<git_diff>\n(.*?)\n</git_diff>", user_prompt, re.DOTALL)
+            diff_stat_match = re.search(r"<git_diff_stat>\n(.*?)\n</git_diff_stat>", user_prompt, re.DOTALL)
+
+            status = status_match.group(1) if status_match else ""
+            diff = diff_match.group(1) if diff_match else ""
+            diff_stat = diff_stat_match.group(1) if diff_stat_match else ""
+
+            # Extract hint text if present
+            hint_match = re.search(r"<hint_text>(.*?)</hint_text>", user_prompt, re.DOTALL)
+            hint = hint_match.group(1) if hint_match else ""
+
+            questions = generate_contextual_questions(
+                model=model,
+                status=status,
+                processed_diff=diff,
+                diff_stat=diff_stat,
+                hint=hint,
+                temperature=temperature,
+                max_tokens=max_output_tokens,
+                max_retries=max_retries,
+                quiet=quiet,
+            )
+
+            if questions:
+                # Collect answers interactively
+                answers = collect_interactive_answers(questions)
+
+                if answers is None:
+                    # User aborted interactive mode
+                    if not quiet:
+                        console.print("[yellow]Proceeding with commit without additional context[/yellow]\n")
+                elif answers:
+                    # User provided some answers, format them for the prompt
+                    answers_context = format_answers_for_prompt(answers)
+                    enhanced_user_prompt = user_prompt + answers_context
+
+                    # Update the conversation messages with the enhanced prompt
+                    if conversation_messages and conversation_messages[-1]["role"] == "user":
+                        conversation_messages[-1]["content"] = enhanced_user_prompt
+
+                    logger.info(f"Collected answers for {len(answers)} questions")
+                else:
+                    # User skipped all questions
+                    if not quiet:
+                        console.print("[dim]No answers provided, proceeding with original context[/dim]\n")
+
+        except Exception as e:
+            logger.warning(f"Failed to generate contextual questions, proceeding without them: {e}")
+            if not quiet:
+                console.print("[yellow]⚠️ Could not generate contextual questions, proceeding normally[/yellow]\n")
+
+    first_iteration = True
+    while True:
+        prompt_tokens = count_tokens(conversation_messages, model)
+        if first_iteration:
+            warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
+            assert warning_limit_val is not None
+            warning_limit = int(warning_limit_val)
+            if not check_token_warning(prompt_tokens, warning_limit, require_confirmation):
+                sys.exit(0)
+            first_iteration = False
+
+        raw_commit_message = generate_commit_message(
+            model=model,
+            prompt=conversation_messages,
+            temperature=temperature,
+            max_tokens=max_output_tokens,
+            max_retries=max_retries,
+            quiet=quiet or message_only,
+        )
+        commit_message = clean_commit_message(raw_commit_message)
+        logger.info("Generated commit message:")
+        logger.info(commit_message)
+        conversation_messages.append({"role": "assistant", "content": commit_message})
+
+        if message_only:
+            # Output only the commit message without any formatting
+            print(commit_message)
+            sys.exit(0)
+
+        display_commit_message(commit_message, prompt_tokens, model, quiet)
+
+        if require_confirmation:
+            decision, commit_message, conversation_messages = handle_confirmation_loop(
+                commit_message, conversation_messages, quiet, model
+            )
+            if decision == "no":
+                console.print("[yellow]Prompt not accepted. Exiting...[/yellow]")
+                sys.exit(0)
+            elif decision == "yes":
+                break
+        else:
+            break
+
+    if dry_run:
+        console.print("[yellow]Dry run: Commit message generated but not applied[/yellow]")
+        console.print("Would commit with message:")
+        console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
+        staged_files = get_staged_files(existing_only=False)
+        console.print(f"Would commit {len(staged_files)} files")
+        logger.info(f"Would commit {len(staged_files)} files")
+    else:
+        execute_commit(commit_message, no_verify, hook_timeout)
+
+    if push:
+        try:
+            if dry_run:
+                staged_files = get_staged_files(existing_only=False)
+                logger.info("Dry run: Would push changes")
+                logger.info("Would push with message:")
+                logger.info(commit_message)
+                logger.info(f"Would push {len(staged_files)} files")
+                console.print("[yellow]Dry run: Would push changes[/yellow]")
+                console.print("Would push with message:")
+                console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
+                console.print(f"Would push {len(staged_files)} files")
+                sys.exit(0)
+            if push_changes():
+                logger.info("Changes pushed successfully")
+                console.print("[green]Changes pushed successfully[/green]")
+            else:
+                console.print(
+                    "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
+                )
+                sys.exit(1)
+        except Exception as e:
+            console.print(f"[red]Error pushing changes: {e}[/red]")
+            sys.exit(1)
+
+    if not quiet:
+        logger.info("Successfully committed changes with message:")
+        logger.info(commit_message)
+        if push:
+            logger.info("Changes pushed to remote.")
+    sys.exit(0)
+
+
+def generate_contextual_questions(
+    model: str,
+    status: str,
+    processed_diff: str,
+    diff_stat: str = "",
+    hint: str = "",
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> list[str]:
+    """Generate contextual questions about staged changes when interactive mode is enabled.
+
+    Args:
+        model: The model to use in provider:model_name format
+        status: Git status output
+        processed_diff: Git diff output, already preprocessed
+        diff_stat: Git diff stat output showing file changes summary
+        hint: Optional hint to guide the question generation
+        temperature: Controls randomness for generation
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A list of contextual questions about the staged changes
+
+    Raises:
+        AIError: If question generation fails after max_retries attempts
+    """
+    from gac.prompt import build_question_generation_prompt
+
+    try:
+        # Build prompts for question generation
+        system_prompt, user_prompt = build_question_generation_prompt(
+            status=status,
+            processed_diff=processed_diff,
+            diff_stat=diff_stat,
+            hint=hint,
+        )
+
+        # Generate questions using existing infrastructure
+        logger.info("Generating contextual questions about staged changes...")
+        questions_text = generate_commit_message(
+            model=model,
+            prompt=(system_prompt, user_prompt),
+            temperature=temperature,
+            max_tokens=max_tokens,
+            max_retries=max_retries,
+            quiet=quiet,
+            skip_success_message=True,  # Don't show "Generated commit message" for questions
+            task_description="contextual questions",
+        )
+
+        # Parse the response to extract individual questions
+        questions = _parse_questions_from_response(questions_text)
+
+        logger.info(f"Generated {len(questions)} contextual questions")
+        return questions
+
+    except Exception as e:
+        logger.error(f"Failed to generate contextual questions: {e}")
+        raise AIError.model_error(f"Failed to generate contextual questions: {e}") from e
+
+
+def _parse_questions_from_response(response: str) -> list[str]:
+    """Parse the AI response to extract individual questions from a numbered list.
+
+    Args:
+        response: The raw response from the AI model
+
+    Returns:
+        A list of cleaned questions
+    """
+    import re
+
+    questions = []
+    lines = response.strip().split("\n")
+
+    for line in lines:
+        line = line.strip()
+        if not line:
+            continue
+
+        # Match numbered list format (e.g., "1. Question text?" or "1) Question text?")
+        match = re.match(r"^\d+\.\s+(.+)$", line)
+        if not match:
+            match = re.match(r"^\d+\)\s+(.+)$", line)
+
+        if match:
+            question = match.group(1).strip()
+            # Remove any leading symbols like •, -, *
+            question = re.sub(r"^[•\-*]\s+", "", question)
+            if question and question.endswith("?"):
+                questions.append(question)
+        elif line.endswith("?") and len(line) > 5:  # Fallback for non-numbered questions
+            questions.append(line)
+
+    return questions
 
 
 def main(
     stage_all: bool = False,
+    group: bool = False,
+    interactive: bool = False,
     model: str | None = None,
    hint: str = "",
    one_liner: bool = False,
    show_prompt: bool = False,
-
+    infer_scope: bool = False,
     require_confirmation: bool = True,
     push: bool = False,
     quiet: bool = False,
     dry_run: bool = False,
+    message_only: bool = False,
+    verbose: bool = False,
     no_verify: bool = False,
+    skip_secret_scan: bool = False,
+    language: str | None = None,
+    hook_timeout: int = 120,
 ) -> None:
     """Main application logic for gac."""
     try:
@@ -51,239 +762,261 @@ def main(
         handle_error(GitError("Not in a git repository"), exit_program=True)
 
         if model is None:
-
-            if
+            model_from_config = config["model"]
+            if model_from_config is None:
                 handle_error(
                     AIError.model_error(
-                        "
+                        "gac init hasn't been run yet. Please run 'gac init' to set up your configuration, then try again."
                     ),
                     exit_program=True,
                 )
+            model = str(model_from_config)
+
+        temperature_val = config["temperature"]
+        assert temperature_val is not None
+        temperature = float(temperature_val)
+
+        max_tokens_val = config["max_output_tokens"]
+        assert max_tokens_val is not None
+        max_output_tokens = int(max_tokens_val)
 
-
-
-        max_retries =
+        max_retries_val = config["max_retries"]
+        assert max_retries_val is not None
+        max_retries = int(max_retries_val)
 
         if stage_all and (not dry_run):
             logger.info("Staging all changes")
             run_git_command(["add", "--all"])
 
-        # Check for staged files
         staged_files = get_staged_files(existing_only=False)
+
+        if group:
+            num_files = len(staged_files)
+            multiplier = min(5, 2 + (num_files // 10))
+            max_output_tokens *= multiplier
+            logger.debug(f"Grouped mode: scaling max_output_tokens by {multiplier}x for {num_files} files")
+
         if not staged_files:
-            console = Console()
             console.print(
                 "[yellow]No staged changes found. Stage your changes with git add first or use --add-all.[/yellow]"
             )
             sys.exit(0)
 
-        # Run pre-commit hooks before doing expensive operations
         if not no_verify and not dry_run:
-            if not
-                console
+            if not run_lefthook_hooks(hook_timeout):
+                console.print("[red]Lefthook hooks failed. Please fix the issues and try again.[/red]")
+                console.print("[yellow]You can use --no-verify to skip pre-commit and lefthook hooks.[/yellow]")
+                sys.exit(1)
+
+            if not run_pre_commit_hooks(hook_timeout):
                 console.print("[red]Pre-commit hooks failed. Please fix the issues and try again.[/red]")
-                console.print("[yellow]You can use --no-verify to skip pre-commit hooks.[/yellow]")
+                console.print("[yellow]You can use --no-verify to skip pre-commit and lefthook hooks.[/yellow]")
                 sys.exit(1)
 
-        status =
+        status = get_staged_status()
         diff = run_git_command(["diff", "--staged"])
         diff_stat = " " + run_git_command(["diff", "--stat", "--cached"])
 
-
+        if not skip_secret_scan:
+            logger.info("Scanning staged changes for potential secrets...")
+            secrets = scan_staged_diff(diff)
+            if secrets:
+                if not quiet:
+                    console.print("\n[bold red]⚠️ SECURITY WARNING: Potential secrets detected![/bold red]")
+                    console.print("[red]The following sensitive information was found in your staged changes:[/red]\n")
+
+                for secret in secrets:
+                    location = f"{secret.file_path}:{secret.line_number}" if secret.line_number else secret.file_path
+                    if not quiet:
+                        console.print(f" • [yellow]{secret.secret_type}[/yellow] in [cyan]{location}[/cyan]")
+                        console.print(f" Match: [dim]{secret.matched_text}[/dim]\n")
+
+                if not quiet:
+                    console.print("\n[bold]Options:[/bold]")
+                    console.print(" \\[a] Abort commit (recommended)")
+                    console.print(" \\[c] [yellow]Continue anyway[/yellow] (not recommended)")
+                    console.print(" \\[r] Remove affected file(s) and continue")
+
+                try:
+                    choice = (
+                        click.prompt(
+                            "\nChoose an option",
+                            type=click.Choice(["a", "c", "r"], case_sensitive=False),
+                            default="a",
+                            show_choices=True,
+                            show_default=True,
+                        )
+                        .strip()
+                        .lower()
+                    )
+                except (EOFError, KeyboardInterrupt):
+                    console.print("\n[red]Aborted by user.[/red]")
+                    sys.exit(0)
+
+                if choice == "a":
+                    console.print("[yellow]Commit aborted.[/yellow]")
+                    sys.exit(0)
+                elif choice == "c":
+                    console.print("[bold yellow]⚠️ Continuing with potential secrets in commit...[/bold yellow]")
+                    logger.warning("User chose to continue despite detected secrets")
+                elif choice == "r":
+                    affected_files = get_affected_files(secrets)
+                    for file_path in affected_files:
+                        try:
+                            run_git_command(["reset", "HEAD", file_path])
+                            console.print(f"[green]Unstaged: {file_path}[/green]")
+                        except GitError as e:
+                            console.print(f"[red]Failed to unstage {file_path}: {e}[/red]")
+
+                    # Check if there are still staged files
+                    remaining_staged = get_staged_files(existing_only=False)
+                    if not remaining_staged:
+                        console.print("[yellow]No files remain staged. Commit aborted.[/yellow]")
+                        sys.exit(0)
+
+                    console.print(f"[green]Continuing with {len(remaining_staged)} staged file(s)...[/green]")
+                    status = get_staged_status()
+                    diff = run_git_command(["diff", "--staged"])
+                    diff_stat = " " + run_git_command(["diff", "--stat", "--cached"])
+            else:
+                logger.info("No secrets detected in staged changes")
+
         logger.debug(f"Preprocessing diff ({len(diff)} characters)")
-
-        processed_diff = preprocess_diff(diff, token_limit=Utility.DEFAULT_DIFF_TOKEN_LIMIT, model=
+        assert model is not None
+        processed_diff = preprocess_diff(diff, token_limit=Utility.DEFAULT_DIFF_TOKEN_LIMIT, model=model)
         logger.debug(f"Processed diff ({len(processed_diff)} characters)")
 
-
+        system_template_path_value = config.get("system_prompt_path")
+        system_template_path: str | None = (
+            system_template_path_value if isinstance(system_template_path_value, str) else None
+        )
+
+        if language is None:
+            language_value = config.get("language")
+            language = language_value if isinstance(language_value, str) else None
+
+        translate_prefixes_value = config.get("translate_prefixes")
+        translate_prefixes: bool = bool(translate_prefixes_value) if isinstance(translate_prefixes_value, bool) else False
+
+        system_prompt, user_prompt = build_prompt(
             status=status,
             processed_diff=processed_diff,
             diff_stat=diff_stat,
             one_liner=one_liner,
             hint=hint,
-
+            infer_scope=infer_scope,
+            verbose=verbose,
+            system_template_path=system_template_path,
+            language=language,
+            translate_prefixes=translate_prefixes,
         )
 
-        if
-
-
-
-
-
-
-
+        if group:
+            from gac.prompt import build_group_prompt
+
+            system_prompt, user_prompt = build_group_prompt(
+                status=status,
+                processed_diff=processed_diff,
+                diff_stat=diff_stat,
+                one_liner=one_liner,
+                hint=hint,
+                infer_scope=infer_scope,
+                verbose=verbose,
+                system_template_path=system_template_path,
+                language=language,
+                translate_prefixes=translate_prefixes,
             )
 
-
-
-
-
-
-
-
-
-
+            try:
+                execute_grouped_commits_workflow(
+                    system_prompt=system_prompt,
+                    user_prompt=user_prompt,
+                    model=model,
+                    temperature=temperature,
+                    max_output_tokens=max_output_tokens,
+                    max_retries=max_retries,
+                    require_confirmation=require_confirmation,
+                    quiet=quiet,
+                    no_verify=no_verify,
+                    dry_run=dry_run,
+                    push=push,
+                    show_prompt=show_prompt,
+                    hook_timeout=hook_timeout,
+                    interactive=interactive,
+                    message_only=message_only,
                 )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            )
-
-
-
-
-
-            "
-
-
-
-
-
-
-            sys.exit(0)
-        elif response.lower() == "r" or response.lower().startswith("r ") or response.lower() == "reroll":
-            # Parse the reroll command for optional feedback
-            if response.lower() == "r" or response.lower() == "reroll":
-                # Simple reroll without feedback
-                reroll_feedback = ""
-                console.print("[cyan]Regenerating commit message...[/cyan]")
-            else:
-                # Extract feedback from "r <feedback>"
-                reroll_feedback = response[2:].strip()  # Remove "r " prefix
-                console.print(f"[cyan]Regenerating commit message with feedback: {reroll_feedback}[/cyan]")
-
-            # Combine hints if reroll feedback provided
-            combined_hint = hint
-            if reroll_feedback:
-                # Create conversational prompt with previous attempt and feedback
-                conversational_hint = f"Previous attempt: '{commit_message}'. User feedback: {reroll_feedback}. Please revise accordingly."
-
-                if hint:
-                    combined_hint = f"{hint}. {conversational_hint}"
-                else:
-                    combined_hint = conversational_hint
-
-            # Regenerate prompt with conversational feedback
-            reroll_prompt = build_prompt(
-                status=status,
-                processed_diff=processed_diff,
-                diff_stat=diff_stat,
-                one_liner=one_liner,
-                hint=combined_hint,
-                scope=scope,
-            )
-        else:
-            # No hint given, just reroll with same prompt
-            reroll_prompt = prompt
-
-        console.print()  # Add blank line for readability
-
-        # Generate new message
-        commit_message = generate_commit_message(
+            except AIError as e:
+                logger.error(str(e))
+                console.print(f"[red]Failed to generate grouped commits: {str(e)}[/red]")
+                sys.exit(1)
+        else:
+            try:
+                execute_single_commit_workflow(
+                    system_prompt=system_prompt,
+                    user_prompt=user_prompt,
+                    model=model,
+                    temperature=temperature,
+                    max_output_tokens=max_output_tokens,
+                    max_retries=max_retries,
+                    require_confirmation=require_confirmation,
+                    quiet=quiet,
+                    no_verify=no_verify,
+                    dry_run=dry_run,
+                    message_only=message_only,
+                    push=push,
+                    show_prompt=show_prompt,
+                    hook_timeout=hook_timeout,
+                    interactive=interactive,
+                )
+            except AIError as e:
+                # Check if this is a Claude Code OAuth token expiration
+                if (
+                    e.error_type == "authentication"
+                    and model.startswith("claude-code:")
+                    and ("expired" in str(e).lower() or "oauth" in str(e).lower())
+                ):
+                    logger.error(str(e))
+                    console.print("[yellow]⚠ Claude Code OAuth token has expired[/yellow]")
+                    console.print("[cyan]🔐 Starting automatic re-authentication...[/cyan]")
+
+                    try:
+                        from gac.oauth.claude_code import authenticate_and_save
+
+                        if authenticate_and_save(quiet=quiet):
+                            console.print("[green]✓ Re-authentication successful![/green]")
+                            console.print("[cyan]Retrying commit...[/cyan]\n")
+
+                            # Retry the commit workflow
+                            execute_single_commit_workflow(
+                                system_prompt=system_prompt,
+                                user_prompt=user_prompt,
                                 model=model,
-            prompt=reroll_prompt,
                                 temperature=temperature,
-
+                                max_output_tokens=max_output_tokens,
                                 max_retries=max_retries,
+                                require_confirmation=require_confirmation,
                                 quiet=quiet,
+                                no_verify=no_verify,
+                                dry_run=dry_run,
+                                message_only=message_only,
+                                push=push,
+                                show_prompt=show_prompt,
+                                hook_timeout=hook_timeout,
+                                interactive=interactive,
                             )
-        commit_message = clean_commit_message(commit_message)
-        break  # Exit inner loop, continue outer loop
                         else:
-            console.print(
-
-            )
-
-
-
-
+                            console.print("[red]Re-authentication failed.[/red]")
+                            console.print("[yellow]Run 'gac model' to re-authenticate manually.[/yellow]")
+                            sys.exit(1)
+                    except Exception as auth_error:
+                        console.print(f"[red]Re-authentication error: {auth_error}[/red]")
+                        console.print("[yellow]Run 'gac model' to re-authenticate manually.[/yellow]")
+                        sys.exit(1)
                 else:
-            #
-
-
-        if dry_run:
-            console.print("[yellow]Dry run: Commit message generated but not applied[/yellow]")
-            console.print("Would commit with message:")
-            console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
-            staged_files = get_staged_files(existing_only=False)
-            console.print(f"Would commit {len(staged_files)} files")
-            logger.info(f"Would commit {len(staged_files)} files")
-        else:
-            commit_args = ["commit", "-m", commit_message]
-            if no_verify:
-                commit_args.append("--no-verify")
-            run_git_command(commit_args)
-            logger.info("Commit created successfully")
-            console.print("[green]Commit created successfully[/green]")
-    except AIError as e:
-        logger.error(str(e))
-        console = Console()
-        console.print(f"[red]Failed to generate commit message: {str(e)}[/red]")
-        sys.exit(1)
-
-    if push:
-        try:
-            if dry_run:
-                staged_files = get_staged_files(existing_only=False)
-
-                logger.info("Dry run: Would push changes")
-                logger.info("Would push with message:")
-                logger.info(commit_message)
-                logger.info(f"Would push {len(staged_files)} files")
-
-                console.print("[yellow]Dry run: Would push changes[/yellow]")
-                console.print("Would push with message:")
-                console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
-                console.print(f"Would push {len(staged_files)} files")
-                sys.exit(0)
-
-            if push_changes():
-                logger.info("Changes pushed successfully")
-                console.print("[green]Changes pushed successfully[/green]")
-            else:
-                console.print(
-                    "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
-                )
+                    # Non-Claude Code error or non-auth error
+                    logger.error(str(e))
+                    console.print(f"[red]Failed to generate commit message: {str(e)}[/red]")
                     sys.exit(1)
-        except Exception as e:
-            console.print(f"[red]Error pushing changes: {e}[/red]")
-            sys.exit(1)
-
-        if not quiet:
-            logger.info("Successfully committed changes with message:")
-            logger.info(commit_message)
-            if push:
-                logger.info("Changes pushed to remote.")
-        sys.exit(0)
 
 
 if __name__ == "__main__":
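For reference, here is a minimal sketch of the grouped-commits contract that execute_grouped_commits_workflow enforces via _validate_grouped_files_or_feedback: the model must return JSON whose commits entries exactly partition the staged files (no duplicates, no missing files, nothing unstaged). The file names and messages below are illustrative only:

    import json

    staged = {"gac/main.py", "gac/security.py", "tests/test_security.py"}  # hypothetical staged set

    raw_response = json.dumps({
        "commits": [
            {"files": ["gac/security.py", "tests/test_security.py"],
             "message": "feat(security): scan staged diffs for secrets"},
            {"files": ["gac/main.py"],
             "message": "refactor(main): split single and grouped workflows"},
        ]
    })

    # The workflow extracts the outermost {...} before parsing, tolerating stray text.
    extract = raw_response[raw_response.find("{"): raw_response.rfind("}") + 1]
    parsed = json.loads(extract)

    all_files = [f for commit in parsed["commits"] for f in commit["files"]]
    assert len(all_files) == len(set(all_files))  # no file listed in two commits
    assert set(all_files) == staged               # exact coverage of the staged set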