gac 1.13.0__py3-none-any.whl → 3.8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. gac/__version__.py +1 -1
  2. gac/ai.py +33 -47
  3. gac/ai_utils.py +113 -41
  4. gac/auth_cli.py +214 -0
  5. gac/cli.py +72 -2
  6. gac/config.py +63 -6
  7. gac/config_cli.py +26 -5
  8. gac/constants.py +178 -2
  9. gac/git.py +158 -12
  10. gac/init_cli.py +40 -125
  11. gac/language_cli.py +378 -0
  12. gac/main.py +868 -158
  13. gac/model_cli.py +429 -0
  14. gac/oauth/__init__.py +27 -0
  15. gac/oauth/claude_code.py +464 -0
  16. gac/oauth/qwen_oauth.py +323 -0
  17. gac/oauth/token_store.py +81 -0
  18. gac/preprocess.py +3 -3
  19. gac/prompt.py +573 -226
  20. gac/providers/__init__.py +49 -0
  21. gac/providers/anthropic.py +11 -1
  22. gac/providers/azure_openai.py +101 -0
  23. gac/providers/cerebras.py +11 -1
  24. gac/providers/chutes.py +11 -1
  25. gac/providers/claude_code.py +112 -0
  26. gac/providers/custom_anthropic.py +6 -2
  27. gac/providers/custom_openai.py +6 -3
  28. gac/providers/deepseek.py +11 -1
  29. gac/providers/fireworks.py +11 -1
  30. gac/providers/gemini.py +11 -1
  31. gac/providers/groq.py +5 -1
  32. gac/providers/kimi_coding.py +67 -0
  33. gac/providers/lmstudio.py +12 -1
  34. gac/providers/minimax.py +11 -1
  35. gac/providers/mistral.py +48 -0
  36. gac/providers/moonshot.py +48 -0
  37. gac/providers/ollama.py +11 -1
  38. gac/providers/openai.py +11 -1
  39. gac/providers/openrouter.py +11 -1
  40. gac/providers/qwen.py +76 -0
  41. gac/providers/replicate.py +110 -0
  42. gac/providers/streamlake.py +11 -1
  43. gac/providers/synthetic.py +11 -1
  44. gac/providers/together.py +11 -1
  45. gac/providers/zai.py +11 -1
  46. gac/security.py +1 -1
  47. gac/utils.py +272 -4
  48. gac/workflow_utils.py +217 -0
  49. {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/METADATA +90 -27
  50. gac-3.8.1.dist-info/RECORD +56 -0
  51. {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/WHEEL +1 -1
  52. gac-1.13.0.dist-info/RECORD +0 -41
  53. {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/entry_points.txt +0 -0
  54. {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/licenses/LICENSE +0 -0
gac/main.py CHANGED
@@ -14,9 +14,11 @@ from gac.ai import generate_commit_message
 from gac.ai_utils import count_tokens
 from gac.config import load_config
 from gac.constants import EnvDefaults, Utility
-from gac.errors import AIError, GitError, handle_error
+from gac.errors import AIError, ConfigError, GitError, handle_error
 from gac.git import (
+    detect_rename_mappings,
     get_staged_files,
+    get_staged_status,
     push_changes,
     run_git_command,
     run_lefthook_hooks,
@@ -25,6 +27,15 @@ from gac.git import (
 from gac.preprocess import preprocess_diff
 from gac.prompt import build_prompt, clean_commit_message
 from gac.security import get_affected_files, scan_staged_diff
+from gac.workflow_utils import (
+    check_token_warning,
+    collect_interactive_answers,
+    display_commit_message,
+    execute_commit,
+    format_answers_for_prompt,
+    handle_confirmation_loop,
+    restore_staging,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -32,8 +43,701 @@ config = load_config()
 console = Console()  # Initialize console globally to prevent undefined access
 
 
+def _validate_grouped_files_or_feedback(staged: set[str], grouped_result: dict) -> tuple[bool, str, str]:
+    from collections import Counter
+
+    commits = grouped_result.get("commits", []) if isinstance(grouped_result, dict) else []
+    all_files: list[str] = []
+    for commit in commits:
+        files = commit.get("files", []) if isinstance(commit, dict) else []
+        all_files.extend([str(p) for p in files])
+
+    counts = Counter(all_files)
+    union_set = set(all_files)
+
+    duplicates = sorted([f for f, c in counts.items() if c > 1])
+    missing = sorted(staged - union_set)
+    unexpected = sorted(union_set - staged)
+
+    if not duplicates and not missing and not unexpected:
+        return True, "", ""
+
+    problems: list[str] = []
+    if missing:
+        problems.append(f"Missing: {', '.join(missing)}")
+    if unexpected:
+        problems.append(f"Not staged: {', '.join(unexpected)}")
+    if duplicates:
+        problems.append(f"Duplicates: {', '.join(duplicates)}")
+
+    feedback = f"{'; '.join(problems)}. Required files: {', '.join(sorted(staged))}. Respond with ONLY valid JSON."
+    return False, feedback, "; ".join(problems)
+
+
+def _parse_model_identifier(model: str) -> tuple[str, str]:
+    """Validate and split model identifier into provider and model name."""
+    normalized = model.strip()
+    if ":" not in normalized:
+        message = (
+            f"Invalid model format: '{model}'. Expected 'provider:model', e.g. 'openai:gpt-4o-mini'. "
+            "Use 'gac config set model <provider:model>' to update your configuration."
+        )
+        logger.error(message)
+        console.print(f"[red]{message}[/red]")
+        sys.exit(1)
+
+    provider, model_name = normalized.split(":", 1)
+    if not provider or not model_name:
+        message = (
+            f"Invalid model format: '{model}'. Both provider and model name are required "
+            "(example: 'anthropic:claude-haiku-4-5')."
+        )
+        logger.error(message)
+        console.print(f"[red]{message}[/red]")
+        sys.exit(1)
+
+    return provider, model_name
+
+
+def _handle_validation_retry(
+    attempts: int,
+    content_retry_budget: int,
+    raw_response: str,
+    feedback_message: str,
+    error_message: str,
+    conversation_messages: list[dict[str, str]],
+    quiet: bool,
+    retry_context: str,
+) -> bool:
+    """Handle validation retry logic. Returns True if should exit, False if should retry."""
+    conversation_messages.append({"role": "assistant", "content": raw_response})
+    conversation_messages.append({"role": "user", "content": feedback_message})
+    if attempts >= content_retry_budget:
+        logger.error(error_message)
+        console.print(f"\n[red]{error_message}[/red]")
+        console.print("\n[yellow]Raw model output:[/yellow]")
+        console.print(Panel(raw_response, title="Model Output", border_style="yellow"))
+        return True
+    if not quiet:
+        console.print(f"[yellow]Retry {attempts} of {content_retry_budget - 1}: {retry_context}[/yellow]")
+    return False
+
+
+def execute_grouped_commits_workflow(
+    *,
+    system_prompt: str,
+    user_prompt: str,
+    model: str,
+    temperature: float,
+    max_output_tokens: int,
+    max_retries: int,
+    require_confirmation: bool,
+    quiet: bool,
+    no_verify: bool,
+    dry_run: bool,
+    push: bool,
+    show_prompt: bool,
+    interactive: bool,
+    message_only: bool,
+    hook_timeout: int = 120,
+) -> None:
+    """Execute the grouped commits workflow."""
+    import json
+
+    from gac.ai import generate_grouped_commits
+
+    provider, model_name = _parse_model_identifier(model)
+
+    if show_prompt:
+        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
+        console.print(Panel(full_prompt, title="Prompt for LLM", border_style="bright_blue"))
+
+    conversation_messages: list[dict[str, str]] = []
+    if system_prompt:
+        conversation_messages.append({"role": "system", "content": system_prompt})
+    conversation_messages.append({"role": "user", "content": user_prompt})
+
+    _parse_model_identifier(model)
+
+    # Generate interactive questions if enabled
+    if interactive and not message_only:
+        try:
+            # Extract git data from the user prompt for question generation
+            status_match = None
+            diff_match = None
+            diff_stat_match = None
+
+            import re
+
+            status_match = re.search(r"<git_status>\n(.*?)\n</git_status>", user_prompt, re.DOTALL)
+            diff_match = re.search(r"<git_diff>\n(.*?)\n</git_diff>", user_prompt, re.DOTALL)
+            diff_stat_match = re.search(r"<git_diff_stat>\n(.*?)\n</git_diff_stat>", user_prompt, re.DOTALL)
+
+            status = status_match.group(1) if status_match else ""
+            diff = diff_match.group(1) if diff_match else ""
+            diff_stat = diff_stat_match.group(1) if diff_stat_match else ""
+
+            # Extract hint text if present
+            hint_match = re.search(r"<hint_text>(.*?)</hint_text>", user_prompt, re.DOTALL)
+            hint = hint_match.group(1) if hint_match else ""
+
+            questions = generate_contextual_questions(
+                model=model,
+                status=status,
+                processed_diff=diff,
+                diff_stat=diff_stat,
+                hint=hint,
+                temperature=temperature,
+                max_tokens=max_output_tokens,
+                max_retries=max_retries,
+                quiet=quiet,
+            )
+
+            if questions:
+                # Collect answers interactively
+                answers = collect_interactive_answers(questions)
+
+                if answers is None:
+                    # User aborted interactive mode
+                    if not quiet:
+                        console.print("[yellow]Proceeding with commit without additional context[/yellow]\n")
+                elif answers:
+                    # User provided some answers, format them for the prompt
+                    answers_context = format_answers_for_prompt(answers)
+                    enhanced_user_prompt = user_prompt + answers_context
+
+                    # Update the conversation messages with the enhanced prompt
+                    if conversation_messages and conversation_messages[-1]["role"] == "user":
+                        conversation_messages[-1]["content"] = enhanced_user_prompt
+
+                    logger.info(f"Collected answers for {len(answers)} questions")
+                else:
+                    # User skipped all questions
+                    if not quiet:
+                        console.print("[dim]No answers provided, proceeding with original context[/dim]\n")
+
+        except Exception as e:
+            logger.warning(f"Failed to generate contextual questions, proceeding without them: {e}")
+            if not quiet:
+                console.print("[yellow]⚠️ Could not generate contextual questions, proceeding normally[/yellow]\n")
+
+    first_iteration = True
+    content_retry_budget = max(3, int(max_retries))
+    attempts = 0
+
+    grouped_result: dict | None = None
+    raw_response: str = ""
+
+    while True:
+        prompt_tokens = count_tokens(conversation_messages, model)
+
+        if first_iteration:
+            warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
+            if warning_limit_val is None:
+                raise ConfigError("warning_limit_tokens configuration missing")
+            warning_limit = int(warning_limit_val)
+            if not check_token_warning(prompt_tokens, warning_limit, require_confirmation):
+                sys.exit(0)
+            first_iteration = False
+
+        raw_response = generate_grouped_commits(
+            model=model,
+            prompt=conversation_messages,
+            temperature=temperature,
+            max_tokens=max_output_tokens,
+            max_retries=max_retries,
+            quiet=quiet,
+            skip_success_message=True,
+        )
+
+        parsed: dict | None = None
+        extract = raw_response
+        first_brace = raw_response.find("{")
+        last_brace = raw_response.rfind("}")
+        if first_brace != -1 and last_brace != -1 and first_brace < last_brace:
+            extract = raw_response[first_brace : last_brace + 1]
+
+        try:
+            parsed = json.loads(extract)
+        except json.JSONDecodeError as e:
+            parsed = None
+            logger.debug(
+                f"JSON parsing failed: {e}. Extract length: {len(extract)}, Response length: {len(raw_response)}"
+            )
+
+        if parsed is None:
+            attempts += 1
+            feedback = "Your response was not valid JSON. Respond with ONLY valid JSON matching the expected schema. Do not include any commentary or code fences."
+            error_msg = f"Failed to parse LLM response as JSON after {attempts} retries."
+            if _handle_validation_retry(
+                attempts,
+                content_retry_budget,
+                raw_response,
+                feedback,
+                error_msg,
+                conversation_messages,
+                quiet,
+                "JSON parsing failed, asking model to fix...",
+            ):
+                sys.exit(1)
+            continue
+
+        try:
+            if "commits" not in parsed or not isinstance(parsed["commits"], list):
+                raise ValueError("Response missing 'commits' array")
+            if len(parsed["commits"]) == 0:
+                raise ValueError("No commits in response")
+            for idx, commit in enumerate(parsed["commits"]):
+                if "files" not in commit or not isinstance(commit["files"], list):
+                    raise ValueError(f"Commit {idx + 1} missing 'files' array")
+                if "message" not in commit or not isinstance(commit["message"], str):
+                    raise ValueError(f"Commit {idx + 1} missing 'message' string")
+                if len(commit["files"]) == 0:
+                    raise ValueError(f"Commit {idx + 1} has empty files list")
+                if not commit["message"].strip():
+                    raise ValueError(f"Commit {idx + 1} has empty message")
+        except (ValueError, TypeError) as e:
+            attempts += 1
+            feedback = f"Invalid response structure: {e}. Please return ONLY valid JSON following the schema with a non-empty 'commits' array of objects containing 'files' and 'message'."
+            error_msg = f"Invalid grouped commits structure after {attempts} retries: {e}"
+            if _handle_validation_retry(
+                attempts,
+                content_retry_budget,
+                raw_response,
+                feedback,
+                error_msg,
+                conversation_messages,
+                quiet,
+                "Structure validation failed, asking model to fix...",
+            ):
+                sys.exit(1)
+            continue
+
+        staged_set = set(get_staged_files(existing_only=False))
+        ok, feedback, detail_msg = _validate_grouped_files_or_feedback(staged_set, parsed)
+        if not ok:
+            attempts += 1
+            error_msg = (
+                f"Grouped commits file set mismatch after {attempts} retries{': ' + detail_msg if detail_msg else ''}"
+            )
+            if _handle_validation_retry(
+                attempts,
+                content_retry_budget,
+                raw_response,
+                feedback,
+                error_msg,
+                conversation_messages,
+                quiet,
+                "File coverage mismatch, asking model to fix...",
+            ):
+                sys.exit(1)
+            continue
+
+        grouped_result = parsed
+        conversation_messages.append({"role": "assistant", "content": raw_response})
+
+        if not quiet:
+            console.print(f"[green]✔ Generated commit messages with {provider} {model_name}[/green]")
+            num_commits = len(grouped_result["commits"])
+            console.print(f"[bold green]Proposed Commits ({num_commits}):[/bold green]\n")
+            for idx, commit in enumerate(grouped_result["commits"], 1):
+                files = commit["files"]
+                files_display = ", ".join(files)
+                console.print(f"[dim]{files_display}[/dim]")
+                commit_msg = commit["message"].strip()
+                console.print(Panel(commit_msg, title=f"Commit Message {idx}/{num_commits}", border_style="cyan"))
+                console.print()
+
+            completion_tokens = count_tokens(raw_response, model)
+            total_tokens = prompt_tokens + completion_tokens
+            console.print(
+                f"[dim]Token usage: {prompt_tokens} prompt + {completion_tokens} completion = {total_tokens} total[/dim]"
+            )
+
+        if require_confirmation:
+            accepted = False
+            num_commits = len(grouped_result["commits"]) if grouped_result else 0
+            while True:
+                response = click.prompt(
+                    f"Proceed with {num_commits} commits above? [y/n/r/<feedback>]",
+                    type=str,
+                    show_default=False,
+                ).strip()
+                response_lower = response.lower()
+
+                if response_lower in ["y", "yes"]:
+                    accepted = True
+                    break
+                if response_lower in ["n", "no"]:
+                    console.print("[yellow]Commits not accepted. Exiting...[/yellow]")
+                    sys.exit(0)
+                if response == "":
+                    continue
+                if response_lower in ["r", "reroll"]:
+                    feedback_message = "Please provide alternative commit groupings using the same repository context."
+                    console.print("[cyan]Regenerating commit groups...[/cyan]")
+                    conversation_messages.append({"role": "user", "content": feedback_message})
+                    console.print()
+                    attempts = 0
+                    break
+
+                feedback_message = f"Please revise the commit groupings based on this feedback: {response}"
+                console.print(f"[cyan]Regenerating commit groups with feedback: {response}[/cyan]")
+                conversation_messages.append({"role": "user", "content": feedback_message})
+                console.print()
+                attempts = 0
+                break
+
+            if not accepted:
+                continue
+
+        num_commits = len(grouped_result["commits"]) if grouped_result else 0
+        if dry_run:
+            console.print(f"[yellow]Dry run: Would create {num_commits} commits[/yellow]")
+            for idx, commit in enumerate(grouped_result["commits"], 1):
+                console.print(f"\n[cyan]Commit {idx}/{num_commits}:[/cyan]")
+                console.print(f"  Files: {', '.join(commit['files'])}")
+                console.print(f"  Message: {commit['message'].strip()[:50]}...")
+        else:
+            original_staged_files = get_staged_files(existing_only=False)
+            original_staged_diff = run_git_command(["diff", "--cached", "--binary"], silent=True)
+            run_git_command(["reset", "HEAD"])
+
+            try:
+                # Detect file renames to handle them properly
+                rename_mappings = detect_rename_mappings(original_staged_diff)
+
+                for idx, commit in enumerate(grouped_result["commits"], 1):
+                    try:
+                        for file_path in commit["files"]:
+                            # Check if this file is the destination of a rename
+                            if file_path in rename_mappings:
+                                old_file = rename_mappings[file_path]
+                                # For renames, stage both the old file (for deletion) and new file
+                                # This ensures the complete rename operation is preserved
+                                run_git_command(["add", "-A", old_file])
+                                run_git_command(["add", "-A", file_path])
+                            else:
+                                run_git_command(["add", "-A", file_path])
+                        execute_commit(commit["message"].strip(), no_verify, hook_timeout)
+                        console.print(f"[green]✓ Commit {idx}/{num_commits} created[/green]")
+                    except Exception as e:
+                        console.print(f"[red]✗ Failed at commit {idx}/{num_commits}: {e}[/red]")
+                        console.print(f"[yellow]Completed {idx - 1}/{num_commits} commits.[/yellow]")
+                        if idx == 1:
+                            console.print("[yellow]Restoring original staging area...[/yellow]")
+                            restore_staging(original_staged_files, original_staged_diff)
+                            console.print("[green]Original staging area restored.[/green]")
+                        sys.exit(1)
+            except KeyboardInterrupt:
+                console.print("\n[yellow]Interrupted by user. Restoring original staging area...[/yellow]")
+                restore_staging(original_staged_files, original_staged_diff)
+                console.print("[green]Original staging area restored.[/green]")
+                sys.exit(1)
+
+        if push:
+            try:
+                if dry_run:
+                    console.print("[yellow]Dry run: Would push changes[/yellow]")
+                    sys.exit(0)
+                if push_changes():
+                    logger.info("Changes pushed successfully")
+                    console.print("[green]Changes pushed successfully[/green]")
+                else:
+                    console.print(
+                        "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
+                    )
+                    sys.exit(1)
+            except Exception as e:
+                console.print(f"[red]Error pushing changes: {e}[/red]")
+                sys.exit(1)
+
+        sys.exit(0)
+
+
+def execute_single_commit_workflow(
+    *,
+    system_prompt: str,
+    user_prompt: str,
+    model: str,
+    temperature: float,
+    max_output_tokens: int,
+    max_retries: int,
+    require_confirmation: bool,
+    quiet: bool,
+    no_verify: bool,
+    dry_run: bool,
+    message_only: bool = False,
+    push: bool,
+    show_prompt: bool,
+    hook_timeout: int = 120,
+    interactive: bool = False,
+) -> None:
+    if show_prompt:
+        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
+        console.print(Panel(full_prompt, title="Prompt for LLM", border_style="bright_blue"))
+
+    conversation_messages: list[dict[str, str]] = []
+    if system_prompt:
+        conversation_messages.append({"role": "system", "content": system_prompt})
+    conversation_messages.append({"role": "user", "content": user_prompt})
+
+    _parse_model_identifier(model)
+
+    # Generate interactive questions if enabled
+    if interactive and not message_only:
+        try:
+            # Extract git data from the user prompt for question generation
+            status_match = None
+            diff_match = None
+            diff_stat_match = None
+
+            import re
+
+            status_match = re.search(r"<git_status>\n(.*?)\n</git_status>", user_prompt, re.DOTALL)
+            diff_match = re.search(r"<git_diff>\n(.*?)\n</git_diff>", user_prompt, re.DOTALL)
+            diff_stat_match = re.search(r"<git_diff_stat>\n(.*?)\n</git_diff_stat>", user_prompt, re.DOTALL)
+
+            status = status_match.group(1) if status_match else ""
+            diff = diff_match.group(1) if diff_match else ""
+            diff_stat = diff_stat_match.group(1) if diff_stat_match else ""
+
+            # Extract hint text if present
+            hint_match = re.search(r"<hint_text>(.*?)</hint_text>", user_prompt, re.DOTALL)
+            hint = hint_match.group(1) if hint_match else ""
+
+            questions = generate_contextual_questions(
+                model=model,
+                status=status,
+                processed_diff=diff,
+                diff_stat=diff_stat,
+                hint=hint,
+                temperature=temperature,
+                max_tokens=max_output_tokens,
+                max_retries=max_retries,
+                quiet=quiet,
+            )
+
+            if questions:
+                # Collect answers interactively
+                answers = collect_interactive_answers(questions)
+
+                if answers is None:
+                    # User aborted interactive mode
+                    if not quiet:
+                        console.print("[yellow]Proceeding with commit without additional context[/yellow]\n")
+                elif answers:
+                    # User provided some answers, format them for the prompt
+                    answers_context = format_answers_for_prompt(answers)
+                    enhanced_user_prompt = user_prompt + answers_context
+
+                    # Update the conversation messages with the enhanced prompt
+                    if conversation_messages and conversation_messages[-1]["role"] == "user":
+                        conversation_messages[-1]["content"] = enhanced_user_prompt
+
+                    logger.info(f"Collected answers for {len(answers)} questions")
+                else:
+                    # User skipped all questions
+                    if not quiet:
+                        console.print("[dim]No answers provided, proceeding with original context[/dim]\n")
+
+        except Exception as e:
+            logger.warning(f"Failed to generate contextual questions, proceeding without them: {e}")
+            if not quiet:
+                console.print("[yellow]⚠️ Could not generate contextual questions, proceeding normally[/yellow]\n")
+
+    first_iteration = True
+    while True:
+        prompt_tokens = count_tokens(conversation_messages, model)
+        if first_iteration:
+            warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
+            if warning_limit_val is None:
+                raise ConfigError("warning_limit_tokens configuration missing")
+            warning_limit = int(warning_limit_val)
+            if not check_token_warning(prompt_tokens, warning_limit, require_confirmation):
+                sys.exit(0)
+            first_iteration = False
+
+        raw_commit_message = generate_commit_message(
+            model=model,
+            prompt=conversation_messages,
+            temperature=temperature,
+            max_tokens=max_output_tokens,
+            max_retries=max_retries,
+            quiet=quiet or message_only,
+        )
+        commit_message = clean_commit_message(raw_commit_message)
+        logger.info("Generated commit message:")
+        logger.info(commit_message)
+        conversation_messages.append({"role": "assistant", "content": commit_message})
+
+        if message_only:
+            # Output only the commit message without any formatting
+            print(commit_message)
+            sys.exit(0)
+
+        display_commit_message(commit_message, prompt_tokens, model, quiet)
+
+        if require_confirmation:
+            decision, commit_message, conversation_messages = handle_confirmation_loop(
+                commit_message, conversation_messages, quiet, model
+            )
+            if decision == "no":
+                console.print("[yellow]Prompt not accepted. Exiting...[/yellow]")
+                sys.exit(0)
+            elif decision == "yes":
+                break
+        else:
+            break
+
+    if dry_run:
+        console.print("[yellow]Dry run: Commit message generated but not applied[/yellow]")
+        console.print("Would commit with message:")
+        console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
+        staged_files = get_staged_files(existing_only=False)
+        console.print(f"Would commit {len(staged_files)} files")
+        logger.info(f"Would commit {len(staged_files)} files")
+    else:
+        execute_commit(commit_message, no_verify, hook_timeout)
+
+    if push:
+        try:
+            if dry_run:
+                staged_files = get_staged_files(existing_only=False)
+                logger.info("Dry run: Would push changes")
+                logger.info("Would push with message:")
+                logger.info(commit_message)
+                logger.info(f"Would push {len(staged_files)} files")
+                console.print("[yellow]Dry run: Would push changes[/yellow]")
+                console.print("Would push with message:")
+                console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
+                console.print(f"Would push {len(staged_files)} files")
+                sys.exit(0)
+            if push_changes():
+                logger.info("Changes pushed successfully")
+                console.print("[green]Changes pushed successfully[/green]")
+            else:
+                console.print(
+                    "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
+                )
+                sys.exit(1)
+        except Exception as e:
+            console.print(f"[red]Error pushing changes: {e}[/red]")
+            sys.exit(1)
+
+    if not quiet:
+        logger.info("Successfully committed changes with message:")
+        logger.info(commit_message)
+        if push:
+            logger.info("Changes pushed to remote.")
+    sys.exit(0)
+
+
+def generate_contextual_questions(
+    model: str,
+    status: str,
+    processed_diff: str,
+    diff_stat: str = "",
+    hint: str = "",
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> list[str]:
+    """Generate contextual questions about staged changes when interactive mode is enabled.
+
+    Args:
+        model: The model to use in provider:model_name format
+        status: Git status output
+        processed_diff: Git diff output, already preprocessed
+        diff_stat: Git diff stat output showing file changes summary
+        hint: Optional hint to guide the question generation
+        temperature: Controls randomness for generation
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A list of contextual questions about the staged changes
+
+    Raises:
+        AIError: If question generation fails after max_retries attempts
+    """
+    from gac.prompt import build_question_generation_prompt
+
+    try:
+        # Build prompts for question generation
+        system_prompt, user_prompt = build_question_generation_prompt(
+            status=status,
+            processed_diff=processed_diff,
+            diff_stat=diff_stat,
+            hint=hint,
+        )
+
+        # Generate questions using existing infrastructure
+        logger.info("Generating contextual questions about staged changes...")
+        questions_text = generate_commit_message(
+            model=model,
+            prompt=(system_prompt, user_prompt),
+            temperature=temperature,
+            max_tokens=max_tokens,
+            max_retries=max_retries,
+            quiet=quiet,
+            skip_success_message=True,  # Don't show "Generated commit message" for questions
+            task_description="contextual questions",
+        )
+
+        # Parse the response to extract individual questions
+        questions = _parse_questions_from_response(questions_text)
+
+        logger.info(f"Generated {len(questions)} contextual questions")
+        return questions
+
+    except Exception as e:
+        logger.error(f"Failed to generate contextual questions: {e}")
+        raise AIError.model_error(f"Failed to generate contextual questions: {e}") from e
+
+
+def _parse_questions_from_response(response: str) -> list[str]:
+    """Parse the AI response to extract individual questions from a numbered list.
+
+    Args:
+        response: The raw response from the AI model
+
+    Returns:
+        A list of cleaned questions
+    """
+    import re
+
+    questions = []
+    lines = response.strip().split("\n")
+
+    for line in lines:
+        line = line.strip()
+        if not line:
+            continue
+
+        # Match numbered list format (e.g., "1. Question text?" or "1) Question text?")
+        match = re.match(r"^\d+\.\s+(.+)$", line)
+        if not match:
+            match = re.match(r"^\d+\)\s+(.+)$", line)
+
+        if match:
+            question = match.group(1).strip()
+            # Remove any leading symbols like •, -, *
+            question = re.sub(r"^[•\-*]\s+", "", question)
+            if question and question.endswith("?"):
+                questions.append(question)
+        elif line.endswith("?") and len(line) > 5:  # Fallback for non-numbered questions
+            questions.append(line)
+
+    return questions
+
+
 def main(
     stage_all: bool = False,
+    group: bool = False,
+    interactive: bool = False,
     model: str | None = None,
     hint: str = "",
     one_liner: bool = False,
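
The grouped workflow added above expects the model to reply with a single JSON object, and _validate_grouped_files_or_feedback accepts a grouping only when the listed files cover the staged set exactly, with no missing, unexpected, or duplicated paths. A minimal sketch of a payload that would pass validation (the file names and messages below are hypothetical examples, not output from the package):

    {
      "commits": [
        {"files": ["gac/git.py"], "message": "feat: detect rename mappings in staged diffs"},
        {"files": ["gac/main.py", "gac/workflow_utils.py"], "message": "refactor: split commit workflows"}
      ]
    }

Any mismatch is converted into corrective feedback, appended to the conversation, and retried; after content_retry_budget = max(3, max_retries) failed attempts the raw model output is printed and gac exits with status 1.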
@@ -43,9 +747,12 @@ def main(
     push: bool = False,
     quiet: bool = False,
     dry_run: bool = False,
+    message_only: bool = False,
     verbose: bool = False,
     no_verify: bool = False,
     skip_secret_scan: bool = False,
+    language: str | None = None,
+    hook_timeout: int = 120,
 ) -> None:
     """Main application logic for gac."""
     try:
@@ -68,48 +775,53 @@ def main(
         model = str(model_from_config)
 
         temperature_val = config["temperature"]
-        assert temperature_val is not None
+        if temperature_val is None:
+            raise ConfigError("temperature configuration missing")
         temperature = float(temperature_val)
 
         max_tokens_val = config["max_output_tokens"]
-        assert max_tokens_val is not None
+        if max_tokens_val is None:
+            raise ConfigError("max_output_tokens configuration missing")
         max_output_tokens = int(max_tokens_val)
 
         max_retries_val = config["max_retries"]
-        assert max_retries_val is not None
+        if max_retries_val is None:
+            raise ConfigError("max_retries configuration missing")
         max_retries = int(max_retries_val)
 
         if stage_all and (not dry_run):
             logger.info("Staging all changes")
             run_git_command(["add", "--all"])
 
-        # Check for staged files
         staged_files = get_staged_files(existing_only=False)
+
+        if group:
+            num_files = len(staged_files)
+            multiplier = min(5, 2 + (num_files // 10))
+            max_output_tokens *= multiplier
+            logger.debug(f"Grouped mode: scaling max_output_tokens by {multiplier}x for {num_files} files")
+
         if not staged_files:
             console.print(
                 "[yellow]No staged changes found. Stage your changes with git add first or use --add-all.[/yellow]"
             )
             sys.exit(0)
 
-        # Run pre-commit and lefthook hooks before doing expensive operations
         if not no_verify and not dry_run:
-            # Run lefthook hooks
-            if not run_lefthook_hooks():
+            if not run_lefthook_hooks(hook_timeout):
                 console.print("[red]Lefthook hooks failed. Please fix the issues and try again.[/red]")
                 console.print("[yellow]You can use --no-verify to skip pre-commit and lefthook hooks.[/yellow]")
                 sys.exit(1)
 
-            # Run pre-commit hooks
-            if not run_pre_commit_hooks():
+            if not run_pre_commit_hooks(hook_timeout):
                 console.print("[red]Pre-commit hooks failed. Please fix the issues and try again.[/red]")
                 console.print("[yellow]You can use --no-verify to skip pre-commit and lefthook hooks.[/yellow]")
                 sys.exit(1)
 
-        status = run_git_command(["status"])
+        status = get_staged_status()
         diff = run_git_command(["diff", "--staged"])
         diff_stat = " " + run_git_command(["diff", "--stat", "--cached"])
 
-        # Security scan for secrets
        if not skip_secret_scan:
             logger.info("Scanning staged changes for potential secrets...")
             secrets = scan_staged_diff(diff)
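
A quick worked example of the grouped-mode token scaling added above, since the stepwise growth is easy to misread:

    num_files = 9   ->  min(5, 2 + 9 // 10)  = min(5, 2) = 2x
    num_files = 25  ->  min(5, 2 + 25 // 10) = min(5, 4) = 4x
    num_files = 30+ ->  min(5, 2 + 3)        = 5x (cap)

So the multiplier rises by one for every ten staged files, starting at 2x and saturating at 5x from 30 files onward.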
@@ -168,19 +880,30 @@ def main(
                     sys.exit(0)
 
                 console.print(f"[green]Continuing with {len(remaining_staged)} staged file(s)...[/green]")
-                # Refresh all git state variables after removing files
-                status = run_git_command(["status"])
+                status = get_staged_status()
                 diff = run_git_command(["diff", "--staged"])
                 diff_stat = " " + run_git_command(["diff", "--stat", "--cached"])
             else:
                 logger.info("No secrets detected in staged changes")
 
-        # Preprocess the diff before passing to build_prompt
         logger.debug(f"Preprocessing diff ({len(diff)} characters)")
-        assert model is not None
+        if model is None:
+            raise ConfigError("Model must be specified via GAC_MODEL environment variable or --model flag")
         processed_diff = preprocess_diff(diff, token_limit=Utility.DEFAULT_DIFF_TOKEN_LIMIT, model=model)
         logger.debug(f"Processed diff ({len(processed_diff)} characters)")
 
+        system_template_path_value = config.get("system_prompt_path")
+        system_template_path: str | None = (
+            system_template_path_value if isinstance(system_template_path_value, str) else None
+        )
+
+        if language is None:
+            language_value = config.get("language")
+            language = language_value if isinstance(language_value, str) else None
+
+        translate_prefixes_value = config.get("translate_prefixes")
+        translate_prefixes: bool = bool(translate_prefixes_value) if isinstance(translate_prefixes_value, bool) else False
+
         system_prompt, user_prompt = build_prompt(
             status=status,
             processed_diff=processed_diff,
@@ -189,166 +912,153 @@ def main(
             hint=hint,
             infer_scope=infer_scope,
             verbose=verbose,
+            system_template_path=system_template_path,
+            language=language,
+            translate_prefixes=translate_prefixes,
         )
 
-        if show_prompt:
-            # Show both system and user prompts
-            full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
-            console.print(
-                Panel(
-                    full_prompt,
-                    title="Prompt for LLM",
-                    border_style="bright_blue",
-                )
-            )
-
-        conversation_messages: list[dict[str, str]] = []
-        if system_prompt:
-            conversation_messages.append({"role": "system", "content": system_prompt})
-        conversation_messages.append({"role": "user", "content": user_prompt})
-
-        try:
-            first_iteration = True
-
-            while True:
-                prompt_tokens = count_tokens(conversation_messages, model)
-
-                if first_iteration:
-                    warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
-                    assert warning_limit_val is not None
-                    warning_limit = int(warning_limit_val)
-                    if warning_limit and prompt_tokens > warning_limit:
-                        console.print(
-                            f"[yellow]⚠️ WARNING: Prompt contains {prompt_tokens} tokens, which exceeds the warning limit of "
-                            f"{warning_limit} tokens.[/yellow]"
-                        )
-                        if require_confirmation:
-                            proceed = click.confirm("Do you want to continue anyway?", default=True)
-                            if not proceed:
-                                console.print("[yellow]Aborted due to token limit.[/yellow]")
-                                sys.exit(0)
+        if group:
+            from gac.prompt import build_group_prompt
 
-                first_iteration = False
+            system_prompt, user_prompt = build_group_prompt(
+                status=status,
+                processed_diff=processed_diff,
+                diff_stat=diff_stat,
+                one_liner=one_liner,
+                hint=hint,
+                infer_scope=infer_scope,
+                verbose=verbose,
+                system_template_path=system_template_path,
+                language=language,
+                translate_prefixes=translate_prefixes,
+            )
 
-                raw_commit_message = generate_commit_message(
+            try:
+                execute_grouped_commits_workflow(
+                    system_prompt=system_prompt,
+                    user_prompt=user_prompt,
                     model=model,
-                    prompt=conversation_messages,
                     temperature=temperature,
-                    max_tokens=max_output_tokens,
+                    max_output_tokens=max_output_tokens,
                     max_retries=max_retries,
+                    require_confirmation=require_confirmation,
                     quiet=quiet,
+                    no_verify=no_verify,
+                    dry_run=dry_run,
+                    push=push,
+                    show_prompt=show_prompt,
+                    hook_timeout=hook_timeout,
+                    interactive=interactive,
+                    message_only=message_only,
                 )
-                commit_message = clean_commit_message(raw_commit_message)
+            except AIError as e:
+                logger.error(str(e))
+                console.print(f"[red]Failed to generate grouped commits: {str(e)}[/red]")
+                sys.exit(1)
+        else:
+            try:
+                execute_single_commit_workflow(
+                    system_prompt=system_prompt,
+                    user_prompt=user_prompt,
+                    model=model,
+                    temperature=temperature,
+                    max_output_tokens=max_output_tokens,
+                    max_retries=max_retries,
+                    require_confirmation=require_confirmation,
+                    quiet=quiet,
+                    no_verify=no_verify,
+                    dry_run=dry_run,
+                    message_only=message_only,
+                    push=push,
+                    show_prompt=show_prompt,
+                    hook_timeout=hook_timeout,
+                    interactive=interactive,
+                )
+            except AIError as e:
+                # Check if this is a Claude Code OAuth token expiration
+                if (
+                    e.error_type == "authentication"
+                    and model.startswith("claude-code:")
+                    and ("expired" in str(e).lower() or "oauth" in str(e).lower())
+                ):
+                    logger.error(str(e))
+                    console.print("[yellow]⚠ Claude Code OAuth token has expired[/yellow]")
+                    console.print("[cyan]🔐 Starting automatic re-authentication...[/cyan]")
 
-                logger.info("Generated commit message:")
-                logger.info(commit_message)
+                    try:
+                        from gac.oauth.claude_code import authenticate_and_save
 
-                conversation_messages.append({"role": "assistant", "content": commit_message})
+                        if authenticate_and_save(quiet=quiet):
+                            console.print("[green]✓ Re-authentication successful![/green]")
+                            console.print("[cyan]Retrying commit...[/cyan]\n")
 
-                console.print("[bold green]Generated commit message:[/bold green]")
-                console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
+                            # Retry the commit workflow
+                            execute_single_commit_workflow(
+                                system_prompt=system_prompt,
+                                user_prompt=user_prompt,
+                                model=model,
+                                temperature=temperature,
+                                max_output_tokens=max_output_tokens,
+                                max_retries=max_retries,
+                                require_confirmation=require_confirmation,
+                                quiet=quiet,
+                                no_verify=no_verify,
+                                dry_run=dry_run,
+                                message_only=message_only,
+                                push=push,
+                                show_prompt=show_prompt,
+                                hook_timeout=hook_timeout,
+                                interactive=interactive,
+                            )
+                        else:
+                            console.print("[red]Re-authentication failed.[/red]")
+                            console.print("[yellow]Run 'gac model' to re-authenticate manually.[/yellow]")
+                            sys.exit(1)
+                    except Exception as auth_error:
+                        console.print(f"[red]Re-authentication error: {auth_error}[/red]")
+                        console.print("[yellow]Run 'gac model' to re-authenticate manually.[/yellow]")
+                        sys.exit(1)
+                # Check if this is a Qwen OAuth token expiration
+                elif e.error_type == "authentication" and model.startswith("qwen:"):
+                    logger.error(str(e))
+                    console.print("[yellow]⚠ Qwen authentication failed[/yellow]")
+                    console.print("[cyan]🔐 Starting automatic re-authentication...[/cyan]")
 
-                if not quiet:
-                    completion_tokens = count_tokens(commit_message, model)
-                    total_tokens = prompt_tokens + completion_tokens
-                    console.print(
-                        f"[dim]Token usage: {prompt_tokens} prompt + {completion_tokens} completion = {total_tokens} "
-                        "total[/dim]"
-                    )
+                    try:
+                        from gac.oauth import QwenOAuthProvider, TokenStore
 
-                if require_confirmation:
-                    # Custom prompt that accepts y/n/r or "r <feedback (optional)>"
-                    while True:
-                        response = click.prompt(
-                            "Proceed with commit above? [y/n/r <feedback>]", type=str, show_default=False
-                        ).strip()
-                        response_lower = response.lower()
-
-                        if response_lower in ["y", "yes"]:
-                            break
-                        if response_lower in ["n", "no"]:
-                            console.print("[yellow]Prompt not accepted. Exiting...[/yellow]")
-                            sys.exit(0)
-                        if response_lower == "r" or response_lower == "reroll" or response_lower.startswith("r "):
-                            if response_lower == "r" or response_lower == "reroll":
-                                feedback_message = (
-                                    "Please provide an alternative commit message using the same repository context."
-                                )
-                                console.print("[cyan]Regenerating commit message...[/cyan]")
-                            else:
-                                reroll_feedback = response[2:].strip()
-                                feedback_message = (
-                                    f"Please revise the commit message based on this feedback: {reroll_feedback}"
-                                )
-                                console.print(f"[cyan]Regenerating commit message with feedback: {reroll_feedback}[/cyan]")
-
-                            conversation_messages.append({"role": "user", "content": feedback_message})
-
-                            console.print()  # Add blank line for readability
-                            break
+                        oauth_provider = QwenOAuthProvider(TokenStore())
+                        oauth_provider.initiate_auth(open_browser=True)
+                        console.print("[green]✓ Re-authentication successful![/green]")
+                        console.print("[cyan]Retrying commit...[/cyan]\n")
 
-                        console.print(
-                            "[red]Invalid response. Please enter y (yes), n (no), r (reroll), or r <feedback>.[/red]"
+                        # Retry the commit workflow
+                        execute_single_commit_workflow(
+                            system_prompt=system_prompt,
+                            user_prompt=user_prompt,
+                            model=model,
+                            temperature=temperature,
+                            max_output_tokens=max_output_tokens,
+                            max_retries=max_retries,
+                            require_confirmation=require_confirmation,
+                            quiet=quiet,
+                            no_verify=no_verify,
+                            dry_run=dry_run,
+                            message_only=message_only,
+                            push=push,
+                            show_prompt=show_prompt,
+                            hook_timeout=hook_timeout,
+                            interactive=interactive,
                         )
-
-                        if response_lower in ["y", "yes"]:
-                            break
-                else:
-                    break
-
-            if dry_run:
-                console.print("[yellow]Dry run: Commit message generated but not applied[/yellow]")
-                console.print("Would commit with message:")
-                console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
-                staged_files = get_staged_files(existing_only=False)
-                console.print(f"Would commit {len(staged_files)} files")
-                logger.info(f"Would commit {len(staged_files)} files")
-            else:
-                commit_args = ["commit", "-m", commit_message]
-                if no_verify:
-                    commit_args.append("--no-verify")
-                run_git_command(commit_args)
-                logger.info("Commit created successfully")
-                console.print("[green]Commit created successfully[/green]")
-        except AIError as e:
-            logger.error(str(e))
-            console.print(f"[red]Failed to generate commit message: {str(e)}[/red]")
-            sys.exit(1)
-
-        if push:
-            try:
-                if dry_run:
-                    staged_files = get_staged_files(existing_only=False)
-
-                    logger.info("Dry run: Would push changes")
-                    logger.info("Would push with message:")
-                    logger.info(commit_message)
-                    logger.info(f"Would push {len(staged_files)} files")
-
-                    console.print("[yellow]Dry run: Would push changes[/yellow]")
-                    console.print("Would push with message:")
-                    console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
-                    console.print(f"Would push {len(staged_files)} files")
-                    sys.exit(0)
-
-                if push_changes():
-                    logger.info("Changes pushed successfully")
-                    console.print("[green]Changes pushed successfully[/green]")
+                    except Exception as auth_error:
+                        console.print(f"[red]Re-authentication error: {auth_error}[/red]")
+                        console.print("[yellow]Run 'gac auth qwen login' to re-authenticate manually.[/yellow]")
+                        sys.exit(1)
                 else:
-                    console.print(
-                        "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
-                    )
+                    # Non-Claude Code/Qwen error or non-auth error
+                    logger.error(str(e))
+                    console.print(f"[red]Failed to generate commit message: {str(e)}[/red]")
                     sys.exit(1)
-            except Exception as e:
-                console.print(f"[red]Error pushing changes: {e}[/red]")
-                sys.exit(1)
-
-        if not quiet:
-            logger.info("Successfully committed changes with message:")
-            logger.info(commit_message)
-            if push:
-                logger.info("Changes pushed to remote.")
-        sys.exit(0)
 
 
 if __name__ == "__main__":
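
The reworked dispatch above routes every run through _parse_model_identifier, which assumes model identifiers in provider:model form. A few illustrative calls (the values are hypothetical examples, not defaults shipped with the package):

    _parse_model_identifier("anthropic:claude-haiku-4-5")  # ("anthropic", "claude-haiku-4-5")
    _parse_model_identifier("openai:gpt-4o-mini")          # ("openai", "gpt-4o-mini")
    _parse_model_identifier("gpt-4o-mini")                 # no ':' separator: prints an error, exits 1
    _parse_model_identifier("openai:")                     # empty model name: prints an error, exits 1

Since the split is normalized.split(":", 1), only the first colon separates provider from model, so model names that themselves contain colons (for example Ollama-style tags) pass through intact.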