gac 2.1.0__py3-none-any.whl → 2.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


gac/main.py CHANGED
@@ -17,6 +17,7 @@ from gac.constants import EnvDefaults, Utility
 from gac.errors import AIError, GitError, handle_error
 from gac.git import (
     get_staged_files,
+    get_staged_status,
     push_changes,
     run_git_command,
     run_lefthook_hooks,
@@ -25,6 +26,13 @@ from gac.git import (
 from gac.preprocess import preprocess_diff
 from gac.prompt import build_prompt, clean_commit_message
 from gac.security import get_affected_files, scan_staged_diff
+from gac.workflow_utils import (
+    check_token_warning,
+    display_commit_message,
+    execute_commit,
+    handle_confirmation_loop,
+    restore_staging,
+)
 
 logger = logging.getLogger(__name__)
 
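Note: the new gac.workflow_utils module centralizes helpers that both workflow functions added below share. The module itself is not shown in this diff; the stubs here are only a sketch, inferred from the call sites later in this file, and may differ from the real definitions:

def check_token_warning(prompt_tokens: int, warning_limit: int, require_confirmation: bool) -> bool:
    """Return False when the user declines to continue past the prompt-size warning."""
    ...


def display_commit_message(commit_message: str, prompt_tokens: int, model: str, quiet: bool) -> None:
    """Show the generated message and token usage, honoring quiet mode."""
    ...


def execute_commit(commit_message: str, no_verify: bool) -> None:
    """Create the commit, optionally skipping hooks with --no-verify."""
    ...


def handle_confirmation_loop(
    commit_message: str, conversation_messages: list[dict[str, str]], quiet: bool, model: str
) -> tuple[str, str, list[dict[str, str]]]:
    """Prompt y/n/r/<feedback>; return the decision, the possibly revised message, and the conversation."""
    ...


def restore_staging(original_staged_files: list[str], original_staged_diff: str) -> None:
    """Re-apply a captured staged state after a failed or interrupted grouped commit."""
    ...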
@@ -32,8 +40,450 @@ config = load_config()
 console = Console()  # Initialize console globally to prevent undefined access
 
 
+def _validate_grouped_files_or_feedback(staged: set[str], grouped_result: dict) -> tuple[bool, str, str]:
+    from collections import Counter
+
+    commits = grouped_result.get("commits", []) if isinstance(grouped_result, dict) else []
+    all_files: list[str] = []
+    for commit in commits:
+        files = commit.get("files", []) if isinstance(commit, dict) else []
+        all_files.extend([str(p) for p in files])
+
+    counts = Counter(all_files)
+    union_set = set(all_files)
+
+    duplicates = sorted([f for f, c in counts.items() if c > 1])
+    missing = sorted(staged - union_set)
+    unexpected = sorted(union_set - staged)
+
+    if not duplicates and not missing and not unexpected:
+        return True, "", ""
+
+    problems: list[str] = []
+    if missing:
+        problems.append(f"Missing: {', '.join(missing)}")
+    if unexpected:
+        problems.append(f"Not staged: {', '.join(unexpected)}")
+    if duplicates:
+        problems.append(f"Duplicates: {', '.join(duplicates)}")
+
+    feedback = f"{'; '.join(problems)}. Required files: {', '.join(sorted(staged))}. Respond with ONLY valid JSON."
+    return False, feedback, "; ".join(problems)
+
+
+def _parse_model_identifier(model: str) -> tuple[str, str]:
+    """Validate and split model identifier into provider and model name."""
+    normalized = model.strip()
+    if ":" not in normalized:
+        message = (
+            f"Invalid model format: '{model}'. Expected 'provider:model', e.g. 'openai:gpt-4o-mini'. "
+            "Use 'gac config set model <provider:model>' to update your configuration."
+        )
+        logger.error(message)
+        console.print(f"[red]{message}[/red]")
+        sys.exit(1)
+
+    provider, model_name = normalized.split(":", 1)
+    if not provider or not model_name:
+        message = (
+            f"Invalid model format: '{model}'. Both provider and model name are required "
+            "(example: 'anthropic:claude-3-5-haiku-latest')."
+        )
+        logger.error(message)
+        console.print(f"[red]{message}[/red]")
+        sys.exit(1)
+
+    return provider, model_name
+
+
+def _handle_validation_retry(
+    attempts: int,
+    content_retry_budget: int,
+    raw_response: str,
+    feedback_message: str,
+    error_message: str,
+    conversation_messages: list[dict[str, str]],
+    quiet: bool,
+    retry_context: str,
+) -> bool:
+    """Handle validation retry logic. Returns True if should exit, False if should retry."""
+    conversation_messages.append({"role": "assistant", "content": raw_response})
+    conversation_messages.append({"role": "user", "content": feedback_message})
+    if attempts >= content_retry_budget:
+        logger.error(error_message)
+        console.print(f"\n[red]{error_message}[/red]")
+        console.print("\n[yellow]Raw model output:[/yellow]")
+        console.print(Panel(raw_response, title="Model Output", border_style="yellow"))
+        return True
+    if not quiet:
+        console.print(f"[yellow]Retry {attempts} of {content_retry_budget - 1}: {retry_context}[/yellow]")
+    return False
+
+
+def execute_grouped_commits_workflow(
+    *,
+    system_prompt: str,
+    user_prompt: str,
+    model: str,
+    temperature: float,
+    max_output_tokens: int,
+    max_retries: int,
+    require_confirmation: bool,
+    quiet: bool,
+    no_verify: bool,
+    dry_run: bool,
+    push: bool,
+    show_prompt: bool,
+) -> None:
+    """Execute the grouped commits workflow."""
+    import json
+
+    from gac.ai import generate_grouped_commits
+
+    provider, model_name = _parse_model_identifier(model)
+
+    if show_prompt:
+        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
+        console.print(Panel(full_prompt, title="Prompt for LLM", border_style="bright_blue"))
+
+    conversation_messages: list[dict[str, str]] = []
+    if system_prompt:
+        conversation_messages.append({"role": "system", "content": system_prompt})
+    conversation_messages.append({"role": "user", "content": user_prompt})
+
+    _parse_model_identifier(model)
+
+    first_iteration = True
+    content_retry_budget = max(3, int(max_retries))
+    attempts = 0
+
+    grouped_result: dict | None = None
+    raw_response: str = ""
+
+    while True:
+        prompt_tokens = count_tokens(conversation_messages, model)
+
+        if first_iteration:
+            warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
+            assert warning_limit_val is not None
+            warning_limit = int(warning_limit_val)
+            if not check_token_warning(prompt_tokens, warning_limit, require_confirmation):
+                sys.exit(0)
+            first_iteration = False
+
+        raw_response = generate_grouped_commits(
+            model=model,
+            prompt=conversation_messages,
+            temperature=temperature,
+            max_tokens=max_output_tokens,
+            max_retries=max_retries,
+            quiet=quiet,
+            skip_success_message=True,
+        )
+
+        parsed: dict | None = None
+        extract = raw_response
+        first_brace = raw_response.find("{")
+        last_brace = raw_response.rfind("}")
+        if first_brace != -1 and last_brace != -1 and first_brace < last_brace:
+            extract = raw_response[first_brace : last_brace + 1]
+
+        try:
+            parsed = json.loads(extract)
+        except json.JSONDecodeError as e:
+            parsed = None
+            logger.debug(
+                f"JSON parsing failed: {e}. Extract length: {len(extract)}, Response length: {len(raw_response)}"
+            )
+
+        if parsed is None:
+            attempts += 1
+            feedback = "Your response was not valid JSON. Respond with ONLY valid JSON matching the expected schema. Do not include any commentary or code fences."
+            error_msg = f"Failed to parse LLM response as JSON after {attempts} retries."
+            if _handle_validation_retry(
+                attempts,
+                content_retry_budget,
+                raw_response,
+                feedback,
+                error_msg,
+                conversation_messages,
+                quiet,
+                "JSON parsing failed, asking model to fix...",
+            ):
+                sys.exit(1)
+            continue
+
+        try:
+            if "commits" not in parsed or not isinstance(parsed["commits"], list):
+                raise ValueError("Response missing 'commits' array")
+            if len(parsed["commits"]) == 0:
+                raise ValueError("No commits in response")
+            for idx, commit in enumerate(parsed["commits"]):
+                if "files" not in commit or not isinstance(commit["files"], list):
+                    raise ValueError(f"Commit {idx + 1} missing 'files' array")
+                if "message" not in commit or not isinstance(commit["message"], str):
+                    raise ValueError(f"Commit {idx + 1} missing 'message' string")
+                if len(commit["files"]) == 0:
+                    raise ValueError(f"Commit {idx + 1} has empty files list")
+                if not commit["message"].strip():
+                    raise ValueError(f"Commit {idx + 1} has empty message")
+        except (ValueError, TypeError) as e:
+            attempts += 1
+            feedback = f"Invalid response structure: {e}. Please return ONLY valid JSON following the schema with a non-empty 'commits' array of objects containing 'files' and 'message'."
+            error_msg = f"Invalid grouped commits structure after {attempts} retries: {e}"
+            if _handle_validation_retry(
+                attempts,
+                content_retry_budget,
+                raw_response,
+                feedback,
+                error_msg,
+                conversation_messages,
+                quiet,
+                "Structure validation failed, asking model to fix...",
+            ):
+                sys.exit(1)
+            continue
+
+        staged_set = set(get_staged_files(existing_only=False))
+        ok, feedback, detail_msg = _validate_grouped_files_or_feedback(staged_set, parsed)
+        if not ok:
+            attempts += 1
+            error_msg = (
+                f"Grouped commits file set mismatch after {attempts} retries{': ' + detail_msg if detail_msg else ''}"
+            )
+            if _handle_validation_retry(
+                attempts,
+                content_retry_budget,
+                raw_response,
+                feedback,
+                error_msg,
+                conversation_messages,
+                quiet,
+                "File coverage mismatch, asking model to fix...",
+            ):
+                sys.exit(1)
+            continue
+
+        grouped_result = parsed
+        conversation_messages.append({"role": "assistant", "content": raw_response})
+
+        if not quiet:
+            console.print(f"[green]✔ Generated commit messages with {provider} {model_name}[/green]")
+            num_commits = len(grouped_result["commits"])
+            console.print(f"[bold green]Proposed Commits ({num_commits}):[/bold green]\n")
+            for idx, commit in enumerate(grouped_result["commits"], 1):
+                files = commit["files"]
+                files_display = ", ".join(files)
+                console.print(f"[dim]{files_display}[/dim]")
+                commit_msg = commit["message"]
+                console.print(Panel(commit_msg, title=f"Commit Message {idx}/{num_commits}", border_style="cyan"))
+                console.print()
+
+            completion_tokens = count_tokens(raw_response, model)
+            total_tokens = prompt_tokens + completion_tokens
+            console.print(
+                f"[dim]Token usage: {prompt_tokens} prompt + {completion_tokens} completion = {total_tokens} total[/dim]"
+            )
+
+        if require_confirmation:
+            accepted = False
+            num_commits = len(grouped_result["commits"]) if grouped_result else 0
+            while True:
+                response = click.prompt(
+                    f"Proceed with {num_commits} commits above? [y/n/r/<feedback>]",
+                    type=str,
+                    show_default=False,
+                ).strip()
+                response_lower = response.lower()
+
+                if response_lower in ["y", "yes"]:
+                    accepted = True
+                    break
+                if response_lower in ["n", "no"]:
+                    console.print("[yellow]Commits not accepted. Exiting...[/yellow]")
+                    sys.exit(0)
+                if response == "":
+                    continue
+                if response_lower in ["r", "reroll"]:
+                    feedback_message = "Please provide alternative commit groupings using the same repository context."
+                    console.print("[cyan]Regenerating commit groups...[/cyan]")
+                    conversation_messages.append({"role": "user", "content": feedback_message})
+                    console.print()
+                    attempts = 0
+                    break
+
+                feedback_message = f"Please revise the commit groupings based on this feedback: {response}"
+                console.print(f"[cyan]Regenerating commit groups with feedback: {response}[/cyan]")
+                conversation_messages.append({"role": "user", "content": feedback_message})
+                console.print()
+                attempts = 0
+                break
+
+            if not accepted:
+                continue
+
+        num_commits = len(grouped_result["commits"]) if grouped_result else 0
+        if dry_run:
+            console.print(f"[yellow]Dry run: Would create {num_commits} commits[/yellow]")
+            for idx, commit in enumerate(grouped_result["commits"], 1):
+                console.print(f"\n[cyan]Commit {idx}/{num_commits}:[/cyan]")
+                console.print(f"  Files: {', '.join(commit['files'])}")
+                console.print(f"  Message: {commit['message'][:50]}...")
+        else:
+            original_staged_files = get_staged_files(existing_only=False)
+            original_staged_diff = run_git_command(["diff", "--cached", "--binary"], silent=True)
+            run_git_command(["reset", "HEAD"])
+
+            try:
+                for idx, commit in enumerate(grouped_result["commits"], 1):
+                    try:
+                        for file_path in commit["files"]:
+                            run_git_command(["add", "-A", file_path])
+                        execute_commit(commit["message"], no_verify)
+                        console.print(f"[green]✓ Commit {idx}/{num_commits} created[/green]")
+                    except Exception as e:
+                        console.print(f"[red]✗ Failed at commit {idx}/{num_commits}: {e}[/red]")
+                        console.print(f"[yellow]Completed {idx - 1}/{num_commits} commits.[/yellow]")
+                        if idx == 1:
+                            console.print("[yellow]Restoring original staging area...[/yellow]")
+                            restore_staging(original_staged_files, original_staged_diff)
+                            console.print("[green]Original staging area restored.[/green]")
+                        sys.exit(1)
+            except KeyboardInterrupt:
+                console.print("\n[yellow]Interrupted by user. Restoring original staging area...[/yellow]")
+                restore_staging(original_staged_files, original_staged_diff)
+                console.print("[green]Original staging area restored.[/green]")
+                sys.exit(1)
+
+        if push:
+            try:
+                if dry_run:
+                    console.print("[yellow]Dry run: Would push changes[/yellow]")
+                    sys.exit(0)
+                if push_changes():
+                    logger.info("Changes pushed successfully")
+                    console.print("[green]Changes pushed successfully[/green]")
+                else:
+                    console.print(
+                        "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
+                    )
+                    sys.exit(1)
+            except Exception as e:
+                console.print(f"[red]Error pushing changes: {e}[/red]")
+                sys.exit(1)
+
+        sys.exit(0)
+
+
+def execute_single_commit_workflow(
+    *,
+    system_prompt: str,
+    user_prompt: str,
+    model: str,
+    temperature: float,
+    max_output_tokens: int,
+    max_retries: int,
+    require_confirmation: bool,
+    quiet: bool,
+    no_verify: bool,
+    dry_run: bool,
+    push: bool,
+    show_prompt: bool,
+) -> None:
+    if show_prompt:
+        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
+        console.print(Panel(full_prompt, title="Prompt for LLM", border_style="bright_blue"))
+
+    conversation_messages: list[dict[str, str]] = []
+    if system_prompt:
+        conversation_messages.append({"role": "system", "content": system_prompt})
+    conversation_messages.append({"role": "user", "content": user_prompt})
+
+    _parse_model_identifier(model)
+
+    first_iteration = True
+    while True:
+        prompt_tokens = count_tokens(conversation_messages, model)
+        if first_iteration:
+            warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
+            assert warning_limit_val is not None
+            warning_limit = int(warning_limit_val)
+            if not check_token_warning(prompt_tokens, warning_limit, require_confirmation):
+                sys.exit(0)
+            first_iteration = False
+
+        raw_commit_message = generate_commit_message(
+            model=model,
+            prompt=conversation_messages,
+            temperature=temperature,
+            max_tokens=max_output_tokens,
+            max_retries=max_retries,
+            quiet=quiet,
+        )
+        commit_message = clean_commit_message(raw_commit_message)
+        logger.info("Generated commit message:")
+        logger.info(commit_message)
+        conversation_messages.append({"role": "assistant", "content": commit_message})
+        display_commit_message(commit_message, prompt_tokens, model, quiet)
+
+        if require_confirmation:
+            decision, commit_message, conversation_messages = handle_confirmation_loop(
+                commit_message, conversation_messages, quiet, model
+            )
+            if decision == "no":
+                console.print("[yellow]Prompt not accepted. Exiting...[/yellow]")
+                sys.exit(0)
+            elif decision == "yes":
+                break
+        else:
+            break
+
+    if dry_run:
+        console.print("[yellow]Dry run: Commit message generated but not applied[/yellow]")
+        console.print("Would commit with message:")
+        console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
+        staged_files = get_staged_files(existing_only=False)
+        console.print(f"Would commit {len(staged_files)} files")
+        logger.info(f"Would commit {len(staged_files)} files")
+    else:
+        execute_commit(commit_message, no_verify)
+
+    if push:
+        try:
+            if dry_run:
+                staged_files = get_staged_files(existing_only=False)
+                logger.info("Dry run: Would push changes")
+                logger.info("Would push with message:")
+                logger.info(commit_message)
+                logger.info(f"Would push {len(staged_files)} files")
+                console.print("[yellow]Dry run: Would push changes[/yellow]")
+                console.print("Would push with message:")
+                console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
+                console.print(f"Would push {len(staged_files)} files")
+                sys.exit(0)
+            if push_changes():
+                logger.info("Changes pushed successfully")
+                console.print("[green]Changes pushed successfully[/green]")
+            else:
+                console.print(
+                    "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
+                )
+                sys.exit(1)
+        except Exception as e:
+            console.print(f"[red]Error pushing changes: {e}[/red]")
+            sys.exit(1)
+
+    if not quiet:
+        logger.info("Successfully committed changes with message:")
+        logger.info(commit_message)
+        if push:
+            logger.info("Changes pushed to remote.")
+    sys.exit(0)
+
+
 def main(
     stage_all: bool = False,
+    group: bool = False,
     model: str | None = None,
     hint: str = "",
     one_liner: bool = False,
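Note: _validate_grouped_files_or_feedback (added above) enforces that the proposed commits partition the staged file set exactly: every staged file appears in exactly one commit, and no unstaged file appears at all. A minimal standalone sketch of that check, with hypothetical file names:

from collections import Counter

staged = {"gac/main.py", "gac/git.py", "README.md"}
grouped = {
    "commits": [
        {"files": ["gac/main.py", "gac/git.py"], "message": "refactor: extract workflows"},
        {"files": ["gac/git.py"], "message": "docs: update readme"},
    ]
}

# Flatten the per-commit file lists, then compare against the staged set.
all_files = [f for commit in grouped["commits"] for f in commit["files"]]
counts = Counter(all_files)

duplicates = sorted(f for f, n in counts.items() if n > 1)  # ['gac/git.py']
missing = sorted(staged - set(all_files))                   # ['README.md']
unexpected = sorted(set(all_files) - staged)                # []

# Any non-empty list fails validation; the workflow then retries with
# corrective feedback appended to the conversation.
print(duplicates, missing, unexpected)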
@@ -84,33 +534,35 @@ def main(
         logger.info("Staging all changes")
         run_git_command(["add", "--all"])
 
-    # Check for staged files
     staged_files = get_staged_files(existing_only=False)
+
+    if group:
+        num_files = len(staged_files)
+        multiplier = min(5, 2 + (num_files // 10))
+        max_output_tokens *= multiplier
+        logger.debug(f"Grouped mode: scaling max_output_tokens by {multiplier}x for {num_files} files")
+
     if not staged_files:
         console.print(
             "[yellow]No staged changes found. Stage your changes with git add first or use --add-all.[/yellow]"
         )
         sys.exit(0)
 
-    # Run pre-commit and lefthook hooks before doing expensive operations
     if not no_verify and not dry_run:
-        # Run lefthook hooks
         if not run_lefthook_hooks():
             console.print("[red]Lefthook hooks failed. Please fix the issues and try again.[/red]")
             console.print("[yellow]You can use --no-verify to skip pre-commit and lefthook hooks.[/yellow]")
             sys.exit(1)
 
-        # Run pre-commit hooks
         if not run_pre_commit_hooks():
             console.print("[red]Pre-commit hooks failed. Please fix the issues and try again.[/red]")
             console.print("[yellow]You can use --no-verify to skip pre-commit and lefthook hooks.[/yellow]")
             sys.exit(1)
 
-    status = run_git_command(["status"])
+    status = get_staged_status()
     diff = run_git_command(["diff", "--staged"])
     diff_stat = " " + run_git_command(["diff", "--stat", "--cached"])
 
-    # Security scan for secrets
    if not skip_secret_scan:
         logger.info("Scanning staged changes for potential secrets...")
         secrets = scan_staged_diff(diff)
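Note: grouped mode widens the output-token budget because the model must return a JSON envelope with one message per commit rather than a single message. A small sketch of the scaling curve added above (the base budget of 1024 is a hypothetical value for illustration):

def scaled_max_output_tokens(base: int, num_files: int) -> int:
    # Same formula as the diff: 2x floor, +1x per 10 staged files, capped at 5x.
    multiplier = min(5, 2 + (num_files // 10))
    return base * multiplier

for n in (1, 9, 10, 25, 30, 100):
    print(n, scaled_max_output_tokens(1024, n))
# 1-9 files -> 2x, 10-19 -> 3x, 20-29 -> 4x, 30 or more -> 5x (cap)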
@@ -169,14 +621,12 @@ def main(
             sys.exit(0)
 
         console.print(f"[green]Continuing with {len(remaining_staged)} staged file(s)...[/green]")
-        # Refresh all git state variables after removing files
-        status = run_git_command(["status"])
+        status = get_staged_status()
         diff = run_git_command(["diff", "--staged"])
         diff_stat = " " + run_git_command(["diff", "--stat", "--cached"])
     else:
         logger.info("No secrets detected in staged changes")
 
-    # Preprocess the diff before passing to build_prompt
     logger.debug(f"Preprocessing diff ({len(diff)} characters)")
     assert model is not None
     processed_diff = preprocess_diff(diff, token_limit=Utility.DEFAULT_DIFF_TOKEN_LIMIT, model=model)
@@ -187,7 +637,6 @@ def main(
         system_template_path_value if isinstance(system_template_path_value, str) else None
     )
 
-    # Use language parameter if provided, otherwise fall back to config
     if language is None:
         language_value = config.get("language")
         language = language_value if isinstance(language_value, str) else None
@@ -208,162 +657,62 @@ def main(
         translate_prefixes=translate_prefixes,
     )
 
-    if show_prompt:
-        # Show both system and user prompts
-        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
-        console.print(
-            Panel(
-                full_prompt,
-                title="Prompt for LLM",
-                border_style="bright_blue",
-            )
+    if group:
+        from gac.prompt import build_group_prompt
+
+        system_prompt, user_prompt = build_group_prompt(
+            status=status,
+            processed_diff=processed_diff,
+            diff_stat=diff_stat,
+            one_liner=one_liner,
+            hint=hint,
+            infer_scope=infer_scope,
+            verbose=verbose,
+            system_template_path=system_template_path,
+            language=language,
+            translate_prefixes=translate_prefixes,
         )
 
-    conversation_messages: list[dict[str, str]] = []
-    if system_prompt:
-        conversation_messages.append({"role": "system", "content": system_prompt})
-    conversation_messages.append({"role": "user", "content": user_prompt})
-
-    try:
-        first_iteration = True
-
-        while True:
-            prompt_tokens = count_tokens(conversation_messages, model)
-
-            if first_iteration:
-                warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
-                assert warning_limit_val is not None
-                warning_limit = int(warning_limit_val)
-                if warning_limit and prompt_tokens > warning_limit:
-                    console.print(
-                        f"[yellow]⚠️ WARNING: Prompt contains {prompt_tokens} tokens, which exceeds the warning limit of "
-                        f"{warning_limit} tokens.[/yellow]"
-                    )
-                    if require_confirmation:
-                        proceed = click.confirm("Do you want to continue anyway?", default=True)
-                        if not proceed:
-                            console.print("[yellow]Aborted due to token limit.[/yellow]")
-                            sys.exit(0)
-
-                first_iteration = False
-
-            raw_commit_message = generate_commit_message(
+        try:
+            execute_grouped_commits_workflow(
+                system_prompt=system_prompt,
+                user_prompt=user_prompt,
                 model=model,
-                prompt=conversation_messages,
                 temperature=temperature,
-                max_tokens=max_output_tokens,
+                max_output_tokens=max_output_tokens,
                 max_retries=max_retries,
+                require_confirmation=require_confirmation,
                 quiet=quiet,
+                no_verify=no_verify,
+                dry_run=dry_run,
+                push=push,
+                show_prompt=show_prompt,
             )
-            # Clean the commit message (no automatic prefix enforcement)
-            commit_message = clean_commit_message(raw_commit_message)
-
-            logger.info("Generated commit message:")
-            logger.info(commit_message)
-
-            conversation_messages.append({"role": "assistant", "content": commit_message})
-
-            console.print("[bold green]Generated commit message:[/bold green]")
-            console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
-
-            if not quiet:
-                completion_tokens = count_tokens(commit_message, model)
-                total_tokens = prompt_tokens + completion_tokens
-                console.print(
-                    f"[dim]Token usage: {prompt_tokens} prompt + {completion_tokens} completion = {total_tokens} "
-                    "total[/dim]"
-                )
-
-            if require_confirmation:
-                while True:
-                    response = click.prompt(
-                        "Proceed with commit above? [y/n/r/<feedback>]",
-                        type=str,
-                        show_default=False,
-                    ).strip()
-                    response_lower = response.lower()
-
-                    if response_lower in ["y", "yes"]:
-                        break
-                    if response_lower in ["n", "no"]:
-                        console.print("[yellow]Prompt not accepted. Exiting...[/yellow]")
-                        sys.exit(0)
-                    if response == "":
-                        continue
-                    if response_lower in ["r", "reroll"]:
-                        feedback_message = (
-                            "Please provide an alternative commit message using the same repository context."
-                        )
-                        console.print("[cyan]Regenerating commit message...[/cyan]")
-                        conversation_messages.append({"role": "user", "content": feedback_message})
-                        console.print()
-                        break
-
-                    feedback_message = f"Please revise the commit message based on this feedback: {response}"
-                    console.print(f"[cyan]Regenerating commit message with feedback: {response}[/cyan]")
-                    conversation_messages.append({"role": "user", "content": feedback_message})
-                    console.print()
-                    break
-
-                if response_lower in ["y", "yes"]:
-                    break
-            else:
-                break
-
-        if dry_run:
-            console.print("[yellow]Dry run: Commit message generated but not applied[/yellow]")
-            console.print("Would commit with message:")
-            console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
-            staged_files = get_staged_files(existing_only=False)
-            console.print(f"Would commit {len(staged_files)} files")
-            logger.info(f"Would commit {len(staged_files)} files")
-        else:
-            commit_args = ["commit", "-m", commit_message]
-            if no_verify:
-                commit_args.append("--no-verify")
-            run_git_command(commit_args)
-            logger.info("Commit created successfully")
-            console.print("[green]Commit created successfully[/green]")
-    except AIError as e:
-        logger.error(str(e))
-        console.print(f"[red]Failed to generate commit message: {str(e)}[/red]")
-        sys.exit(1)
-
-    if push:
+        except AIError as e:
+            logger.error(str(e))
+            console.print(f"[red]Failed to generate grouped commits: {str(e)}[/red]")
+            sys.exit(1)
+    else:
         try:
-            if dry_run:
-                staged_files = get_staged_files(existing_only=False)
-
-                logger.info("Dry run: Would push changes")
-                logger.info("Would push with message:")
-                logger.info(commit_message)
-                logger.info(f"Would push {len(staged_files)} files")
-
-                console.print("[yellow]Dry run: Would push changes[/yellow]")
-                console.print("Would push with message:")
-                console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
-                console.print(f"Would push {len(staged_files)} files")
-                sys.exit(0)
-
-            if push_changes():
-                logger.info("Changes pushed successfully")
-                console.print("[green]Changes pushed successfully[/green]")
-            else:
-                console.print(
-                    "[red]Failed to push changes. Check your remote configuration and network connection.[/red]"
-                )
-                sys.exit(1)
-        except Exception as e:
-            console.print(f"[red]Error pushing changes: {e}[/red]")
+            execute_single_commit_workflow(
+                system_prompt=system_prompt,
+                user_prompt=user_prompt,
+                model=model,
+                temperature=temperature,
+                max_output_tokens=max_output_tokens,
+                max_retries=max_retries,
+                require_confirmation=require_confirmation,
+                quiet=quiet,
+                no_verify=no_verify,
+                dry_run=dry_run,
+                push=push,
+                show_prompt=show_prompt,
+            )
+        except AIError as e:
+            logger.error(str(e))
+            console.print(f"[red]Failed to generate commit message: {str(e)}[/red]")
             sys.exit(1)
 
-    if not quiet:
-        logger.info("Successfully committed changes with message:")
-        logger.info(commit_message)
-        if push:
-            logger.info("Changes pushed to remote.")
-    sys.exit(0)
-
 
 if __name__ == "__main__":
     main()
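Note: the grouped workflow added in this release expects the model to return a payload shaped like {"commits": [{"files": [...], "message": "..."}]} and tolerates prose or code fences around it by slicing from the first '{' to the last '}' before parsing. A minimal reproduction of that extraction and of the structural checks, with an invented sample response:

import json

raw_response = """Here is the grouping you asked for:
{"commits": [{"files": ["gac/main.py"], "message": "feat: add grouped commits workflow"}]}
Let me know if you want a different split."""

# Slice from the first '{' to the last '}' so surrounding chatter is ignored.
extract = raw_response
first_brace = raw_response.find("{")
last_brace = raw_response.rfind("}")
if first_brace != -1 and last_brace != -1 and first_brace < last_brace:
    extract = raw_response[first_brace : last_brace + 1]

parsed = json.loads(extract)

# The same structural rules the workflow enforces before committing.
assert isinstance(parsed.get("commits"), list) and parsed["commits"]
for commit in parsed["commits"]:
    assert isinstance(commit["files"], list) and commit["files"]
    assert isinstance(commit["message"], str) and commit["message"].strip()

print(parsed["commits"][0]["message"])  # feat: add grouped commits workflow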