gac 0.17.3__tar.gz → 0.17.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 0.17.3
+Version: 0.17.5
 Summary: AI-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -71,7 +71,13 @@ Description-Content-Type: text/markdown
 
 ## How It Works
 
-gac analyzes your staged changes to generate high-quality commit messages with the help of large language models.
+gac analyzes your staged changes to generate high-quality commit messages with the help of large language models. The tool uses a prompt architecture that separates system instructions from user data, giving the model clearer guidance and producing more consistent results.
+
+### Technical Architecture
+
+- **Dual-Prompt System**: GAC sends system instructions (role definition, conventions, examples) as system messages and git data (diffs, status) as user messages, following established best practices for chat-based models.
+- **Smart Context Analysis**: The tool examines your repository structure, recent commit history, and README files to understand the broader context of your changes.
+- **Intelligent Diff Processing**: Large diffs are automatically preprocessed to focus on the most important changes while staying within token limits.
 
 ## How to Use
 
@@ -23,7 +23,13 @@
 
 ## How It Works
 
-gac analyzes your staged changes to generate high-quality commit messages with the help of large language models.
+gac analyzes your staged changes to generate high-quality commit messages with the help of large language models. The tool uses a prompt architecture that separates system instructions from user data, giving the model clearer guidance and producing more consistent results.
+
+### Technical Architecture
+
+- **Dual-Prompt System**: GAC sends system instructions (role definition, conventions, examples) as system messages and git data (diffs, status) as user messages, following established best practices for chat-based models.
+- **Smart Context Analysis**: The tool examines your repository structure, recent commit history, and README files to understand the broader context of your changes.
+- **Intelligent Diff Processing**: Large diffs are automatically preprocessed to focus on the most important changes while staying within token limits.
 
 ## How to Use
 
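The "Dual-Prompt System" described in the README text above is what the code hunks below implement. As a rough sketch of what the split looks like on the wire, the request becomes a two-message chat payload; the instruction and data strings here are placeholders, not gac's actual template text:

    # Placeholder strings for illustration; gac's real template is longer.
    system_prompt = (
        "You are an expert at writing git commit messages.\n"
        "Follow Conventional Commits, e.g. 'docs: update README'."
    )
    user_prompt = (
        "<git_status>On branch main</git_status>\n\n"
        "<git_diff>diff --git a/README.md b/README.md</git_diff>"
    )

    # Instructions travel as the system message, git data as the user message.
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]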
@@ -1,3 +1,3 @@
 """Version information for gac package."""
 
-__version__ = "0.17.3"
+__version__ = "0.17.5"
@@ -61,7 +61,7 @@ def get_encoding(model: str) -> tiktoken.Encoding:
 
 def generate_commit_message(
     model: str,
-    prompt: str,
+    prompt: str | tuple[str, str],
     temperature: float = EnvDefaults.TEMPERATURE,
     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
     max_retries: int = EnvDefaults.MAX_RETRIES,
@@ -71,7 +71,7 @@ def generate_commit_message(
 
     Args:
         model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
-        prompt: The formatted prompt containing diff and context
+        prompt: Either a string prompt (for backward compatibility) or a tuple of (system_prompt, user_prompt)
         temperature: Controls randomness (0.0-1.0), lower values are more deterministic
         max_tokens: Maximum tokens in the response
         max_retries: Number of retry attempts if generation fails
@@ -85,8 +85,8 @@ def generate_commit_message(
 
     Example:
         >>> model = "anthropic:claude-3-5-haiku-latest"
-        >>> prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
-        >>> generate_commit_message(model, prompt)
+        >>> system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
+        >>> generate_commit_message(model, (system_prompt, user_prompt))
        'docs: Update README with installation instructions'
    """
    try:
@@ -98,6 +98,14 @@ def generate_commit_message(
 
     client = ai.Client()
 
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
     if quiet:
         spinner = None
     else:
@@ -112,7 +120,7 @@ def generate_commit_message(
             logger.debug(f"Trying with model {model} (attempt {retry_count + 1}/{max_retries})")
             response = client.chat.completions.create(
                 model=model,
-                messages=[{"role": "user", "content": prompt}],
+                messages=messages,
                 temperature=temperature,
                 max_tokens=max_tokens,
             )
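Because prompt now accepts either a plain string or a (system_prompt, user_prompt) tuple, both call styles remain valid. A minimal sketch restating the docstring example above (the import paths are assumptions; the diff does not show the package layout):

    # Import paths assumed for illustration; they are not shown in this diff.
    from gac.ai import generate_commit_message
    from gac.prompt import build_prompt

    model = "anthropic:claude-3-5-haiku-latest"

    # New style: pass the tuple returned by build_prompt()
    system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
    print(generate_commit_message(model, (system_prompt, user_prompt)))

    # Old style still works: a bare string is sent as the user prompt
    print(generate_commit_message(model, "Write a commit message for the staged changes."))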
@@ -95,7 +95,7 @@ def main(
     processed_diff = preprocess_diff(diff, token_limit=Utility.DEFAULT_DIFF_TOKEN_LIMIT, model=model_id)
     logger.debug(f"Processed diff ({len(processed_diff)} characters)")
 
-    prompt = build_prompt(
+    system_prompt, user_prompt = build_prompt(
         status=status,
         processed_diff=processed_diff,
         diff_stat=diff_stat,
@@ -106,16 +106,19 @@ def main(
 
     if show_prompt:
         console = Console()
+        # Show both system and user prompts
+        full_prompt = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
         console.print(
             Panel(
-                prompt,
+                full_prompt,
                 title="Prompt for LLM",
                 border_style="bright_blue",
             )
         )
 
     try:
-        prompt_tokens = count_tokens(prompt, model)
+        # Count tokens for both prompts
+        prompt_tokens = count_tokens(system_prompt, model) + count_tokens(user_prompt, model)
 
         warning_limit = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
         if warning_limit and prompt_tokens > warning_limit:
@@ -132,7 +135,7 @@ def main(
 
         commit_message = generate_commit_message(
             model=model,
-            prompt=prompt,
+            prompt=(system_prompt, user_prompt),
            temperature=temperature,
            max_tokens=max_output_tokens,
            max_retries=max_retries,
@@ -193,7 +196,7 @@ def main(
                 combined_hint = conversational_hint
 
                 # Regenerate prompt with conversational feedback
-                reroll_prompt = build_prompt(
+                reroll_system_prompt, reroll_user_prompt = build_prompt(
                     status=status,
                     processed_diff=processed_diff,
                     diff_stat=diff_stat,
@@ -202,15 +205,15 @@ def main(
                     scope=scope,
                 )
             else:
-                # No hint given, just reroll with same prompt
-                reroll_prompt = prompt
+                # No hint given, just reroll with same prompts
+                reroll_system_prompt, reroll_user_prompt = system_prompt, user_prompt
 
             console.print()  # Add blank line for readability
 
             # Generate new message
             commit_message = generate_commit_message(
                 model=model,
-                prompt=reroll_prompt,
+                prompt=(reroll_system_prompt, reroll_user_prompt),
                 temperature=temperature,
                 max_tokens=max_output_tokens,
                 max_retries=max_retries,
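Note the token-accounting change above: the prompt size for the warning check is now the sum of the two parts. Chat APIs add a few framing tokens per message on top of the raw text, so the sum slightly undercounts the true request size, which is acceptable for a warning threshold. A self-contained sketch, with a stand-in for gac's count_tokens helper built on tiktoken (which the diff's type hints reference):

    import tiktoken

    # Rough stand-in for gac's count_tokens(); assumes the cl100k_base encoding.
    def count_tokens(text: str) -> int:
        return len(tiktoken.get_encoding("cl100k_base").encode(text))

    system_prompt = "You are an expert at writing git commit messages."
    user_prompt = "<git_diff>diff --git a/README.md b/README.md</git_diff>"

    # Total prompt size approximated as the sum of both parts, as in the hunk
    # above; per-message framing tokens are ignored, a slight undercount.
    prompt_tokens = count_tokens(system_prompt) + count_tokens(user_prompt)
    print(prompt_tokens)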
@@ -237,8 +237,8 @@ def build_prompt(
     one_liner: bool = False,
     hint: str = "",
     scope: str | None = None,
-) -> str:
-    """Build a prompt for the AI model using the provided template and git information.
+) -> tuple[str, str]:
+    """Build system and user prompts for the AI model using the provided template and git information.
 
     Args:
         status: Git status output
@@ -249,7 +249,7 @@ def build_prompt(
         scope: Optional scope parameter. None = no scope, "infer" = infer scope, any other string = use as scope
 
     Returns:
-        Formatted prompt string ready to be sent to an AI model
+        Tuple of (system_prompt, user_prompt) ready to be sent to an AI model
     """
     template = load_prompt_template()
 
@@ -331,7 +331,49 @@ def build_prompt(
     # Clean up extra whitespace, collapsing blank lines that may contain spaces
     template = re.sub(r"\n(?:[ \t]*\n){2,}", "\n\n", template)
 
-    return template.strip()
+    # Split the template into system and user prompts
+    # System prompt contains all instructions, role, conventions, examples
+    # User prompt contains the actual git data
+
+    # Extract the git data sections for the user prompt
+    user_sections = []
+
+    # Extract git status
+    status_match = re.search(r"<git_status>.*?</git_status>", template, re.DOTALL)
+    if status_match:
+        user_sections.append(status_match.group(0))
+        # Remove from system prompt
+        template = template.replace(status_match.group(0), "")
+
+    # Extract git diff stat
+    diff_stat_match = re.search(r"<git_diff_stat>.*?</git_diff_stat>", template, re.DOTALL)
+    if diff_stat_match:
+        user_sections.append(diff_stat_match.group(0))
+        # Remove from system prompt
+        template = template.replace(diff_stat_match.group(0), "")
+
+    # Extract git diff
+    diff_match = re.search(r"<git_diff>.*?</git_diff>", template, re.DOTALL)
+    if diff_match:
+        user_sections.append(diff_match.group(0))
+        # Remove from system prompt
+        template = template.replace(diff_match.group(0), "")
+
+    # Extract hint if present
+    hint_match = re.search(r"<hint>.*?</hint>", template, re.DOTALL)
+    if hint_match and hint:  # Only include if hint was provided
+        user_sections.append(hint_match.group(0))
+        # Remove from system prompt
+        template = template.replace(hint_match.group(0), "")
+
+    # System prompt is everything else (role, conventions, examples, instructions)
+    system_prompt = template.strip()
+    system_prompt = re.sub(r"\n(?:[ \t]*\n){2,}", "\n\n", system_prompt)
+
+    # User prompt is the git data sections
+    user_prompt = "\n\n".join(user_sections).strip()
+
+    return system_prompt, user_prompt
 
 
 def clean_commit_message(message: str) -> str:
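The splitting logic in the final hunk is easy to exercise in isolation. A self-contained sketch with an invented template; the tag regexes and the whitespace cleanup mirror the hunk above:

    import re

    # Toy template mixing instructions with tagged git data (text invented).
    template = """You are an expert commit-message writer.

    <git_status>On branch main</git_status>

    <git_diff>diff --git a/README.md b/README.md</git_diff>

    Write one Conventional Commits message."""

    user_sections = []
    for tag in ("git_status", "git_diff_stat", "git_diff"):
        match = re.search(rf"<{tag}>.*?</{tag}>", template, re.DOTALL)
        if match:
            user_sections.append(match.group(0))
            template = template.replace(match.group(0), "")

    # Everything left over is the system prompt; the tagged data is the user prompt.
    system_prompt = re.sub(r"\n(?:[ \t]*\n){2,}", "\n\n", template).strip()
    user_prompt = "\n\n".join(user_sections).strip()
    print(system_prompt)
    print(user_prompt)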