commitai 1.0.5__py3-none-any.whl → 2.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
commitai/__init__.py CHANGED
@@ -3,7 +3,7 @@
3
3
 
4
4
  # This __version__ string is read by hatchling during the build process
5
5
  # Make sure to update it for new releases.
6
- __version__ = "0.3.0"
6
+ __version__ = "2.2.2"
7
7
 
8
8
  # The importlib.metadata approach is generally for reading the version
9
9
  # of an *already installed* package at runtime. We don't need it here
commitai/agent.py ADDED
@@ -0,0 +1,252 @@
1
+ import glob
2
+ import os
3
+ import subprocess
4
+ from typing import Any, Dict, Type
5
+
6
+ # from langchain.agents import AgentExecutor, create_tool_calling_agent # Removed
7
+ from langchain_core.language_models import BaseChatModel
8
+ from langchain_core.runnables import Runnable
9
+ from langchain_core.tools import BaseTool
10
+ from langgraph.prebuilt import create_react_agent
11
+ from pydantic import BaseModel, Field
12
+
13
+ # --- TOOLS ---
14
+
15
+
16
+ class ShellInput(BaseModel):
17
+ command: str = Field(
18
+ description=(
19
+ "The git command to execute (e.g., 'git status', 'git log'). "
20
+ "Must start with 'git'."
21
+ )
22
+ )
23
+
24
+
25
+ class ReadOnlyShellTool(BaseTool):
26
+ name: str = "git_shell"
27
+ description: str = (
28
+ "Run read-only git commands to inspect the repository state. "
29
+ "Only 'git' commands are allowed. Write operations are blocked."
30
+ )
31
+ args_schema: Type[BaseModel] = ShellInput
32
+
33
+ def _run(self, command: str) -> str:
34
+ command = command.strip()
35
+ if not command.startswith("git"):
36
+ return "Error: Only 'git' commands are allowed."
37
+
38
+ # Simple blocklist for write operations
39
+ forbidden = [
40
+ "push",
41
+ "pull",
42
+ "commit",
43
+ "merge",
44
+ "rebase",
45
+ "cherry-pick",
46
+ "stash",
47
+ "clean",
48
+ "reset",
49
+ "checkout",
50
+ "switch",
51
+ "branch",
52
+ ]
53
+ if any(w in command.split() for w in forbidden):
54
+ return f"Error: Command '{command}' contains forbidden write operations."
55
+
56
+ try:
57
+ # shell=True is dangerous in general, but we heavily restricted input above
58
+ result = subprocess.run(
59
+ command, shell=True, capture_output=True, text=True, cwd=os.getcwd()
60
+ )
61
+ if result.returncode != 0:
62
+ return f"Error ({result.returncode}): {result.stderr}"
63
+ return result.stdout
64
+ except Exception as e:
65
+ return f"Execution Error: {str(e)}"
66
+
67
+
68
+ class FileSearchInput(BaseModel):
69
+ pattern: str = Field(
70
+ description="The glob pattern to search for files (e.g., 'src/**/*.py')."
71
+ )
72
+
73
+
74
+ class FileSearchTool(BaseTool):
75
+ name: str = "file_search"
76
+ description: str = (
77
+ "Search for file paths in the project using glob patterns. "
78
+ "Useful to find files to inspect."
79
+ )
80
+ args_schema: Type[BaseModel] = FileSearchInput
81
+
82
+ def _run(self, pattern: str) -> str:
83
+ try:
84
+ # Security: prevent breaking out of repo?
85
+ # For simplicity, just run glob.
86
+ if ".." in pattern:
87
+ return "Error: '..' not allowed in patterns."
88
+
89
+ files = glob.glob(pattern, recursive=True)
90
+ if not files:
91
+ return "No files found."
92
+ return "\n".join(files[:20]) # Limit output
93
+ except Exception as e:
94
+ return f"Error: {str(e)}"
95
+
96
+
97
+ class FileReadInput(BaseModel):
98
+ file_path: str = Field(description="The path of the file to read.")
99
+
100
+
101
+ class FileReadTool(BaseTool):
102
+ name: str = "file_read"
103
+ description: str = "Read the contents of a specific file."
104
+ args_schema: Type[BaseModel] = FileReadInput
105
+
106
+ def _run(self, file_path: str) -> str:
107
+ if ".." in file_path:
108
+ return "Error: Traversing up directories is not allowed."
109
+ if not os.path.exists(file_path):
110
+ return "Error: File does not exist."
111
+ try:
112
+ with open(file_path, "r") as f:
113
+ content = f.read()
114
+ return content[:2000] # Truncate large files
115
+ except Exception as e:
116
+ return f"Error reading file: {str(e)}"
117
+
118
+
119
+ # --- MIDDLEWARE (Simulated for Agent) ---
120
+
121
+
122
+ class SummarizationMiddleware:
123
+ """Uses LLM to summarize diff before agent sees it."""
124
+
125
+ def __init__(self, llm: BaseChatModel):
126
+ self.llm = llm
127
+
128
+ def process(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
129
+ diff = inputs.get("diff", "")
130
+ if not diff:
131
+ return inputs
132
+
133
+ # Simple summarization chain (inline invocation)
134
+ # Truncate for summary
135
+ msg = f"Summarize these changes in 2 sentences:\n\n{diff[:5000]}"
136
+ resp = self.llm.invoke(msg)
137
+ inputs["summary"] = resp.content
138
+ return inputs
139
+
140
+
141
+ class TodoMiddleware:
142
+ """Scans diff for TODOs and adds to inputs."""
143
+
144
+ def process(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
145
+ diff = inputs.get("diff", "")
146
+ todos = []
147
+ for line in diff.splitlines():
148
+ if line.startswith("+") and any(
149
+ x in line.lower() for x in ["todo", "fixme"]
150
+ ):
151
+ todos.append(line[1:].strip())
152
+ inputs["todos"] = todos
153
+ inputs["todo_str"] = "\n".join(f"- {t}" for t in todos) if todos else "None"
154
+ return inputs
155
+
156
+
157
+ # --- AGENT ---
158
+
159
+
160
+
161
+
162
+
163
+ def create_commit_agent(llm: BaseChatModel) -> Runnable:
164
+ # 1. Init Tools
165
+ tools = [ReadOnlyShellTool(), FileSearchTool(), FileReadTool()]
166
+
167
+ # 2. Middlewares
168
+ summ_mw = SummarizationMiddleware(llm)
169
+ todo_mw = TodoMiddleware()
170
+
171
+ # 3. Prompt
172
+ system_prompt = """You are an expert software engineer acting as a Commit Assistant.
173
+ Your goal is to generate a conventional commit message.
174
+
175
+ Context:
176
+ - User Explanation: {explanation}
177
+ - Detected TODOs: {todo_str}
178
+ - Auto-Summary: {summary}
179
+ - Staged Diff: {diff}
180
+
181
+ You have access to tools to explore the codebase if the diff + explanation is ambiguous.
182
+ - Use `git_shell` to check status or logs.
183
+ - Use `file_search` and `file_read` to understand context of modified files.
184
+
185
+ Protocol:
186
+ 1. Analyze the input.
187
+ 2. If detecting POTENTIAL SENSITIVE DATA (API keys, secrets) in the diff, you MUST stop
188
+ and ask the user (simulated by returning a warning message).
189
+ 3. If clarification is needed, explore files.
190
+ 4. Final Answer MUST be ONLY the commit message.
191
+ """
192
+ # Note: create_react_agent handles the prompt internally or via state_modifier.
193
+ # We can pass a system string or a function. Since our prompt depends on dynamic
194
+ # variables (diff, explanation, etc.), we need to inject them. LangGraph's
195
+ # prebuilt agent usually takes a static system message. However, we can use the
196
+ # 'messages' state. But to keep it simple and compatible with existing 'invoke'
197
+ # interface: We will format the system prompt in the wrapper and pass it as the
198
+ # first message.
199
+
200
+ # Actually, create_react_agent supports 'state_modifier'.
201
+ # If we pass a formatted string, it works as system prompt.
202
+
203
+ # 4. Construct Graph
204
+ # We don't construct the graph with ALL variables pre-bound if they change per run.
205
+ # Instead, we'll format the prompt in the pipeline and pass it to the agent.
206
+
207
+ agent_graph = create_react_agent(llm, tools)
208
+
209
+ # 5. Pipeline with Middleware
210
+ def run_pipeline(inputs: Dict[str, Any]) -> str:
211
+ # Run Middleware
212
+ state = inputs.copy()
213
+ state = todo_mw.process(state)
214
+ state = summ_mw.process(state)
215
+
216
+ # Inject formatted fields if missing
217
+ state.setdefault("explanation", "None")
218
+ state.setdefault("summary", "None")
219
+ state.setdefault("todo_str", "None")
220
+
221
+ # Format System Prompt
222
+ formatted_system_prompt = system_prompt.format(
223
+ explanation=state["explanation"],
224
+ todo_str=state["todo_str"],
225
+ summary=state["summary"],
226
+ diff=state.get("diff", ""),
227
+ )
228
+
229
+ # Run Agent
230
+ # LangGraph inputs: {"messages": [{"role": "user", "content": ...}]}
231
+ # We inject the system prompt as a SystemMessage or just update the state.
232
+ # create_react_agent primarily looks at 'messages'.
233
+
234
+ from langchain_core.messages import HumanMessage, SystemMessage
235
+
236
+ messages = [
237
+ SystemMessage(content=formatted_system_prompt),
238
+ HumanMessage(content="Generate the commit message."),
239
+ ]
240
+
241
+ # Invoke graph
242
+ # result is a dict with 'messages'
243
+ result = agent_graph.invoke({"messages": messages})
244
+
245
+ # Extract last message content
246
+ last_message = result["messages"][-1]
247
+ return str(last_message.content)
248
+
249
+ # Wrap in RunnableLambda to expose 'invoke'
250
+ from langchain_core.runnables import RunnableLambda
251
+
252
+ return RunnableLambda(run_pipeline)
commitai/chains.py ADDED
@@ -0,0 +1,135 @@
1
+ from typing import Any, Dict, List, Optional, TypedDict
2
+
3
+ from langchain_core.language_models import BaseChatModel
4
+ from langchain_core.output_parsers import StrOutputParser
5
+ from langchain_core.prompts import ChatPromptTemplate
6
+ from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
7
+
8
+
9
+ class CommitState(TypedDict):
10
+ diff: str
11
+ explanation: Optional[str]
12
+ summary: Optional[str]
13
+ todos: Optional[List[str]]
14
+
15
+
16
+ class SummarizationMiddleware:
17
+ """Middleware to summarize the diff before generating the commit message."""
18
+
19
+ def __init__(self, llm: BaseChatModel):
20
+ self.llm = llm
21
+ self.prompt = ChatPromptTemplate.from_template(
22
+ "Summarize the following code changes concisely in 1-2 sentences:\n\n{diff}"
23
+ )
24
+ self.chain = self.prompt | self.llm | StrOutputParser()
25
+
26
+ def __call__(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
27
+ """Run the summarizer and add 'summary' to the state."""
28
+ diff = inputs.get("diff", "")
29
+ if not diff:
30
+ return {**inputs, "summary": ""}
31
+
32
+ # We invoke the chain synchronously here
33
+ summary = self.chain.invoke({"diff": diff})
34
+ return {**inputs, "summary": summary}
35
+
36
+
37
+ class TodoMiddleware:
38
+ """Middleware to scan the diff for TODO/FIXME/HACK comments."""
39
+
40
+ def __call__(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
41
+ diff = inputs.get("diff", "")
42
+ todos = []
43
+ for line in diff.splitlines():
44
+ if line.startswith("+"):
45
+ lower_line = line.lower()
46
+ if (
47
+ "todo" in lower_line
48
+ or "fixme" in lower_line
49
+ or "hack" in lower_line
50
+ ):
51
+ # Strip the + and whitespace
52
+ clean_line = line[1:].strip()
53
+ todos.append(clean_line)
54
+
55
+ return {**inputs, "todos": todos}
56
+
57
+
58
+ def create_commit_chain(llm: BaseChatModel) -> Runnable:
59
+ """Creates the full commit generation pipeline with middleware."""
60
+
61
+ # 1. Initialize Middlewares
62
+ summarizer = SummarizationMiddleware(llm)
63
+ todo_scanner = TodoMiddleware()
64
+
65
+ # 2. Define the Prompt
66
+ # We include placeholders for summary and todos if they exist
67
+ system_template = (
68
+ "You are an expert software engineer and git commit message generator.\n"
69
+ "Your task is to generate a clean, concise commit message following the "
70
+ "Conventional Commits specification.\n\n"
71
+ "Values from middleware:\n"
72
+ "{summary_section}\n"
73
+ "{todo_section}\n\n"
74
+ "Input context:\n"
75
+ "{explanation_section}\n\n"
76
+ "Existing Code Changes (Diff):\n"
77
+ "{diff}\n\n"
78
+ "Instructions:\n"
79
+ "1. Use the format: <type>(<scope>): <subject>\n"
80
+ "2. Keep the subject line under 50 characters if possible.\n"
81
+ "3. If there are multiple changes, provide a bulleted body.\n"
82
+ "4. If TODOs were detected, mention them in the footer or body as "
83
+ "appropriate.\n"
84
+ "5. If an explanation is provided, prioritize it.\n"
85
+ )
86
+ prompt = ChatPromptTemplate.from_template(system_template)
87
+
88
+ # 3. Helper to format the prompt inputs from state
89
+ def format_inputs(state: CommitState) -> Dict[str, Any]:
90
+ summary = state.get("summary")
91
+ todos = state.get("todos")
92
+ explanation = state.get("explanation")
93
+
94
+ summary_section = f"Summary of changes:\n{summary}\n" if summary else ""
95
+
96
+ todo_section = ""
97
+ if todos:
98
+ todo_section = (
99
+ "Detected TODOs in this diff:\n"
100
+ + "\n".join(f"- {t}" for t in todos)
101
+ + "\n"
102
+ )
103
+
104
+ explanation_section = ""
105
+ if explanation:
106
+ explanation_section = f"User Explanation:\n{explanation}\n"
107
+
108
+ return {
109
+ "diff": state["diff"],
110
+ "summary_section": summary_section,
111
+ "todo_section": todo_section,
112
+ "explanation_section": explanation_section,
113
+ }
114
+
115
+ # 4. Construct the Pipeline
116
+ # Parallel step to run middlewares
117
+ # (conceptually, though here we chain them or use RunnablePassthrough)
118
+ # Since middlewares modify state, we can chain them:
119
+
120
+ middleware_chain: Runnable = (
121
+ RunnablePassthrough()
122
+ | RunnableLambda(todo_scanner)
123
+ | RunnableLambda(summarizer)
124
+ )
125
+
126
+ # Final generation chain
127
+ generation_chain = (
128
+ middleware_chain
129
+ | RunnableLambda(format_inputs)
130
+ | prompt
131
+ | llm
132
+ | StrOutputParser()
133
+ )
134
+
135
+ return generation_chain
commitai/cli.py CHANGED
@@ -2,13 +2,11 @@
2
2
  # -*- coding: utf-8 -*-
3
3
 
4
4
  import os
5
- from typing import Optional, Tuple, cast
5
+ import sys
6
+ from typing import Optional, Tuple
6
7
 
7
8
  import click
8
- from langchain_anthropic import ChatAnthropic
9
9
  from langchain_core.language_models.chat_models import BaseChatModel
10
- from langchain_ollama import ChatOllama
11
- from langchain_openai import ChatOpenAI
12
10
 
13
11
  # Keep SecretStr import in case it's needed elsewhere or for future refinement
14
12
 
@@ -18,6 +16,7 @@ try:
18
16
  except ImportError:
19
17
  ChatGoogleGenerativeAI = None # type: ignore
20
18
 
19
+ from commitai.agent import create_commit_agent
21
20
  from commitai.git import (
22
21
  create_commit,
23
22
  get_commit_template,
@@ -28,11 +27,6 @@ from commitai.git import (
28
27
  save_commit_template,
29
28
  stage_all_changes,
30
29
  )
31
- from commitai.template import (
32
- adding_template,
33
- build_user_message,
34
- default_system_message,
35
- )
36
30
 
37
31
 
38
32
  def _get_google_api_key() -> Optional[str]:
@@ -49,45 +43,30 @@ def _initialize_llm(model: str) -> BaseChatModel:
49
43
  google_api_key_str = _get_google_api_key()
50
44
 
51
45
  try:
52
- if model.startswith("gpt-"):
53
- api_key = os.getenv("OPENAI_API_KEY")
54
- if not api_key:
55
- raise click.ClickException(
56
- "Error: OPENAI_API_KEY environment variable not set."
57
- )
58
- return ChatOpenAI(model=model, api_key=api_key, temperature=0.7)
59
-
60
- elif model.startswith("claude-"):
61
- api_key = os.getenv("ANTHROPIC_API_KEY")
62
- if not api_key:
63
- raise click.ClickException(
64
- "Error: ANTHROPIC_API_KEY environment variable not set."
65
- )
66
- return ChatAnthropic(model_name=model, api_key=api_key, temperature=0.7)
67
-
68
- elif model.startswith("gemini-"):
69
- if ChatGoogleGenerativeAI is None:
70
- raise click.ClickException(
71
- "Error: 'langchain-google-genai' is not installed. "
72
- "Run 'pip install commitai[test]' or "
73
- "'pip install langchain-google-genai'"
74
- )
75
- if not google_api_key_str:
76
- raise click.ClickException(
77
- "Error: Google API Key not found. Set GOOGLE_API_KEY, "
78
- "GEMINI_API_KEY, or GOOGLE_GENERATIVE_AI_API_KEY."
79
- )
80
- return ChatGoogleGenerativeAI(
81
- model=model,
82
- google_api_key=google_api_key_str,
83
- temperature=0.7,
84
- convert_system_message_to_human=True,
46
+ # Enforce strict Gemini-3-only model policy
47
+ # (support for OpenAI/Anthropic/Ollama providers was removed in 2.x)
48
+ allowed_models = ["gemini-3-flash-preview", "gemini-3-pro-preview"]
49
+ if model not in allowed_models:
50
+ raise click.ClickException(
51
+ f"🚫 Unsupported model: {model}. "
52
+ f"Only Google Gemini 3 models are allowed: {', '.join(allowed_models)}"
53
+ )
54
+
55
+ if ChatGoogleGenerativeAI is None:
56
+ raise click.ClickException(
57
+ "Error: 'langchain-google-genai' is not installed. "
58
+ "Run 'pip install commitai[test]' or "
59
+ "'pip install langchain-google-genai'"
85
60
  )
86
- elif model.startswith("llama"):
87
- # Ollama models (e.g., llama2, llama3)
88
- return cast(BaseChatModel, ChatOllama(model=model, temperature=0.7))
89
- else:
90
- raise click.ClickException(f"🚫 Unsupported model: {model}")
61
+ if not google_api_key_str:
62
+ raise click.ClickException(
63
+ "Error: Google API Key not found. Set GOOGLE_API_KEY, "
64
+ "GEMINI_API_KEY, or GOOGLE_GENERATIVE_AI_API_KEY."
65
+ )
66
+ return ChatGoogleGenerativeAI(
67
+ model=model,
68
+ google_api_key=google_api_key_str,
69
+ )
91
70
 
92
71
  except Exception as e:
93
72
  raise click.ClickException(f"Error initializing AI model: {e}") from e
@@ -100,25 +79,13 @@ def _prepare_context() -> str:
100
79
 
101
80
  repo_name = get_repository_name()
102
81
  branch_name = get_current_branch_name()
82
+ # Return just the diff for the chain, or context?
83
+ # The chain prompt expects 'diff'.
84
+ # Current helper was returning "Repo/Branch\n\nDiff".
85
+ # Let's keep it to maximize context for the chain.
103
86
  return f"{repo_name}/{branch_name}\n\n{diff}"
104
87
 
105
88
 
106
- def _build_prompt(
107
- explanation: str, formatted_diff: str, template: Optional[str]
108
- ) -> str:
109
- system_message = default_system_message
110
- if template:
111
- system_message += adding_template
112
- system_message += template
113
-
114
- if explanation:
115
- diff_message = build_user_message(explanation, formatted_diff)
116
- else:
117
- diff_message = formatted_diff
118
-
119
- return f"{system_message}\n\n{diff_message}"
120
-
121
-
122
89
  def _handle_commit(commit_message: str, commit_flag: bool) -> None:
123
90
  repo_path = get_repository_name()
124
91
  git_dir = os.path.join(repo_path, ".git")
@@ -135,19 +102,34 @@ def _handle_commit(commit_message: str, commit_flag: bool) -> None:
135
102
  except IOError as e:
136
103
  raise click.ClickException(f"Error writing commit message file: {e}") from e
137
104
 
105
+
138
106
  final_commit_message = commit_message
139
107
  if not commit_flag:
108
+ click.secho(
109
+ f"\n📝 Generated Commit Message:\n{'-' * 40}\n"
110
+ f"{commit_message}\n{'-' * 40}\n",
111
+ fg="green",
112
+ )
113
+
114
+ # Interactive loop for Enter-Enter flow
140
115
  try:
141
- click.edit(filename=commit_msg_path)
142
- with open(commit_msg_path, "r") as f:
143
- final_commit_message = f.read().strip()
144
- except click.UsageError as e:
145
- click.secho(f"Could not open editor: {e}", fg="yellow")
146
- click.secho(f"Using generated message:\n\n{commit_message}\n", fg="yellow")
147
- except IOError as e:
148
- raise click.ClickException(
149
- f"Error reading commit message file after edit: {e}"
150
- ) from e
116
+ # Default to Yes (Enter)
117
+ if click.confirm("🚀 Commit with this message?", default=True):
118
+ pass # final_commit_message is already set
119
+ else:
120
+ if click.confirm("✏️ Edit message?", default=True):
121
+ try:
122
+ click.edit(filename=commit_msg_path)
123
+ with open(commit_msg_path, "r") as f:
124
+ final_commit_message = f.read().strip()
125
+ except click.UsageError as e:
126
+ click.secho(f"Could not open editor: {e}", fg="yellow")
127
+ else:
128
+ raise click.ClickException("Aborted by user.")
129
+ except click.Abort:
130
+ raise click.ClickException("Aborted by user.") from None
131
+ except Exception as e:
132
+ raise click.ClickException(f"Error handling user input: {e}") from e
151
133
 
152
134
  if not final_commit_message:
153
135
  raise click.ClickException("Aborting commit due to empty commit message.")
@@ -173,6 +155,11 @@ def cli() -> None:
173
155
  is_flag=True,
174
156
  help="Commit the changes with the generated message",
175
157
  )
158
+ @click.option(
159
+ "--review/--no-review",
160
+ default=True,
161
+ help="AI review the diff before generating the commit message (default: enabled)",
162
+ )
176
163
  @click.option(
177
164
  "--template",
178
165
  "-t",
@@ -190,23 +177,42 @@ def cli() -> None:
190
177
  @click.option(
191
178
  "--model",
192
179
  "-m",
193
- default="gemini-2.5-pro-preview-03-25",
180
+ default="gemini-3-flash-preview",
194
181
  help=(
195
- "Set the engine model (e.g., 'gpt-4', 'claude-3-opus-20240229', "
196
- "'gemini-2.5-pro-preview-03-25'). Ensure API key env var is set "
197
- "(OPENAI_API_KEY, ANTHROPIC_API_KEY, "
198
- "GOOGLE_API_KEY/GEMINI_API_KEY/GOOGLE_GENERATIVE_AI_API_KEY)."
182
+ "Set the engine model (default: gemini-3-flash-preview). "
183
+ "Only Google Gemini 3 models are supported "
184
+ "('gemini-3-flash-preview', 'gemini-3-pro-preview'). "
185
+ "Ensure GOOGLE_API_KEY is set."
199
186
  ),
200
187
  )
188
+ @click.option(
189
+ "--deep",
190
+ "-d",
191
+ is_flag=True,
192
+ help="Use the deeper reasoning model (gemini-3-pro-preview).",
193
+ )
201
194
  def generate_message(
202
195
  description: Tuple[str, ...],
203
196
  commit: bool,
197
+ review: bool,
204
198
  template: Optional[str],
205
199
  add: bool,
206
200
  model: str,
201
+ deep: bool = False,
207
202
  ) -> None:
208
203
  explanation = " ".join(description)
209
204
 
205
+ # Handle Model Selection Logic
206
+ # 1. Default is gemini-3-flash-preview
207
+ # 2. If --deep is passed, upgrade to gemini-3-pro-preview
208
+ # (unless -m is explicitly distinct)
209
+ if deep:
210
+ # Upgrade to Pro model if deep flag is set
211
+ # We override the model unless the user explicitly chose a different one
212
+ # (For simplicity here, we assume --deep implies pro)
213
+ if model == "gemini-3-flash-preview":
214
+ model = "gemini-3-pro-preview"
215
+
210
216
  llm = _initialize_llm(model)
211
217
 
212
218
  if add:
@@ -222,26 +228,63 @@ def generate_message(
222
228
 
223
229
  formatted_diff = _prepare_context()
224
230
 
231
+ # Initialize Agent Pipeline
232
+ agent_pipeline = create_commit_agent(llm)
233
+
234
+ # Optional pre-generation review
235
+ if review:
236
+ click.secho(
237
+ "\n\n🔎 Reviewing the staged changes before "
238
+ "generating a commit message...\n",
239
+ fg="blue",
240
+ bold=True,
241
+ )
242
+
243
+ # Only prompt for confirmation when running in an interactive TTY
244
+ try:
245
+ is_interactive = sys.stdin.isatty()
246
+ except Exception:
247
+ is_interactive = False
248
+ if is_interactive:
249
+ if not click.confirm(
250
+ "Proceed with generating the commit message?", default=True
251
+ ):
252
+ raise click.ClickException("Aborted by user after review.")
253
+
225
254
  if template:
226
255
  click.secho(
227
256
  "⚠️ Warning: The --template/-t option is deprecated. Use environment "
228
257
  "variable TEMPLATE_COMMIT or `commitai-create-template` command.",
229
258
  fg="yellow",
230
259
  )
231
- final_template = template or get_commit_template()
232
260
 
233
- input_message = _build_prompt(explanation, formatted_diff, final_template)
261
+ # Check for template from env or file if not provided via CLI
262
+ # (though CLI overrides or is deprecated)
263
+ # The agent/chain prompt Logic usually handles 'template' variable
264
+ # if passed in input.
265
+ # We need to fetch the template content if it exists to pass to agent.
266
+
267
+ final_template_content = template
268
+ if not final_template_content:
269
+ # Check env var or local file
270
+ final_template_content = os.getenv("TEMPLATE_COMMIT") or get_commit_template()
234
271
 
235
272
  click.clear()
236
273
  click.secho(
237
- "\n\n🧠 Analyzing the changes and generating a commit message...\n\n",
274
+ "\n\n🧠 internal-monologue: Analyzing changes, "
275
+ "checking for sensitive data, and summarizing...\n\n",
238
276
  fg="blue",
239
277
  bold=True,
240
278
  )
241
279
  try:
242
280
  assert llm is not None
243
- ai_message = llm.invoke(input=input_message)
244
- commit_message = ai_message.content
281
+ # Invoke the Agent Pipeline
282
+ inputs = {"diff": formatted_diff, "explanation": explanation}
283
+ if final_template_content:
284
+ inputs["template"] = final_template_content
285
+
286
+ commit_message = agent_pipeline.invoke(inputs)
287
+
245
288
  if not isinstance(commit_message, str):
246
289
  commit_message = str(commit_message)
247
290
 
@@ -283,23 +326,42 @@ def create_template_command(template_content: Tuple[str, ...]) -> None:
283
326
  is_flag=True,
284
327
  help="Commit the changes with the generated message",
285
328
  )
329
+ @click.option(
330
+ "--review/--no-review",
331
+ default=True,
332
+ help="AI review the diff before generating the commit message (default: enabled)",
333
+ )
286
334
  @click.option(
287
335
  "--model",
288
336
  "-m",
289
- default="gemini-2.5-pro-preview-03-25",
337
+ default="gemini-3-flash-preview",
290
338
  help="Set the engine model to be used.",
291
339
  )
340
+ @click.option(
341
+ "--deep",
342
+ "-d",
343
+ is_flag=True,
344
+ help="Use the deeper reasoning model (gemini-3-pro-preview).",
345
+ )
292
346
  @click.pass_context
293
347
  def commitai_alias(
294
348
  ctx: click.Context,
295
349
  description: Tuple[str, ...],
296
350
  add: bool,
297
351
  commit: bool,
352
+ review: bool,
298
353
  model: str,
354
+ deep: bool,
299
355
  ) -> None:
300
356
  """Alias for the 'generate' command."""
301
357
  ctx.forward(
302
- generate_message, description=description, add=add, commit=commit, model=model
358
+ generate_message,
359
+ description=description,
360
+ add=add,
361
+ commit=commit,
362
+ review=review,
363
+ model=model,
364
+ deep=deep,
303
365
  )
304
366
 
305
367
 
commitai/template.py CHANGED
@@ -23,3 +23,24 @@ adding_template = " The message should follow this template: "
23
23
 
24
24
  def build_user_message(explanation, diff):
25
25
  return f"Here is a high-level explanation of the commit: {explanation}\n\n{diff}"
26
+
27
+
28
+ def build_review_prompt(explanation: str, formatted_diff: str) -> str:
29
+ """Builds a review prompt asking the AI to review the diff before message generation.
30
+
31
+ The review should highlight:
32
+ - correctness concerns, risky changes, missing tests or docs
33
+ - obvious refactors or style violations
34
+ - a short bullet summary of the changes
35
+ Keep it concise.
36
+ """
37
+ review_system = (
38
+ "You are a senior code reviewer. You will receive a repository path/branch and a git diff. "
39
+ "Provide a brief review focusing on potential issues, risks, and improvement suggestions. "
40
+ "Then provide a very short summary of changes. Keep output as plain text, no markdown code fences."
41
+ )
42
+ if explanation:
43
+ intro = f"High-level explanation: {explanation}\n\n{formatted_diff}"
44
+ else:
45
+ intro = formatted_diff
46
+ return f"{review_system}\n\n{intro}"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: commitai
3
- Version: 1.0.5
3
+ Version: 2.2.2
4
4
  Summary: Commitai helps you generate git commit messages using AI
5
5
  Project-URL: Bug Tracker, https://github.com/lguibr/commitai/issues
6
6
  Project-URL: Documentation, https://github.com/lguibr/commitai/blob/main/README.md
@@ -34,28 +34,25 @@ Classifier: Intended Audience :: Developers
34
34
  Classifier: License :: OSI Approved :: MIT License
35
35
  Classifier: Operating System :: OS Independent
36
36
  Classifier: Programming Language :: Python :: 3
37
- Classifier: Programming Language :: Python :: 3.9
38
37
  Classifier: Programming Language :: Python :: 3.10
39
38
  Classifier: Programming Language :: Python :: 3.11
40
39
  Classifier: Programming Language :: Python :: 3.12
41
40
  Classifier: Topic :: Software Development :: Version Control :: Git
42
41
  Classifier: Topic :: Utilities
43
- Requires-Python: >=3.9
44
- Requires-Dist: click<9.0,>=8.0
45
- Requires-Dist: langchain-anthropic<=0.3.12,>=0.1.0
46
- Requires-Dist: langchain-community<=0.3.23,>=0.0.20
47
- Requires-Dist: langchain-core<=0.3.58,>=0.1.0
48
- Requires-Dist: langchain-google-genai~=2.1.4
49
- Requires-Dist: langchain-ollama~=0.3.2
50
- Requires-Dist: langchain-openai<=0.3.16,>=0.1.0
51
- Requires-Dist: langchain<=0.3.25,>=0.1.0
52
- Requires-Dist: pydantic<3.0,>=2.0
42
+ Requires-Python: >=3.10
43
+ Requires-Dist: click>=8.1
44
+ Requires-Dist: langchain-community>=0.4.0
45
+ Requires-Dist: langchain-core>=1.0
46
+ Requires-Dist: langchain-google-genai>=1.0
47
+ Requires-Dist: langchain>=1.0
48
+ Requires-Dist: langgraph>=1.0.0
49
+ Requires-Dist: pydantic>=2.0
53
50
  Provides-Extra: test
54
- Requires-Dist: langchain-google-genai~=2.1.4; extra == 'test'
51
+ Requires-Dist: langchain-google-genai>=2.0.0; extra == 'test'
55
52
  Requires-Dist: mypy>=1.9.0; extra == 'test'
56
53
  Requires-Dist: pytest-cov>=3.0; extra == 'test'
57
54
  Requires-Dist: pytest>=7.0; extra == 'test'
58
- Requires-Dist: ruff==0.4.4; extra == 'test'
55
+ Requires-Dist: ruff>=0.4.4; extra == 'test'
59
56
  Requires-Dist: types-setuptools; extra == 'test'
60
57
  Description-Content-Type: text/markdown
61
58
 
@@ -102,7 +99,7 @@ Simply stage your files and run `commitai`. It analyzes the diff, optionally tak
102
99
 
103
100
  ## Features
104
101
 
105
- * 🧠 **Intelligent Commit Generation**: Analyzes staged code differences (`git diff --staged`) using state-of-the-art AI models (GPT, Claude, Gemini) to create meaningful commit messages.
102
+ * 🧠 **Intelligent Commit Generation**: Analyzes staged code differences (`git diff --staged`) using Google's Gemini 3 models to create meaningful commit messages.
106
103
  * 📄 **Conventional Commits**: Automatically formats messages according to the Conventional Commits specification (e.g., `feat(auth): add JWT authentication`). This improves readability and enables automated changelog generation.
107
104
  * 📝 **Optional Explanations**: Provide a high-level description of your changes as input to guide the AI, or let it infer the context solely from the code diff.
108
105
  * ✅ **Pre-commit Hook Integration**: Automatically runs your existing native Git pre-commit hook (`.git/hooks/pre-commit`) before generating the message, ensuring code quality and style checks pass.
@@ -157,7 +154,7 @@ CommitAi requires API keys for the AI provider you intend to use. Set these as e
157
154
  export GOOGLE_API_KEY="your_google_api_key_here"
158
155
  ```
159
156
 
160
- You only need to set the key for the provider corresponding to the model you select (or the default, Gemini).
157
+ Only a Google API key is required: all supported models (default: Gemini 3 Flash) are Google Gemini 3 models.
161
158
 
162
159
  ### Ollama
163
160
 
@@ -224,14 +221,20 @@ The `commitai` command (which is an alias for `commitai generate`) accepts the f
224
221
  * Example: `commitai -c "Fix typo in documentation"` (for minor changes)
225
222
  * Can be combined with `-a`: `commitai -a -c "Quick fix and commit all"`
226
223
 
224
+ * `--review` / `--no-review`:
225
+ * Toggle a preliminary AI review of the staged diff before generating the commit message. Default is `--review` (enabled).
226
+ * When enabled, CommitAi prints a brief review and asks if you want to proceed.
227
+ * Example: `commitai --no-review` to skip the review step.
228
+
227
229
  * `-m <model_name>`, `--model <model_name>`:
228
230
  * Specifies which AI model to use.
229
- * Defaults to `gemini-2.5-pro-preview-03-25`.
231
+ * Only Google Gemini 3 models are supported in this version.
232
+ * Defaults to `gemini-3-flash-preview`.
230
233
  * Ensure the corresponding API key environment variable is set.
231
234
  * Examples:
235
+ * `commitai -m gemini-3-pro-preview "Use Google's Gemini 3 Pro"`
232
236
  * `commitai -m gpt-4 "Use OpenAI's GPT-4"`
233
- * `commitai -m claude-3-opus-20240229 "Use Anthropic's Claude 3 Opus"`
234
- * `commitai -m gemini-2.5-flash-preview-04-17 "Use Google's Gemini 1.5 Flash"`
237
+ * Note: non-Gemini models such as GPT-4 or Claude 3 Opus are rejected by this version.
235
238
 
236
239
  ### Creating Repository Templates
237
240
 
@@ -0,0 +1,11 @@
1
+ commitai/__init__.py,sha256=6977C2xbYqWwIRHhuFqmyoyI7MaMQmzb3SYtWjIJ4To,388
2
+ commitai/agent.py,sha256=dxTHlxpYWuOQU9purJMoZCWhTszY4pkQhhnXXuWvmWk,8330
3
+ commitai/chains.py,sha256=i5tQL9Qg-kP4yVCiQavhOLS9ljh72bxYALQ27Hoqivc,4612
4
+ commitai/cli.py,sha256=I_3c2q15e3KniprWidqry8ZLJJXg-jAcT2w9wO00_L4,11666
5
+ commitai/git.py,sha256=XWAloZWQuLrFHUyfh3SkOgLsL4kfKRtgj3fzuJjcL2A,1649
6
+ commitai/template.py,sha256=PAS3BUjj6fcdsvSheopU_0xGCLPMj-vvFeZVCNRf0VM,2274
7
+ commitai-2.2.2.dist-info/METADATA,sha256=sdVS1BuvMbvck6vWfNjv4tPrGyMI8RTff6bvhditrz8,13856
8
+ commitai-2.2.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
9
+ commitai-2.2.2.dist-info/entry_points.txt,sha256=qzWJQdPoR38mjQgRPRCB3tA7Kojtj3WrozlFWR4KhLY,128
10
+ commitai-2.2.2.dist-info/licenses/LICENSE,sha256=wVkmSz0UMpGw0xYxk4AmkPLd_tVFcuszTdNIoq02tJA,1087
11
+ commitai-2.2.2.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: hatchling 1.27.0
2
+ Generator: hatchling 1.28.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
@@ -1,9 +0,0 @@
1
- commitai/__init__.py,sha256=Z5hB8NLKlfi7oPDRcDSQmtPSo_arLjoa6jwjFt3Pb9o,388
2
- commitai/cli.py,sha256=ClIF6hLekc4gRg9IAUDK9d-y4ee7kvJcZuTt9NCfFRg,9732
3
- commitai/git.py,sha256=XWAloZWQuLrFHUyfh3SkOgLsL4kfKRtgj3fzuJjcL2A,1649
4
- commitai/template.py,sha256=q4AO64hKhJP2y9DCc5-ePFoRZfOQBkbkB6vt8CnoMh8,1379
5
- commitai-1.0.5.dist-info/METADATA,sha256=_wO_IflHPj969OHjwzuQ-kCgV0DPgJ9ChPi8qPjBJPs,13710
6
- commitai-1.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
7
- commitai-1.0.5.dist-info/entry_points.txt,sha256=qzWJQdPoR38mjQgRPRCB3tA7Kojtj3WrozlFWR4KhLY,128
8
- commitai-1.0.5.dist-info/licenses/LICENSE,sha256=wVkmSz0UMpGw0xYxk4AmkPLd_tVFcuszTdNIoq02tJA,1087
9
- commitai-1.0.5.dist-info/RECORD,,