commitai 2.0.0__tar.gz → 2.2.2__tar.gz

This diff reflects the contents of publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
@@ -12,7 +12,7 @@ repos:
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version. Must be aligned with the version in pyproject.toml
-    rev: v0.4.4 # Choose a specific Ruff version
+    rev: v0.15.0 # Match local version
     hooks:
       # Run the linter. Applies fixes including import sorting, etc.
      - id: ruff
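
The comment above says the pinned rev must track the ruff version used locally. A minimal drift check along those lines, assuming the config lives at .pre-commit-config.yaml and ruff is installed in the current environment (the regex and file path are illustrative, not part of commitai):

    import re
    from importlib.metadata import version
    from pathlib import Path

    config = Path(".pre-commit-config.yaml").read_text()
    # Grab the rev pinned under the ruff-pre-commit repo entry.
    match = re.search(r"ruff-pre-commit.*?rev:\s*v([\d.]+)", config, re.DOTALL)
    if match:
        pinned, installed = match.group(1), version("ruff")
        if pinned != installed:
            print(f"pre-commit pins ruff {pinned}, but the environment has {installed}")
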
@@ -27,10 +27,11 @@ repos:
         # Ensure mypy runs with the necessary dependencies installed
         additional_dependencies:
           - "click>=8.0,<9.0"
-          - "langchain>=0.1.0,<=0.3.25"
-          - "langchain-core>=0.1.0,<=0.3.58"
-          - "langchain-community>=0.0.20,<=0.3.23"
-          - "langchain-google-genai~=2.1.4"
+          - "langchain>=1.0"
+          - "langchain-core>=1.0"
+          - "langchain-community>=0.4.0"
+          - "langchain-google-genai>=1.0"
+          - "langgraph>=1.0.0"
           - "pydantic>=2.0,<3.0"
           - "types-setuptools"
         args: [--config-file=pyproject.toml] # Point mypy to the config
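
pre-commit runs mypy in an isolated environment, so the runtime dependencies have to be restated here as additional_dependencies. A rough sketch of a sync check between the two lists, assuming Python 3.11+ (for tomllib) and that PyYAML and packaging are installed; none of this is commitai code:

    import tomllib

    import yaml
    from packaging.requirements import Requirement

    with open("pyproject.toml", "rb") as f:
        runtime = {Requirement(d).name for d in tomllib.load(f)["project"]["dependencies"]}

    with open(".pre-commit-config.yaml") as f:
        hooks = [h for repo in yaml.safe_load(f)["repos"] for h in repo.get("hooks", [])]

    declared = {
        Requirement(d).name for h in hooks for d in h.get("additional_dependencies", [])
    }
    # Anything printed here is typed against at runtime but invisible to the mypy hook.
    print("runtime deps not mirrored in any hook:", sorted(runtime - declared))
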
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: commitai
-Version: 2.0.0
+Version: 2.2.2
 Summary: Commitai helps you generate git commit messages using AI
 Project-URL: Bug Tracker, https://github.com/lguibr/commitai/issues
 Project-URL: Documentation, https://github.com/lguibr/commitai/blob/main/README.md
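
This metadata is generated at build time, so the quickest way to confirm which version and dependency set actually landed in an environment is to read it back from the installed distribution with the standard library:

    from importlib.metadata import requires, version

    print(version("commitai"))          # e.g. "2.2.2"
    for req in requires("commitai") or []:
        print(req)                      # mirrors the Requires-Dist lines in PKG-INFO
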
@@ -40,18 +40,19 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Software Development :: Version Control :: Git
 Classifier: Topic :: Utilities
 Requires-Python: >=3.10
-Requires-Dist: click<9.0,>=8.0
-Requires-Dist: langchain-community<=0.3.23,>=0.0.20
-Requires-Dist: langchain-core<=0.3.58,>=0.1.0
-Requires-Dist: langchain-google-genai~=2.1.4
-Requires-Dist: langchain<=0.3.25,>=0.1.0
-Requires-Dist: pydantic<3.0,>=2.0
+Requires-Dist: click>=8.1
+Requires-Dist: langchain-community>=0.4.0
+Requires-Dist: langchain-core>=1.0
+Requires-Dist: langchain-google-genai>=1.0
+Requires-Dist: langchain>=1.0
+Requires-Dist: langgraph>=1.0.0
+Requires-Dist: pydantic>=2.0
 Provides-Extra: test
-Requires-Dist: langchain-google-genai~=2.1.4; extra == 'test'
+Requires-Dist: langchain-google-genai>=2.0.0; extra == 'test'
 Requires-Dist: mypy>=1.9.0; extra == 'test'
 Requires-Dist: pytest-cov>=3.0; extra == 'test'
 Requires-Dist: pytest>=7.0; extra == 'test'
-Requires-Dist: ruff==0.4.4; extra == 'test'
+Requires-Dist: ruff>=0.4.4; extra == 'test'
 Requires-Dist: types-setuptools; extra == 'test'
 Description-Content-Type: text/markdown
 
@@ -3,7 +3,7 @@
 
 # This __version__ string is read by hatchling during the build process
 # Make sure to update it for new releases.
-__version__ = "2.0.0"
+__version__ = "2.2.2"
 
 # The importlib.metadata approach is generally for reading the version
 # of an *already installed* package at runtime. We don't need it here
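
As the surrounding comment notes, importlib.metadata is the tool for reading the version of an installed package at runtime, whereas the hardcoded __version__ feeds hatchling at build time. A minimal sketch of the runtime lookup:

    from importlib.metadata import PackageNotFoundError, version

    try:
        print(version("commitai"))  # reads installed metadata, not the source tree
    except PackageNotFoundError:
        print("commitai is not installed in this environment")
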
@@ -3,11 +3,11 @@ import os
 import subprocess
 from typing import Any, Dict, Type
 
-from langchain.agents import AgentExecutor, create_tool_calling_agent
+# from langchain.agents import AgentExecutor, create_tool_calling_agent # Removed
 from langchain_core.language_models import BaseChatModel
-from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
+from langgraph.prebuilt import create_react_agent
 from pydantic import BaseModel, Field
 
 # --- TOOLS ---
@@ -157,6 +157,9 @@ class TodoMiddleware:
 # --- AGENT ---
 
 
+# --- AGENT ---
+
+
 def create_commit_agent(llm: BaseChatModel) -> Runnable:
     # 1. Init Tools
     tools = [ReadOnlyShellTool(), FileSearchTool(), FileReadTool()]
@@ -186,18 +189,22 @@ Protocol:
 3. If clarification is needed, explore files.
 4. Final Answer MUST be ONLY the commit message.
 """
-    prompt = ChatPromptTemplate.from_messages(
-        [
-            ("system", system_prompt),
-            MessagesPlaceholder("chat_history", optional=True),
-            ("human", "Generate the commit message."),
-            MessagesPlaceholder("agent_scratchpad"),
-        ]
-    )
+    # Note: create_react_agent handles the prompt internally or via state_modifier.
+    # We can pass a system string or a function. Since our prompt depends on dynamic
+    # variables (diff, explanation, etc.), we need to inject them. LangGraph's
+    # prebuilt agent usually takes a static system message. However, we can use the
+    # 'messages' state. But to keep it simple and compatible with existing 'invoke'
+    # interface: We will format the system prompt in the wrapper and pass it as the
+    # first message.
+
+    # Actually, create_react_agent supports 'state_modifier'.
+    # If we pass a formatted string, it works as system prompt.
+
+    # 4. Construct Graph
+    # We don't construct the graph with ALL variables pre-bound if they change per run.
+    # Instead, we'll format the prompt in the pipeline and pass it to the agent.
 
-    # 4. Construct Agent
-    agent = create_tool_calling_agent(llm, tools, prompt)
-    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=False)
+    agent_graph = create_react_agent(llm, tools)
 
     # 5. Pipeline with Middleware
     def run_pipeline(inputs: Dict[str, Any]) -> str:
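
For reference, the pattern the new code relies on, reduced to a self-contained sketch: compile the prebuilt LangGraph ReAct agent once, then feed per-run context through the messages at invoke time. The echo tool and the build/run helpers are placeholders, not commitai code, and assume langgraph and langchain-core are installed:

    from langchain_core.language_models import BaseChatModel
    from langchain_core.messages import HumanMessage, SystemMessage
    from langchain_core.tools import tool
    from langgraph.prebuilt import create_react_agent


    @tool
    def echo(text: str) -> str:
        """Stand-in for the real shell/file tools."""
        return text


    def build_agent(llm: BaseChatModel):
        # Compiled once; no prompt is bound here.
        return create_react_agent(llm, [echo])


    def run(graph, system_prompt: str) -> str:
        result = graph.invoke(
            {
                "messages": [
                    SystemMessage(content=system_prompt),
                    HumanMessage(content="Generate the commit message."),
                ]
            }
        )
        # The final AI message carries the answer.
        return str(result["messages"][-1].content)
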
@@ -210,11 +217,34 @@ Protocol:
         state.setdefault("explanation", "None")
         state.setdefault("summary", "None")
         state.setdefault("todo_str", "None")
-        state.setdefault("chat_history", [])
+
+        # Format System Prompt
+        formatted_system_prompt = system_prompt.format(
+            explanation=state["explanation"],
+            todo_str=state["todo_str"],
+            summary=state["summary"],
+            diff=state.get("diff", ""),
+        )
 
         # Run Agent
-        result = agent_executor.invoke(state)
-        return str(result["output"])
+        # LangGraph inputs: {"messages": [{"role": "user", "content": ...}]}
+        # We inject the system prompt as a SystemMessage or just update the state.
+        # create_react_agent primarily looks at 'messages'.
+
+        from langchain_core.messages import HumanMessage, SystemMessage
+
+        messages = [
+            SystemMessage(content=formatted_system_prompt),
+            HumanMessage(content="Generate the commit message."),
+        ]
+
+        # Invoke graph
+        # result is a dict with 'messages'
+        result = agent_graph.invoke({"messages": messages})
+
+        # Extract last message content
+        last_message = result["messages"][-1]
+        return str(last_message.content)
 
     # Wrap in RunnableLambda to expose 'invoke'
     from langchain_core.runnables import RunnableLambda
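
The RunnableLambda wrapper mentioned in the trailing context is what keeps the public surface unchanged: it turns a plain function into a Runnable with .invoke(), so callers that previously held an AgentExecutor keep working. A tiny illustration (the pipeline body here is made up):

    from typing import Any, Dict

    from langchain_core.runnables import RunnableLambda


    def pipeline(inputs: Dict[str, Any]) -> str:
        return f"chore: placeholder message for a diff of {len(inputs.get('diff', ''))} chars"


    agent = RunnableLambda(pipeline)
    print(agent.invoke({"diff": "example diff", "explanation": "expl"}))
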
@@ -106,7 +106,8 @@ def _handle_commit(commit_message: str, commit_flag: bool) -> None:
     final_commit_message = commit_message
     if not commit_flag:
         click.secho(
-            f"\n📝 Generated Commit Message:\n{'-'*40}\n{commit_message}\n{'-'*40}\n",
+            f"\n📝 Generated Commit Message:\n{'-' * 40}\n"
+            f"{commit_message}\n{'-' * 40}\n",
             fg="green",
         )
 
@@ -197,7 +198,7 @@ def generate_message(
     template: Optional[str],
     add: bool,
     model: str,
-    deep: bool,
+    deep: bool = False,
 ) -> None:
     explanation = " ".join(description)
 
@@ -7,7 +7,7 @@ build-backend = "hatchling.build"
 name = "commitai"
 # Make sure to update version in commitai/__init__.py as well
 
-version = "2.0.0"
+version = "2.2.2"
 
 description = "Commitai helps you generate git commit messages using AI"
 readme = "README.md"
@@ -29,12 +29,13 @@ classifiers = [
     "Topic :: Utilities",
 ]
 dependencies = [
-    "click>=8.0,<9.0",
-    "langchain>=0.1.0,<=0.3.25",
-    "langchain-core>=0.1.0,<=0.3.58",
-    "langchain-community>=0.0.20,<=0.3.23",
-    "langchain-google-genai~=2.1.4",
-    "pydantic>=2.0,<3.0",
+    "click>=8.1",
+    "langchain>=1.0",
+    "langchain-community>=0.4.0",
+    "langchain-core>=1.0",
+    "langchain-google-genai>=1.0",
+    "langgraph>=1.0.0",
+    "pydantic>=2.0",
 ]
 
 [project.urls]
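
The upper bounds are gone, so upgrades now hinge on the lower bounds alone. If you want to verify that an existing environment satisfies the relaxed ranges, the packaging library (a third-party dependency, assumed installed) can evaluate the specifiers; the selection of names below is illustrative:

    from importlib.metadata import version

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    relaxed = {
        "langchain": SpecifierSet(">=1.0"),
        "langgraph": SpecifierSet(">=1.0.0"),
        "pydantic": SpecifierSet(">=2.0"),
    }
    for name, spec in relaxed.items():
        installed = Version(version(name))
        status = "ok" if installed in spec else f"outside {spec}"
        print(f"{name} {installed}: {status}")
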
@@ -52,9 +53,8 @@ test = [
     "pytest-cov>=3.0", # Still needed for coverage run command
     "mypy>=1.9.0",
     "types-setuptools",
-    # Pin ruff version to match pre-commit hook
-    "ruff==0.4.4",
-    "langchain-google-genai~=2.1.4", # Keep google genai here for mypy in pre-commit
+    "ruff>=0.4.4",
+    "langchain-google-genai>=2.0.0", # Keep google genai here for mypy in pre-commit
 ]
 
 [tool.hatch.version]
@@ -62,17 +62,25 @@ def test_todo_scanner_middleware(mock_llm):
 
 def test_create_commit_agent(mock_llm):
     # This tests the factory function
-    agent_executor = create_commit_agent(mock_llm)
-    assert agent_executor is not None
-    # We can try to invoke it if we mock enough stuff
-    # But just creating it covers the definition lines.
+    # Mocking create_react_agent to avoid actual graph compilation
+    with patch("commitai.agent.create_react_agent") as mock_create_graph:
+        mock_graph = MagicMock()
+        mock_create_graph.return_value = mock_graph
+        agent_executor = create_commit_agent(mock_llm)
+        assert agent_executor is not None
 
 
 def test_agent_run(mock_llm):
     # E2E-ish test of the agent logic with mocks
-    with patch("commitai.agent.AgentExecutor") as MockExecutor:
-        mock_executor_instance = MockExecutor.return_value
-        mock_executor_instance.invoke.return_value = {"output": "Final Commit Message"}
+    with patch("commitai.agent.create_react_agent") as mock_create_graph:
+        mock_graph = MagicMock()
+        mock_create_graph.return_value = mock_graph
+
+        # Determine strict return structure for LangGraph invoke
+        # It yields a dict with "messages" list
+        last_message = MagicMock()
+        last_message.content = "Final Commit Message"
+        mock_graph.invoke.return_value = {"messages": [last_message]}
 
         agent_runnable = create_commit_agent(mock_llm)
         result = agent_runnable.invoke({"diff": "diff", "explanation": "expl"})
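
The same mocking idea, as a standalone test: fake the compiled graph, return the {"messages": [...]} shape that a LangGraph invoke yields, and assert on the last message's content. extract_output is an illustrative helper, not part of commitai:

    from unittest.mock import MagicMock


    def extract_output(graph) -> str:
        result = graph.invoke({"messages": []})
        return str(result["messages"][-1].content)


    def test_extracts_last_message_content():
        mock_graph = MagicMock()
        last_message = MagicMock()
        last_message.content = "Final Commit Message"
        mock_graph.invoke.return_value = {"messages": [last_message]}

        assert extract_output(mock_graph) == "Final Commit Message"
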
@@ -41,6 +41,7 @@ def mock_generate_deps(tmp_path):
             "commitai.cli.get_current_branch_name", return_value="main"
         ) as mock_branch,
         patch("commitai.cli.create_commit") as mock_commit,
+        # Update mock target for agent creation
         patch("commitai.cli.create_commit_agent") as mock_create_agent,
         patch("click.edit") as mock_edit,
         patch("click.clear"),
@@ -57,10 +58,10 @@ def mock_generate_deps(tmp_path):
 
         mock_google_instance = mock_google_class_in_cli.return_value
 
-        # Agent Mock
-        mock_agent_instance = MagicMock()
-        mock_agent_instance.invoke.return_value = "Generated commit message"
-        mock_create_agent.return_value = mock_agent_instance
+        # Agent Mock (RunnableLambda now)
+        mock_agent_runnable = MagicMock()
+        mock_agent_runnable.invoke.return_value = "Generated commit message"
+        mock_create_agent.return_value = mock_agent_runnable
 
         if mock_google_class_in_cli is not None:
             mock_google_instance.spec = ActualChatGoogleGenerativeAI
@@ -93,7 +94,7 @@ def mock_generate_deps(tmp_path):
             "path_exists": mock_path_exists,
             "commit_msg_path": fake_commit_msg_path,
             "create_agent": mock_create_agent,
-            "agent_instance": mock_agent_instance,
+            "agent_instance": mock_agent_runnable, # Still useful alias for tests
             "confirm": mock_confirm,
         }
 
@@ -381,9 +382,9 @@ def test_generate_edit_error_io(mock_generate_deps):
     # Check exit code is 1
     result = runner.invoke(cli, ["generate", "--no-review", "Test explanation"])
 
-    assert (
-        result.exit_code == 1
-    ), f"Expected exit code 1, got {result.exit_code}. Output: {result.output}"
+    assert result.exit_code == 1, (
+        f"Expected exit code 1, got {result.exit_code}. Output: {result.output}"
+    )
     # The IOError bubbles up to the outer exception handler
     assert "Error handling user input: Read permission denied" in result.output
     mock_generate_deps["commit"].assert_not_called()
10 files without changes