ai-cr 2.0.0.dev1__tar.gz → 2.0.0.dev2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/PKG-INFO +4 -2
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/README.md +1 -1
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/cli.py +4 -3
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/commands/fix.py +36 -3
- ai_cr-2.0.0.dev2/gito/commands/gh_comment.py +157 -0
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/config.toml +31 -1
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/core.py +24 -6
- ai_cr-2.0.0.dev2/gito/issue_trackers.py +15 -0
- ai_cr-2.0.0.dev2/gito/pipeline.py +70 -0
- ai_cr-2.0.0.dev2/gito/pipeline_steps/__init__.py +0 -0
- ai_cr-2.0.0.dev2/gito/pipeline_steps/jira.py +83 -0
- ai_cr-2.0.0.dev2/gito/project_config.py +71 -0
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/report_struct.py +2 -1
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/utils.py +83 -1
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/pyproject.toml +3 -1
- ai_cr-2.0.0.dev1/gito/project_config.py +0 -119
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/LICENSE +0 -0
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/__init__.py +0 -0
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/__main__.py +0 -0
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/bootstrap.py +0 -0
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/commands/__init__.py +0 -0
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/commands/repl.py +0 -0
- {ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/constants.py +0 -0
{ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: ai-cr
-Version: 2.0.0.dev1
+Version: 2.0.0.dev2
 Summary: AI code review tool that works with any language model provider. It detects issues in GitHub pull requests or local changes—instantly, reliably, and without vendor lock-in.
 License: MIT
 Keywords: static code analysis,code review,code quality,ai,coding,assistant,llm,github,automation,devops,developer tools,github actions,workflows,git
@@ -18,7 +18,9 @@ Classifier: Topic :: Software Development
 Requires-Dist: GitPython (>=3.1.44,<4.0.0)
 Requires-Dist: ai-microcore (==4.0.0)
 Requires-Dist: anthropic (>=0.52.2,<0.53.0)
+Requires-Dist: ghapi (>=1.0.6,<1.1.0)
 Requires-Dist: google-generativeai (>=0.8.5,<0.9.0)
+Requires-Dist: jira (>=3.8.0,<4.0.0)
 Requires-Dist: typer (>=0.16.0,<0.17.0)
 Requires-Dist: unidiff (>=0.7.5,<0.8.0)
 Project-URL: Homepage, https://github.com/Nayjest/Gito
@@ -62,7 +64,7 @@ Get consistent, thorough code reviews in seconds—no waiting for human availabi
 
 ### 1. Review Pull Requests via GitHub Actions
 
-Create a `.github/workflows/gito.yml` file:
+Create a `.github/workflows/gito-code-review.yml` file:
 
 ```yaml
 name: "Gito: AI Code Review"
{ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/README.md
@@ -35,7 +35,7 @@ Get consistent, thorough code reviews in seconds—no waiting for human availabi
 
 ### 1. Review Pull Requests via GitHub Actions
 
-Create a `.github/workflows/gito.yml` file:
+Create a `.github/workflows/gito-code-review.yml` file:
 
 ```yaml
 name: "Gito: AI Code Review"
{ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/cli.py
@@ -18,7 +18,7 @@ from .project_config import ProjectConfig
 from .utils import no_subcommand, parse_refs_pair
 
 # Import fix command to register it
-from .commands import fix  # noqa
+from .commands import fix, gh_comment  # noqa
 
 
 app_no_subcommand = typer.Typer(pretty_exceptions_show_locals=False)
@@ -40,10 +40,10 @@ def main():
 
 @app.callback(invoke_without_command=True)
 def cli(ctx: typer.Context, verbose: bool = typer.Option(default=False)):
-    if verbose:
-        mc.logging.LoggingConfig.STRIP_REQUEST_LINES = None
     if ctx.invoked_subcommand != "setup":
         bootstrap()
+    if verbose:
+        mc.logging.LoggingConfig.STRIP_REQUEST_LINES = None
 
 
 def args_to_target(refs, what, against) -> tuple[str | None, str | None]:
@@ -104,6 +104,7 @@ def arg_against() -> typer.Option:
 
 @app_no_subcommand.command(name="review", help="Perform code review")
 @app.command(name="review", help="Perform code review")
+@app.command(name="run", hidden=True)
 def cmd_review(
     refs: str = arg_refs(),
     what: str = arg_what(),
{ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/commands/fix.py
@@ -6,12 +6,13 @@ import logging
 from pathlib import Path
 from typing import Optional
 
+import git
 import typer
 from microcore import ui
 
 from ..bootstrap import app
 from ..constants import JSON_REPORT_FILE_NAME
-from ..report_struct import Report
+from ..report_struct import Report, Issue
 
 
 @app.command(help="Fix an issue from the code review report")
@@ -26,7 +27,9 @@ def fix(
     dry_run: bool = typer.Option(
         False, "--dry-run", "-d", help="Only print changes without applying them"
     ),
-):
+    commit: bool = typer.Option(default=False, help="Commit changes after applying them"),
+    push: bool = typer.Option(default=False, help="Push changes to the remote repository"),
+) -> list[str]:
     """
     Applies the proposed change for the specified issue number from the code review report.
     """
@@ -39,7 +42,7 @@ def fix(
         raise typer.Exit(code=1)
 
     # Find the issue by number
-    issue = None
+    issue: Optional[Issue] = None
     for file_issues in report.issues.values():
         for i in file_issues:
             if i.id == issue_number:
@@ -122,3 +125,33 @@ def fix(
         raise typer.Exit(code=1)
 
     print(f"\n{ui.green('✓')} Issue #{issue_number} fixed successfully")
+
+    changed_files = [file_path.as_posix()]
+    if commit:
+        commit_changes(
+            changed_files,
+            commit_message=f"[AI] Fix issue {issue_number}:{issue.title}",
+            push=push
+        )
+    return changed_files
+
+
+def commit_changes(
+    files: list[str],
+    repo: git.Repo = None,
+    commit_message: str = "fix by AI",
+    push: bool = True
+) -> None:
+    if opened_repo := not repo:
+        repo = git.Repo(".")
+    for i in files:
+        repo.index.add(i)
+    repo.index.commit(commit_message)
+    if push:
+        origin = repo.remotes.origin
+        origin.push()
+        logging.info(f"Changes pushed to {origin.name}")
+    else:
+        logging.info("Changes committed but not pushed to remote")
+    if opened_repo:
+        repo.close()
ai_cr-2.0.0.dev2/gito/commands/gh_comment.py
@@ -0,0 +1,157 @@
+"""
+Fix issues from code review report
+"""
+
+import logging
+import os
+import re
+from pathlib import Path
+from typing import Optional
+import zipfile
+
+import requests
+import typer
+from fastcore.basics import AttrDict
+from gito.project_config import ProjectConfig
+from gito.utils import extract_gh_owner_repo
+from microcore import ui
+from ghapi.all import GhApi
+import git
+
+from ..bootstrap import app
+from ..constants import JSON_REPORT_FILE_NAME
+from .fix import fix
+from ..utils import is_running_in_github_action
+
+
+@app.command()
+def react_to_comment(
+    comment_id: int = typer.Argument(),
+    gh_token: str = typer.Option(
+        "",
+        "--gh-token",
+        "--token",
+        "-t",
+        "--github-token",
+        help="GitHub token for authentication",
+    ),
+    dry_run: bool = typer.Option(
+        False, "--dry-run", "-d", help="Only print changes without applying them"
+    ),
+):
+    repo = git.Repo(".")  # Current directory
+    owner, repo_name = extract_gh_owner_repo(repo)
+    logging.info(f"Using repository: {ui.yellow}{owner}/{repo_name}{ui.reset}")
+    gh_token = (
+        gh_token or os.getenv("GITHUB_TOKEN", None) or os.getenv("GH_TOKEN", None)
+    )
+    api = GhApi(owner=owner, repo=repo_name, token=gh_token)
+    comment = api.issues.get_comment(comment_id=comment_id)
+    logging.info(
+        f"Comment by {ui.yellow('@' + comment.user.login)}: "
+        f"{ui.green(comment.body)}\n"
+        f"url: {comment.html_url}"
+    )
+
+    cfg = ProjectConfig.load_for_repo(repo)
+    if not any(
+        trigger.lower() in comment.body.lower() for trigger in cfg.mention_triggers
+    ):
+        ui.error("No mention trigger found in comment, no reaction added.")
+        return
+    if not is_running_in_github_action():
+        # @todo: need service account to react to comments
+        logging.info("Comment contains mention trigger, reacting with 'eyes'.")
+        api.reactions.create_for_issue_comment(comment_id=comment_id, content="eyes")
+
+    pr = int(comment.issue_url.split("/")[-1])
+    print(f"Processing comment for PR #{pr}...")
+    out_folder = "artifact"
+    download_latest_code_review_artifact(
+        api, pr_number=pr, gh_token=gh_token, out_folder=out_folder
+    )
+
+    issue_ids = extract_fix_args(comment.body)
+    if not issue_ids:
+        ui.error("Can't identify target command in the text.")
+        return
+    logging.info(f"Extracted issue IDs: {ui.yellow(str(issue_ids))}")
+
+    fix(
+        issue_ids[0],  # @todo: support multiple IDs
+        report_path=Path(out_folder) / JSON_REPORT_FILE_NAME,
+        dry_run=dry_run,
+        commit=not dry_run,
+        push=not dry_run,
+    )
+    logging.info("Fix applied successfully.")
+
+
+def last_code_review_run(api: GhApi, pr_number: int) -> AttrDict | None:
+    pr = api.pulls.get(pr_number)
+    sha = pr["head"]["sha"]  # noqa
+    branch = pr["head"]["ref"]
+
+    runs = api.actions.list_workflow_runs_for_repo(branch=branch)["workflow_runs"]
+    # Find the run for this SHA
+    run = next(
+        (
+            r
+            for r in runs  # r['head_sha'] == sha and
+            if (
+                any(
+                    marker in r["path"].lower()
+                    for marker in ["code-review", "code_review", "cr"]
+                )
+                or "gito.yml" in r["name"].lower()
+            )
+            and r["status"] == "completed"
+        ),
+        None,
+    )
+    return run
+
+
+def download_latest_code_review_artifact(
+    api: GhApi, pr_number: int, gh_token: str, out_folder: Optional[str] = "artifact"
+) -> tuple[str, dict] | None:
+    run = last_code_review_run(api, pr_number)
+    if not run:
+        raise Exception("No workflow run found for this PR/SHA")
+
+    artifacts = api.actions.list_workflow_run_artifacts(run["id"])["artifacts"]
+    if not artifacts:
+        raise Exception("No artifacts found for this workflow run")
+
+    latest_artifact = artifacts[0]
+    url = latest_artifact["archive_download_url"]
+    print(f"Artifact: {latest_artifact['name']}, Download URL: {url}")
+    headers = {"Authorization": f"token {gh_token}"} if gh_token else {}
+    zip_path = "artifact.zip"
+    try:
+        with requests.get(url, headers=headers, stream=True) as r:
+            r.raise_for_status()
+            with open(zip_path, "wb") as f:
+                for chunk in r.iter_content(chunk_size=8192):
+                    f.write(chunk)
+
+        # Unpack to ./artifact
+        os.makedirs("artifact", exist_ok=True)
+        with zipfile.ZipFile(zip_path, "r") as zip_ref:
+            zip_ref.extractall("artifact")
+    finally:
+        if os.path.exists(zip_path):
+            os.remove(zip_path)
+
+    print("Artifact unpacked to ./artifact")
+
+
+def extract_fix_args(text: str) -> list[int]:
+    pattern1 = r"fix\s+(?:issues?)?(?:\s+)?#?(\d+(?:\s*,\s*#?\d+)*)"
+    match = re.search(pattern1, text)
+    if match:
+        numbers_str = match.group(1)
+        numbers = re.findall(r"\d+", numbers_str)
+        issue_numbers = [int(num) for num in numbers]
+        return issue_numbers
+    return []
{ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/config.toml
@@ -1,3 +1,8 @@
+# :class: gito.project_config.ProjectConfig
+
+# Defines the keyword or mention tag that triggers bot actions when referenced in code review comments.
+# list of strings, case-insensitive
+mention_triggers = ["gito", "bot", "ai", "/fix"]
 report_template_md = """
 <h2><a href="https://github.com/Nayjest/Gito"><img src="https://raw.githubusercontent.com/Nayjest/Gito/main/press-kit/logo/gito-bot-1_64top.png" align="left" width=64 height=50></a>I've Reviewed the Code</h2>
 
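These default `mention_triggers` are what `react_to_comment` (in `gh_comment.py`, earlier in this diff) matches against; a quick sketch of that case-insensitive substring check, with an illustrative comment body:

```python
# Sketch of the trigger check performed in gh_comment.react_to_comment():
# the bot reacts if any trigger appears anywhere in the comment, case-insensitively.
mention_triggers = ["gito", "bot", "ai", "/fix"]
comment_body = "Hey Gito, please /fix issue 4"

triggered = any(t.lower() in comment_body.lower() for t in mention_triggers)
print(triggered)  # True
```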
@@ -199,12 +204,37 @@ Note: Awards should only be given to authors of initial codebase changes, not to
 --Available Awards--
 {{ awards }}
 ---
+{% if pipeline_out.associated_issue and pipeline_out.associated_issue.title %}
+----SUBTASK----
+Include one sentence about how the code changes address the requirements of the associated issue listed below.
+
+--Associated Issue--
+# {{ pipeline_out.associated_issue.title }}
+{{ pipeline_out.associated_issue.description }}
+URL: {{ pipeline_out.associated_issue.url }}
+---
+
+Examples:
+
+In case if the implementation delivers what was requested:
+```
+✅ Implementation Satisfies [<ISSUE_KEY>](<ISSUE_URL>).
+```
+In case of any critical concerns:
+```
+⚠️ <Your concern 1 here>.
+⚠️ <Your concern 2 here>.
+```
+--------
+{% endif -%}
 - Your response will be parsed programmatically, so do not include any additional text.
 - Do not include the issues by itself to the summary, they are already provided in the context.
 - Use Markdown formatting in your response.
 {{ summary_requirements -}}
 """
-
+[pipeline_steps.jira]
+call="gito.pipeline_steps.jira.fetch_associated_issue"
+envs=["local","gh-action"]
 [prompt_vars]
 self_id = """
 You are a subsystem of an AI-powered software platform, specifically tasked with performing expert code reviews.
{ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/core.py
@@ -6,6 +6,7 @@ from pathlib import Path
 
 import microcore as mc
 from git import Repo
+from gito.pipeline import Pipeline
 from unidiff import PatchSet, PatchedFile
 from unidiff.constants import DEV_NULL
 
@@ -143,15 +144,16 @@ def file_lines(repo: Repo, file: str, max_tokens: int = None, use_local_files: b
     return "".join(lines)
 
 
-def make_cr_summary(
+def make_cr_summary(config: ProjectConfig, report: Report, diff, **kwargs) -> str:
     return (
         mc.prompt(
-
-            diff=mc.tokenizing.fit_to_token_size(diff,
+            config.summary_prompt,
+            diff=mc.tokenizing.fit_to_token_size(diff, config.max_code_tokens)[0],
             issues=report.issues,
-            **
+            **config.prompt_vars,
+            **kwargs,
         ).to_llm()
-        if
+        if config.summary_prompt
         else ""
     )
 
@@ -213,7 +215,23 @@ async def review(
         exec(cfg.post_process, {"mc": mc, **locals()})
     out_folder.mkdir(parents=True, exist_ok=True)
     report = Report(issues=issues, number_of_processed_files=len(diff))
-
+    ctx = dict(
+        report=report,
+        config=cfg,
+        diff=diff,
+        repo=repo,
+        pipeline_out={},
+    )
+    if cfg.pipeline_steps:
+        pipe = Pipeline(
+            ctx=ctx,
+            steps=cfg.pipeline_steps
+        )
+        pipe.run()
+    else:
+        logging.info("No pipeline steps defined, skipping pipeline execution")
+
+    report.summary = make_cr_summary(**ctx)
     report.save(file_name=out_folder / JSON_REPORT_FILE_NAME)
     report_text = report.render(cfg, Report.Format.MARKDOWN)
     text_report_path = out_folder / "code-review-report.md"
ai_cr-2.0.0.dev2/gito/issue_trackers.py
@@ -0,0 +1,15 @@
+import re
+from dataclasses import dataclass, field
+
+
+def extract_issue_key(branch_name: str, min_len=2, max_len=10) -> str | None:
+    pattern = fr"\b[A-Z][A-Z0-9]{{{min_len - 1},{max_len - 1}}}-\d+\b"
+    match = re.search(pattern, branch_name)
+    return match.group(0) if match else None
+
+
+@dataclass
+class IssueTrackerIssue:
+    title: str = field(default="")
+    description: str = field(default="")
+    url: str = field(default="")
ai_cr-2.0.0.dev2/gito/pipeline.py
@@ -0,0 +1,70 @@
+import logging
+from enum import StrEnum
+from dataclasses import dataclass, field
+
+from gito.utils import is_running_in_github_action
+from microcore import ui
+from microcore.utils import resolve_callable
+
+
+class PipelineEnv(StrEnum):
+    LOCAL = "local"
+    GH_ACTION = "gh-action"
+
+    @staticmethod
+    def all():
+        return [PipelineEnv.LOCAL, PipelineEnv.GH_ACTION]
+
+    @staticmethod
+    def current():
+        return (
+            PipelineEnv.GH_ACTION
+            if is_running_in_github_action()
+            else PipelineEnv.LOCAL
+        )
+
+
+@dataclass
+class PipelineStep:
+    call: str
+    envs: list[PipelineEnv] = field(default_factory=PipelineEnv.all)
+
+    def get_callable(self):
+        """
+        Resolve the callable from the string representation.
+        """
+        return resolve_callable(self.call)
+
+    def run(self, *args, **kwargs):
+        return self.get_callable()(*args, **kwargs)
+
+
+@dataclass
+class Pipeline:
+    ctx: dict = field(default_factory=dict)
+    steps: dict[str, PipelineStep] = field(default_factory=dict)
+
+    def run(self, *args, **kwargs):
+        cur_env = PipelineEnv.current()
+        logging.info("Running pipeline... [env: %s]", ui.yellow(cur_env))
+        self.ctx["pipeline_out"] = self.ctx.get("pipeline_out", {})
+        for step_name, step in self.steps.items():
+            if cur_env in step.envs:
+                logging.info(f"Running pipeline step: {step_name}")
+                try:
+                    step_output = step.run(*args, **kwargs, **self.ctx)
+                    if isinstance(step_output, dict):
+                        self.ctx["pipeline_out"].update(step_output)
+                    self.ctx["pipeline_out"][step_name] = step_output
+                    if not step_output:
+                        logging.warning(
+                            f'Pipeline step "{step_name}" returned {repr(step_output)}.'
+                        )
+                except Exception as e:
+                    logging.error(f'Error in pipeline step "{step_name}": {e}')
+            else:
+                logging.info(
+                    f"Skipping pipeline step: {step_name}"
+                    f" [env: {ui.yellow(cur_env)} not in {step.envs}]"
+                )
+        return self.ctx["pipeline_out"]
ai_cr-2.0.0.dev2/gito/pipeline_steps/__init__.py: File without changes
ai_cr-2.0.0.dev2/gito/pipeline_steps/jira.py
@@ -0,0 +1,83 @@
+import logging
+import os
+
+import git
+from jira import JIRA
+
+from gito.issue_trackers import extract_issue_key, IssueTrackerIssue
+from gito.utils import is_running_in_github_action
+
+
+def fetch_issue(issue_key, jira_url, username, api_token) -> IssueTrackerIssue | None:
+    try:
+        jira = JIRA(jira_url, basic_auth=(username, api_token))
+        issue = jira.issue(issue_key)
+        return IssueTrackerIssue(
+            title=issue.fields.summary,
+            description=issue.fields.description or "",
+            url=f"{jira_url.rstrip('/')}/browse/{issue_key}"
+        )
+    except Exception as e:
+        logging.error(f"Failed to fetch Jira issue {issue_key}: {e}")
+        return None
+
+
+def get_branch(repo: git.Repo):
+    if is_running_in_github_action():
+        branch_name = os.getenv('GITHUB_HEAD_REF')
+        if branch_name:
+            return branch_name
+
+        github_ref = os.getenv('GITHUB_REF', '')
+        if github_ref.startswith('refs/heads/'):
+            return github_ref.replace('refs/heads/', '')
+    try:
+        branch_name = repo.active_branch.name
+        return branch_name
+    except Exception as e:  # @todo: specify more precise exception
+        logging.error("Could not determine the active branch name: %s", e)
+        return None
+
+
+def fetch_associated_issue(
+    repo: git.Repo,
+    jira_url=None,
+    jira_username=None,
+    jira_api_token=None,
+    **kwargs
+):
+    """
+    Pipeline step to fetch a Jira issue based on the current branch name.
+    """
+    branch_name = get_branch(repo)
+    if not branch_name:
+        logging.error("No active branch found in the repository, cannot determine Jira issue key.")
+        return None
+
+    if not (issue_key := extract_issue_key(branch_name)):
+        logging.error(f"No Jira issue key found in branch name: {branch_name}")
+        return None
+
+    jira_url = jira_url or os.getenv("JIRA_URL")
+    jira_username = (
+        jira_username
+        or os.getenv("JIRA_USERNAME")
+        or os.getenv("JIRA_USER")
+        or os.getenv("JIRA_EMAIL")
+    )
+    jira_token = (
+        jira_api_token
+        or os.getenv("JIRA_API_TOKEN")
+        or os.getenv("JIRA_API_KEY")
+        or os.getenv("JIRA_TOKEN")
+    )
+    try:
+        assert jira_url, "JIRA_URL is not set"
+        assert jira_username, "JIRA_USERNAME is not set"
+        assert jira_token, "JIRA_API_TOKEN is not set"
+    except AssertionError as e:
+        logging.error(f"Jira configuration error: {e}")
+        return None
+    return dict(
+        associated_issue=fetch_issue(issue_key, jira_url, jira_username, jira_token)
+    )
ai_cr-2.0.0.dev2/gito/project_config.py
@@ -0,0 +1,71 @@
+import logging
+import tomllib
+from dataclasses import dataclass, field
+from pathlib import Path
+
+import microcore as mc
+from gito.utils import detect_github_env
+from microcore import ui
+from git import Repo
+
+from .constants import PROJECT_CONFIG_BUNDLED_DEFAULTS_FILE, PROJECT_CONFIG_FILE_PATH
+from .pipeline import PipelineStep
+
+
+@dataclass
+class ProjectConfig:
+    prompt: str = ""
+    summary_prompt: str = ""
+    report_template_md: str = ""
+    """Markdown report template"""
+    report_template_cli: str = ""
+    """Report template for CLI output"""
+    post_process: str = ""
+    retries: int = 3
+    """LLM retries for one request"""
+    max_code_tokens: int = 32000
+    prompt_vars: dict = field(default_factory=dict)
+    mention_triggers: list[str] = field(default_factory=list)
+    """
+    Defines the keyword or mention tag that triggers bot actions
+    when referenced in code review comments.
+    """
+    pipeline_steps: dict[str, dict | PipelineStep] = field(default_factory=dict)
+
+    def __post_init__(self):
+        self.pipeline_steps = {
+            k: PipelineStep(**v) if isinstance(v, dict) else v
+            for k, v in self.pipeline_steps.items()
+        }
+
+    @staticmethod
+    def _read_bundled_defaults() -> dict:
+        with open(PROJECT_CONFIG_BUNDLED_DEFAULTS_FILE, "rb") as f:
+            config = tomllib.load(f)
+        return config
+
+    @staticmethod
+    def load_for_repo(repo: Repo):
+        return ProjectConfig.load(Path(repo.working_tree_dir) / PROJECT_CONFIG_FILE_PATH)
+
+    @staticmethod
+    def load(config_path: str | Path | None = None) -> "ProjectConfig":
+        config = ProjectConfig._read_bundled_defaults()
+        github_env = detect_github_env()
+        config["prompt_vars"] |= github_env | dict(github_env=github_env)
+
+        config_path = Path(config_path or PROJECT_CONFIG_FILE_PATH)
+        if config_path.exists():
+            logging.info(
+                f"Loading project-specific configuration from {mc.utils.file_link(config_path)}...")
+            default_prompt_vars = config["prompt_vars"]
+            with open(config_path, "rb") as f:
+                config.update(tomllib.load(f))
+            # overriding prompt_vars config section will not empty default values
+            config["prompt_vars"] = default_prompt_vars | config["prompt_vars"]
+        else:
+            logging.info(
+                f"No project config found at {ui.blue(config_path)}, using defaults"
+            )
+
+        return ProjectConfig(**config)
{ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/report_struct.py
@@ -63,12 +63,13 @@ class Report:
         MARKDOWN = "md"
         CLI = "cli"
 
-    issues: dict = field(default_factory=dict)
+    issues: dict[str, list[Issue]] = field(default_factory=dict)
     summary: str = field(default="")
     number_of_processed_files: int = field(default=0)
     total_issues: int = field(init=False)
     created_at: str = field(default_factory=lambda: datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
     model: str = field(default_factory=lambda: mc.config().MODEL)
+    pipeline_out: dict = field(default_factory=dict)
 
     @property
     def plain_issues(self):
{ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/gito/utils.py
@@ -1,8 +1,11 @@
+import re
 import sys
 import os
 from pathlib import Path
-import typer
 
+import typer
+import git
+from git import Repo
 
 _EXT_TO_HINT: dict[str, str] = {
     # scripting & languages
@@ -130,3 +133,82 @@ def block_wrap_lr(text: str, left: str = "", right: str = "", max_rwrap: int = 6
         ln += ' ' * (ml - len(line)) + right
         wrapped_lines.append(ln)
     return "\n".join(wrapped_lines)
+
+
+def extract_gh_owner_repo(repo: git.Repo) -> tuple[str, str]:
+    """
+    Extracts the GitHub owner and repository name.
+
+    Returns:
+        tuple[str, str]: A tuple containing the owner and repository name.
+    """
+    remote_url = repo.remotes.origin.url
+    if remote_url.startswith('git@github.com:'):
+        # SSH format: git@github.com:owner/repo.git
+        repo_path = remote_url.split(':')[1].replace('.git', '')
+    elif remote_url.startswith('https://github.com/'):
+        # HTTPS format: https://github.com/owner/repo.git
+        repo_path = remote_url.replace('https://github.com/', '').replace('.git', '')
+    else:
+        raise ValueError("Unsupported remote URL format")
+    owner, repo_name = repo_path.split('/')
+    return owner, repo_name
+
+
+def detect_github_env() -> dict:
+    """
+    Try to detect GitHub repository/PR info from environment variables (for GitHub Actions).
+    Returns a dict with github_repo, github_pr_sha, github_pr_number, github_ref, etc.
+    """
+    repo = os.environ.get("GITHUB_REPOSITORY", "")
+    pr_sha = os.environ.get("GITHUB_SHA", "")
+    pr_number = os.environ.get("GITHUB_REF", "")
+    branch = ""
+    ref = os.environ.get("GITHUB_REF", "")
+    # Try to resolve PR head SHA if available.
+    # On PRs, GITHUB_HEAD_REF/BASE_REF contain branch names.
+    if "GITHUB_HEAD_REF" in os.environ:
+        branch = os.environ["GITHUB_HEAD_REF"]
+    elif ref.startswith("refs/heads/"):
+        branch = ref[len("refs/heads/"):]
+    elif ref.startswith("refs/pull/"):
+        # for pull_request events
+        branch = ref
+
+    d = {
+        "github_repo": repo,
+        "github_pr_sha": pr_sha,
+        "github_pr_number": pr_number,
+        "github_branch": branch,
+        "github_ref": ref,
+    }
+    # Fallback for local usage: try to get from git
+    if not repo:
+        git_repo = None
+        try:
+            git_repo = Repo(".", search_parent_directories=True)
+            origin = git_repo.remotes.origin.url
+            # e.g. git@github.com:Nayjest/ai-code-review.git -> Nayjest/ai-code-review
+            match = re.search(r"[:/]([\w\-]+)/([\w\-\.]+?)(\.git)?$", origin)
+            if match:
+                d["github_repo"] = f"{match.group(1)}/{match.group(2)}"
+            d["github_pr_sha"] = git_repo.head.commit.hexsha
+            d["github_branch"] = (
+                git_repo.active_branch.name if hasattr(git_repo, "active_branch") else ""
+            )
+        except Exception:
+            pass
+        finally:
+            if git_repo:
+                try:
+                    git_repo.close()
+                except Exception:
+                    pass
+    # If branch is not a commit SHA, prefer branch for links
+    if d["github_branch"]:
+        d["github_pr_sha_or_branch"] = d["github_branch"]
+    elif d["github_pr_sha"]:
+        d["github_pr_sha_or_branch"] = d["github_pr_sha"]
+    else:
+        d["github_pr_sha_or_branch"] = "main"
+    return d
{ai_cr-2.0.0.dev1 → ai_cr-2.0.0.dev2}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "ai-cr"
-version = "2.0.0-dev1"
+version = "2.0.0-dev2"
 description = "AI code review tool that works with any language model provider. It detects issues in GitHub pull requests or local changes—instantly, reliably, and without vendor lock-in."
 authors = ["Nayjest <mail@vitaliy.in>"]
 readme = "README.md"
@@ -27,6 +27,8 @@ unidiff = "^0.7.5"
 google-generativeai = "^0.8.5"
 anthropic = "^0.52.2"
 typer = "^0.16.0"
+ghapi = "~=1.0.6"
+jira = "^3.8.0"
 
 [tool.poetry.group.dev.dependencies]
 flake8 = "*"
ai_cr-2.0.0.dev1/gito/project_config.py
@@ -1,119 +0,0 @@
-import re
-import logging
-import tomllib
-from dataclasses import dataclass, field
-from pathlib import Path
-
-import microcore as mc
-from microcore import ui
-from git import Repo
-
-from .constants import PROJECT_CONFIG_BUNDLED_DEFAULTS_FILE, PROJECT_CONFIG_FILE_PATH
-
-
-def _detect_github_env() -> dict:
-    """
-    Try to detect GitHub repository/PR info from environment variables (for GitHub Actions).
-    Returns a dict with github_repo, github_pr_sha, github_pr_number, github_ref, etc.
-    """
-    import os
-
-    repo = os.environ.get("GITHUB_REPOSITORY", "")
-    pr_sha = os.environ.get("GITHUB_SHA", "")
-    pr_number = os.environ.get("GITHUB_REF", "")
-    branch = ""
-    ref = os.environ.get("GITHUB_REF", "")
-    # Try to resolve PR head SHA if available.
-    # On PRs, GITHUB_HEAD_REF/BASE_REF contain branch names.
-    if "GITHUB_HEAD_REF" in os.environ:
-        branch = os.environ["GITHUB_HEAD_REF"]
-    elif ref.startswith("refs/heads/"):
-        branch = ref[len("refs/heads/"):]
-    elif ref.startswith("refs/pull/"):
-        # for pull_request events
-        branch = ref
-
-    d = {
-        "github_repo": repo,
-        "github_pr_sha": pr_sha,
-        "github_pr_number": pr_number,
-        "github_branch": branch,
-        "github_ref": ref,
-    }
-    # Fallback for local usage: try to get from git
-    if not repo:
-        git_repo = None
-        try:
-            git_repo = Repo(".", search_parent_directories=True)
-            origin = git_repo.remotes.origin.url
-            # e.g. git@github.com:Nayjest/ai-code-review.git -> Nayjest/ai-code-review
-            match = re.search(r"[:/]([\w\-]+)/([\w\-\.]+?)(\.git)?$", origin)
-            if match:
-                d["github_repo"] = f"{match.group(1)}/{match.group(2)}"
-            d["github_pr_sha"] = git_repo.head.commit.hexsha
-            d["github_branch"] = (
-                git_repo.active_branch.name if hasattr(git_repo, "active_branch") else ""
-            )
-        except Exception:
-            pass
-        finally:
-            if git_repo:
-                try:
-                    git_repo.close()
-                except Exception:
-                    pass
-    # If branch is not a commit SHA, prefer branch for links
-    if d["github_branch"]:
-        d["github_pr_sha_or_branch"] = d["github_branch"]
-    elif d["github_pr_sha"]:
-        d["github_pr_sha_or_branch"] = d["github_pr_sha"]
-    else:
-        d["github_pr_sha_or_branch"] = "main"
-    return d
-
-
-@dataclass
-class ProjectConfig:
-    prompt: str = ""
-    summary_prompt: str = ""
-    report_template_md: str = ""
-    """Markdown report template"""
-    report_template_cli: str = ""
-    """Report template for CLI output"""
-    post_process: str = ""
-    retries: int = 3
-    """LLM retries for one request"""
-    max_code_tokens: int = 32000
-    prompt_vars: dict = field(default_factory=dict)
-
-    @staticmethod
-    def _read_bundled_defaults() -> dict:
-        with open(PROJECT_CONFIG_BUNDLED_DEFAULTS_FILE, "rb") as f:
-            config = tomllib.load(f)
-        return config
-
-    @staticmethod
-    def load_for_repo(repo: Repo):
-        return ProjectConfig.load(Path(repo.working_tree_dir) / PROJECT_CONFIG_FILE_PATH)
-
-    @staticmethod
-    def load(config_path: str | Path | None = None) -> "ProjectConfig":
-        config = ProjectConfig._read_bundled_defaults()
-        github_env = _detect_github_env()
-        config["prompt_vars"] |= github_env | dict(github_env=github_env)
-
-        config_path = Path(config_path or PROJECT_CONFIG_FILE_PATH)
-        if config_path.exists():
-            logging.info(
-                f"Loading project-specific configuration from {mc.utils.file_link(config_path)}...")
-            default_prompt_vars = config["prompt_vars"]
-            with open(config_path, "rb") as f:
-                config.update(tomllib.load(f))
-            # overriding prompt_vars config section will not empty default values
-            config["prompt_vars"] = default_prompt_vars | config["prompt_vars"]
-        else:
-            logging.info(
-                f"No project config found at {ui.blue(config_path)}, using defaults"
-            )
-
-        return ProjectConfig(**config)
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|