xai-review 0.3.0 (xai_review-0.3.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xai-review might be problematic.
- ai_review/__init__.py +0 -0
- ai_review/cli/__init__.py +0 -0
- ai_review/cli/commands/__init__.py +0 -0
- ai_review/cli/commands/run_context_review.py +7 -0
- ai_review/cli/commands/run_inline_review.py +7 -0
- ai_review/cli/commands/run_review.py +8 -0
- ai_review/cli/commands/run_summary_review.py +7 -0
- ai_review/cli/main.py +54 -0
- ai_review/clients/__init__.py +0 -0
- ai_review/clients/claude/__init__.py +0 -0
- ai_review/clients/claude/client.py +44 -0
- ai_review/clients/claude/schema.py +44 -0
- ai_review/clients/gemini/__init__.py +0 -0
- ai_review/clients/gemini/client.py +45 -0
- ai_review/clients/gemini/schema.py +78 -0
- ai_review/clients/gitlab/__init__.py +0 -0
- ai_review/clients/gitlab/client.py +31 -0
- ai_review/clients/gitlab/mr/__init__.py +0 -0
- ai_review/clients/gitlab/mr/client.py +101 -0
- ai_review/clients/gitlab/mr/schema/__init__.py +0 -0
- ai_review/clients/gitlab/mr/schema/changes.py +35 -0
- ai_review/clients/gitlab/mr/schema/comments.py +19 -0
- ai_review/clients/gitlab/mr/schema/discussions.py +34 -0
- ai_review/clients/openai/__init__.py +0 -0
- ai_review/clients/openai/client.py +42 -0
- ai_review/clients/openai/schema.py +37 -0
- ai_review/config.py +62 -0
- ai_review/libs/__init__.py +0 -0
- ai_review/libs/asynchronous/__init__.py +0 -0
- ai_review/libs/asynchronous/gather.py +14 -0
- ai_review/libs/config/__init__.py +0 -0
- ai_review/libs/config/artifacts.py +12 -0
- ai_review/libs/config/base.py +24 -0
- ai_review/libs/config/claude.py +13 -0
- ai_review/libs/config/gemini.py +13 -0
- ai_review/libs/config/gitlab.py +12 -0
- ai_review/libs/config/http.py +19 -0
- ai_review/libs/config/llm.py +61 -0
- ai_review/libs/config/logger.py +17 -0
- ai_review/libs/config/openai.py +13 -0
- ai_review/libs/config/prompt.py +121 -0
- ai_review/libs/config/review.py +30 -0
- ai_review/libs/config/vcs.py +19 -0
- ai_review/libs/constants/__init__.py +0 -0
- ai_review/libs/constants/llm_provider.py +7 -0
- ai_review/libs/constants/vcs_provider.py +6 -0
- ai_review/libs/diff/__init__.py +0 -0
- ai_review/libs/diff/models.py +100 -0
- ai_review/libs/diff/parser.py +111 -0
- ai_review/libs/diff/tools.py +24 -0
- ai_review/libs/http/__init__.py +0 -0
- ai_review/libs/http/client.py +14 -0
- ai_review/libs/http/event_hooks/__init__.py +0 -0
- ai_review/libs/http/event_hooks/base.py +13 -0
- ai_review/libs/http/event_hooks/logger.py +17 -0
- ai_review/libs/http/handlers.py +34 -0
- ai_review/libs/http/transports/__init__.py +0 -0
- ai_review/libs/http/transports/retry.py +34 -0
- ai_review/libs/logger.py +19 -0
- ai_review/libs/resources.py +24 -0
- ai_review/prompts/__init__.py +0 -0
- ai_review/prompts/default_context.md +14 -0
- ai_review/prompts/default_inline.md +8 -0
- ai_review/prompts/default_summary.md +3 -0
- ai_review/prompts/default_system_context.md +27 -0
- ai_review/prompts/default_system_inline.md +25 -0
- ai_review/prompts/default_system_summary.md +7 -0
- ai_review/resources/__init__.py +0 -0
- ai_review/resources/pricing.yaml +55 -0
- ai_review/services/__init__.py +0 -0
- ai_review/services/artifacts/__init__.py +0 -0
- ai_review/services/artifacts/schema.py +11 -0
- ai_review/services/artifacts/service.py +47 -0
- ai_review/services/artifacts/tools.py +8 -0
- ai_review/services/cost/__init__.py +0 -0
- ai_review/services/cost/schema.py +44 -0
- ai_review/services/cost/service.py +58 -0
- ai_review/services/diff/__init__.py +0 -0
- ai_review/services/diff/renderers.py +149 -0
- ai_review/services/diff/schema.py +6 -0
- ai_review/services/diff/service.py +96 -0
- ai_review/services/diff/tools.py +59 -0
- ai_review/services/git/__init__.py +0 -0
- ai_review/services/git/service.py +35 -0
- ai_review/services/git/types.py +11 -0
- ai_review/services/llm/__init__.py +0 -0
- ai_review/services/llm/claude/__init__.py +0 -0
- ai_review/services/llm/claude/client.py +26 -0
- ai_review/services/llm/factory.py +18 -0
- ai_review/services/llm/gemini/__init__.py +0 -0
- ai_review/services/llm/gemini/client.py +31 -0
- ai_review/services/llm/openai/__init__.py +0 -0
- ai_review/services/llm/openai/client.py +28 -0
- ai_review/services/llm/types.py +15 -0
- ai_review/services/prompt/__init__.py +0 -0
- ai_review/services/prompt/adapter.py +25 -0
- ai_review/services/prompt/schema.py +71 -0
- ai_review/services/prompt/service.py +56 -0
- ai_review/services/review/__init__.py +0 -0
- ai_review/services/review/inline/__init__.py +0 -0
- ai_review/services/review/inline/schema.py +53 -0
- ai_review/services/review/inline/service.py +38 -0
- ai_review/services/review/policy/__init__.py +0 -0
- ai_review/services/review/policy/service.py +60 -0
- ai_review/services/review/service.py +207 -0
- ai_review/services/review/summary/__init__.py +0 -0
- ai_review/services/review/summary/schema.py +15 -0
- ai_review/services/review/summary/service.py +14 -0
- ai_review/services/vcs/__init__.py +0 -0
- ai_review/services/vcs/factory.py +12 -0
- ai_review/services/vcs/gitlab/__init__.py +0 -0
- ai_review/services/vcs/gitlab/client.py +152 -0
- ai_review/services/vcs/types.py +55 -0
- ai_review/tests/__init__.py +0 -0
- ai_review/tests/fixtures/__init__.py +0 -0
- ai_review/tests/fixtures/git.py +31 -0
- ai_review/tests/suites/__init__.py +0 -0
- ai_review/tests/suites/clients/__init__.py +0 -0
- ai_review/tests/suites/clients/claude/__init__.py +0 -0
- ai_review/tests/suites/clients/claude/test_client.py +31 -0
- ai_review/tests/suites/clients/claude/test_schema.py +59 -0
- ai_review/tests/suites/clients/gemini/__init__.py +0 -0
- ai_review/tests/suites/clients/gemini/test_client.py +30 -0
- ai_review/tests/suites/clients/gemini/test_schema.py +105 -0
- ai_review/tests/suites/clients/openai/__init__.py +0 -0
- ai_review/tests/suites/clients/openai/test_client.py +30 -0
- ai_review/tests/suites/clients/openai/test_schema.py +53 -0
- ai_review/tests/suites/libs/__init__.py +0 -0
- ai_review/tests/suites/libs/diff/__init__.py +0 -0
- ai_review/tests/suites/libs/diff/test_models.py +105 -0
- ai_review/tests/suites/libs/diff/test_parser.py +115 -0
- ai_review/tests/suites/libs/diff/test_tools.py +62 -0
- ai_review/tests/suites/services/__init__.py +0 -0
- ai_review/tests/suites/services/diff/__init__.py +0 -0
- ai_review/tests/suites/services/diff/test_renderers.py +168 -0
- ai_review/tests/suites/services/diff/test_service.py +84 -0
- ai_review/tests/suites/services/diff/test_tools.py +108 -0
- ai_review/tests/suites/services/prompt/__init__.py +0 -0
- ai_review/tests/suites/services/prompt/test_schema.py +38 -0
- ai_review/tests/suites/services/prompt/test_service.py +128 -0
- ai_review/tests/suites/services/review/__init__.py +0 -0
- ai_review/tests/suites/services/review/inline/__init__.py +0 -0
- ai_review/tests/suites/services/review/inline/test_schema.py +65 -0
- ai_review/tests/suites/services/review/inline/test_service.py +49 -0
- ai_review/tests/suites/services/review/policy/__init__.py +0 -0
- ai_review/tests/suites/services/review/policy/test_service.py +95 -0
- ai_review/tests/suites/services/review/summary/__init__.py +0 -0
- ai_review/tests/suites/services/review/summary/test_schema.py +22 -0
- ai_review/tests/suites/services/review/summary/test_service.py +16 -0
- xai_review-0.3.0.dist-info/METADATA +11 -0
- xai_review-0.3.0.dist-info/RECORD +154 -0
- xai_review-0.3.0.dist-info/WHEEL +5 -0
- xai_review-0.3.0.dist-info/entry_points.txt +2 -0
- xai_review-0.3.0.dist-info/top_level.txt +1 -0

ai_review/services/review/inline/schema.py

@@ -0,0 +1,53 @@
+from typing import Self
+
+from pydantic import BaseModel, Field, RootModel, field_validator
+
+from ai_review.config import settings
+
+DedupKey = tuple[str, int, str]
+
+
+class InlineCommentSchema(BaseModel):
+    file: str = Field(min_length=1)
+    line: int = Field(ge=1)
+    message: str = Field(min_length=1)
+    suggestion: str | None = None
+
+    @field_validator("file")
+    def normalize_file(cls, value: str) -> str:
+        value = value.strip().replace("\\", "/")
+        return value.lstrip("/")
+
+    @field_validator("message")
+    def normalize_message(cls, value: str) -> str:
+        return value.strip()
+
+    @property
+    def dedup_key(self) -> DedupKey:
+        return self.file, self.line, (self.suggestion or self.message).strip().lower()
+
+    @property
+    def body(self) -> str:
+        if self.suggestion:
+            return f"{self.message}\n\n```suggestion\n{self.suggestion}\n```"
+
+        return self.message
+
+    @property
+    def body_with_tag(self) -> str:
+        return f"{self.body}\n\n{settings.review.inline_tag}"
+
+    @property
+    def fallback_body_with_tag(self) -> str:
+        return f"**{self.file}:{self.line}** — {self.message}\n\n{settings.review.inline_tag}"
+
+
+class InlineCommentListSchema(RootModel[list[InlineCommentSchema]]):
+    root: list[InlineCommentSchema]
+
+    def dedupe(self) -> Self:
+        results_map: dict[DedupKey, InlineCommentSchema] = {
+            comment.dedup_key: comment for comment in self.root
+        }
+
+        return InlineCommentListSchema(root=list(results_map.values()))
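
A brief usage sketch of the schema above (an editorial illustration, not part of the package; it assumes xai-review is installed and its settings import cleanly): the file validator normalizes Windows-style paths, and dedupe() collapses comments that share a file, line, and normalized message.

```python
# Editorial sketch, not shipped with the package.
from ai_review.services.review.inline.schema import (
    InlineCommentListSchema,
    InlineCommentSchema,
)

comments = InlineCommentListSchema(
    root=[
        # Backslashes and the leading slash are normalized by the "file" validator.
        InlineCommentSchema(file="\\src\\app.py", line=10, message="Use a constant here "),
        # Same file, line, and normalized message -> same dedup_key as above.
        InlineCommentSchema(file="src/app.py", line=10, message="use a constant here"),
        # A suggestion is rendered into a GitLab suggestion block by .body.
        InlineCommentSchema(file="src/app.py", line=12, message="Rename x", suggestion="total = 0"),
    ]
)

unique = comments.dedupe()
print(len(unique.root))      # 2 (the first two comments collapse into one)
print(unique.root[-1].body)  # message followed by a fenced suggestion block
```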

ai_review/services/review/inline/service.py

@@ -0,0 +1,38 @@
+import re
+
+from pydantic import ValidationError
+
+from ai_review.libs.logger import get_logger
+from ai_review.services.review.inline.schema import InlineCommentListSchema
+
+logger = get_logger("INLINE_COMMENT_SERVICE")
+
+FIRST_JSON_ARRAY_RE = re.compile(r"\[[\s\S]*]", re.MULTILINE)
+CLEAN_JSON_BLOCK_RE = re.compile(r"```(?:json)?(.*?)```", re.DOTALL | re.IGNORECASE)
+
+
+class InlineCommentService:
+    @classmethod
+    def parse_model_output(cls, output: str) -> InlineCommentListSchema:
+        output = (output or "").strip()
+        if not output:
+            logger.warning("LLM returned empty string for inline review")
+            return InlineCommentListSchema(root=[])
+
+        if match := CLEAN_JSON_BLOCK_RE.search(output):
+            output = match.group(1).strip()
+
+        try:
+            return InlineCommentListSchema.model_validate_json(output)
+        except ValidationError:
+            logger.warning("LLM output is not valid JSON, trying to extract first JSON array...")
+
+        if json_array_match := FIRST_JSON_ARRAY_RE.search(output):
+            try:
+                return InlineCommentListSchema.model_validate_json(json_array_match.group(0))
+            except ValidationError:
+                logger.exception("JSON array found but still invalid")
+        else:
+            logger.exception("No JSON array found in LLM output")
+
+        return InlineCommentListSchema(root=[])
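
A minimal sketch (editorial, not part of the package) of what parse_model_output does with a typical reply that wraps the JSON array in a fenced code block, and how anything unparsable degrades to an empty list:

```python
# Editorial sketch, not shipped with the package.
from ai_review.services.review.inline.service import InlineCommentService

fence = "```"  # keep literal backticks out of this example
llm_output = (
    "Here are my findings:\n"
    f"{fence}json\n"
    '[{"file": "src/app.py", "line": 3, "message": "Prefer logging over print"}]\n'
    f"{fence}"
)

comments = InlineCommentService.parse_model_output(llm_output)
print([comment.file for comment in comments.root])  # ['src/app.py']

# No fenced block and no JSON array at all -> empty result rather than an exception.
print(InlineCommentService.parse_model_output("No issues found.").root)  # []
```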

ai_review/services/review/policy/service.py

@@ -0,0 +1,60 @@
+import fnmatch
+
+from ai_review.config import settings
+from ai_review.libs.logger import get_logger
+
+logger = get_logger("REVIEW_POLICY_SERVICE")
+
+
+class ReviewPolicyService:
+    @classmethod
+    def should_review_file(cls, file: str) -> bool:
+        review = settings.review
+
+        for pattern in review.ignore_changes:
+            if fnmatch.fnmatch(file, pattern):
+                logger.debug(f"Skipping {file} (matched ignore: {pattern})")
+                return False
+
+        if not review.allow_changes:
+            logger.debug(f"Allowing {file} (no allow rules, passed ignore)")
+            return True
+
+        for pattern in review.allow_changes:
+            if fnmatch.fnmatch(file, pattern):
+                logger.debug(f"Allowing {file} (matched allow: {pattern})")
+                return True
+
+        logger.debug(f"Skipping {file} (did not match any allow rule)")
+        return False
+
+    @classmethod
+    def apply_for_files(cls, files: list[str]) -> list[str]:
+        allowed = [file for file in files if cls.should_review_file(file)]
+        skipped = [file for file in files if not cls.should_review_file(file)]
+
+        if skipped:
+            logger.info(f"Skipped {len(skipped)} files by policy: {skipped}")
+
+        if allowed:
+            logger.info(f"Proceeding with {len(allowed)} files after policy filter")
+
+        return allowed
+
+    @classmethod
+    def apply_for_inline_comments(cls, comments: list) -> list:
+        limit = settings.review.max_inline_comments
+        if limit and (len(comments) > limit):
+            logger.info(f"Limiting inline comments to {limit} (from {len(comments)})")
+            return comments[:limit]
+
+        return comments
+
+    @classmethod
+    def apply_for_context_comments(cls, comments: list) -> list:
+        limit = settings.review.max_context_comments
+        if limit and (len(comments) > limit):
+            logger.info(f"Limiting context comments to {limit} (from {len(comments)})")
+            return comments[:limit]
+
+        return comments
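
The allow/ignore rules above are plain fnmatch globs. A small sketch of the matching semantics the policy relies on, with hypothetical pattern values (the real ignore_changes and allow_changes come from settings.review and are not shown in this diff):

```python
# Editorial sketch mirroring ReviewPolicyService.should_review_file with
# hypothetical pattern values; the real ones live in settings.review.
import fnmatch

ignore_changes = ["*.lock", "docs/*"]
allow_changes = ["src/*.py"]

def should_review(file: str) -> bool:
    if any(fnmatch.fnmatch(file, pattern) for pattern in ignore_changes):
        return False  # ignore rules always win
    if not allow_changes:
        return True   # an empty allow list means everything else passes
    return any(fnmatch.fnmatch(file, pattern) for pattern in allow_changes)

print(should_review("poetry.lock"))  # False (matches an ignore rule)
print(should_review("src/main.py"))  # True  (matches an allow rule)
print(should_review("README.md"))    # False (allow list is non-empty)
```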

ai_review/services/review/service.py

@@ -0,0 +1,207 @@
+from typing import Literal
+
+from ai_review.config import settings
+from ai_review.libs.asynchronous.gather import bounded_gather
+from ai_review.libs.logger import get_logger
+from ai_review.services.artifacts.service import ArtifactsService
+from ai_review.services.cost.service import CostService
+from ai_review.services.diff.service import DiffService
+from ai_review.services.git.service import GitService
+from ai_review.services.llm.factory import get_llm_client
+from ai_review.services.prompt.adapter import build_prompt_context_from_mr_info
+from ai_review.services.prompt.service import PromptService
+from ai_review.services.review.inline.schema import InlineCommentListSchema
+from ai_review.services.review.inline.service import InlineCommentService
+from ai_review.services.review.policy.service import ReviewPolicyService
+from ai_review.services.review.summary.service import SummaryCommentService
+from ai_review.services.vcs.factory import get_vcs_client
+from ai_review.services.vcs.types import MRInfoSchema
+
+logger = get_logger("REVIEW_SERVICE")
+
+
+class ReviewService:
+    def __init__(self):
+        self.llm = get_llm_client()
+        self.vcs = get_vcs_client()
+        self.git = GitService()
+        self.diff = DiffService()
+        self.cost = CostService()
+        self.prompt = PromptService()
+        self.policy = ReviewPolicyService()
+        self.inline = InlineCommentService()
+        self.summary = SummaryCommentService()
+        self.artifacts = ArtifactsService()
+
+    async def ask_llm(self, prompt: str, prompt_system: str) -> str:
+        try:
+            result = await self.llm.chat(prompt, prompt_system)
+            if not result.text:
+                logger.warning(
+                    f"LLM returned an empty response (prompt length={len(prompt)} chars)"
+                )
+
+            report = self.cost.calculate(result)
+            if report:
+                logger.info(report.pretty())
+
+            await self.artifacts.save_llm_interaction(prompt, prompt_system, result.text)
+
+            return result.text
+        except Exception as error:
+            logger.exception(f"LLM request failed: {error}")
+            raise
+
+    async def has_existing_inline_discussions(self) -> bool:
+        discussions = await self.vcs.get_discussions()
+        has_discussions = any(
+            settings.review.inline_tag in note.body
+            for discussion in discussions
+            for note in discussion.notes
+        )
+        if has_discussions:
+            logger.info("Skipping inline review: AI inline discussions already exist")
+
+        return has_discussions
+
+    async def has_existing_summary_comments(self) -> bool:
+        comments = await self.vcs.get_comments()
+        has_comments = any(settings.review.summary_tag in comment.body for comment in comments)
+        if has_comments:
+            logger.info("Skipping summary review: AI summary comment already exists")
+
+        return has_comments
+
+    async def process_discussions(self, flow: Literal["inline", "context"], comments: InlineCommentListSchema) -> None:
+        results = await bounded_gather([
+            self.vcs.create_discussion(
+                file=comment.file,
+                line=comment.line,
+                message=comment.body_with_tag
+            )
+            for comment in comments.root
+        ])
+        fallbacks = [
+            self.vcs.create_comment(comment.fallback_body_with_tag)
+            for comment, result in zip(comments.root, results)
+            if isinstance(result, Exception)
+        ]
+        if fallbacks:
+            logger.warning(f"Falling back to {len(fallbacks)} general comments ({flow} review)")
+            await bounded_gather(fallbacks)
+
+    async def process_file_inline(self, file: str, mr_info: MRInfoSchema) -> None:
+        raw_diff = self.git.get_diff_for_file(mr_info.base_sha, mr_info.head_sha, file)
+        if not raw_diff.strip():
+            logger.debug(f"No diff for {file}, skipping")
+            return
+
+        rendered_file = self.diff.render_file(
+            file=file,
+            base_sha=mr_info.base_sha,
+            head_sha=mr_info.head_sha,
+            raw_diff=raw_diff,
+        )
+        prompt_context = build_prompt_context_from_mr_info(mr_info)
+        prompt = self.prompt.build_inline_request(rendered_file, prompt_context)
+        prompt_system = self.prompt.build_system_inline_request(prompt_context)
+        prompt_result = await self.ask_llm(prompt, prompt_system)
+
+        comments = self.inline.parse_model_output(prompt_result).dedupe()
+        comments.root = self.policy.apply_for_inline_comments(comments.root)
+        if not comments.root:
+            logger.info(f"No inline comments for file: {file}")
+            return
+
+        logger.info(f"Posting {len(comments.root)} inline comments to {file}")
+        await self.process_discussions(flow="inline", comments=comments)
+
+    async def run_inline_review(self) -> None:
+        if await self.has_existing_inline_discussions():
+            return
+
+        mr_info = await self.vcs.get_mr_info()
+
+        logger.info(f"Starting inline review: {len(mr_info.changed_files)} files changed")
+
+        changed_files = self.policy.apply_for_files(mr_info.changed_files)
+        await bounded_gather([
+            self.process_file_inline(changed_file, mr_info)
+            for changed_file in changed_files
+        ])
+
+    async def run_context_review(self) -> None:
+        if await self.has_existing_inline_discussions():
+            return
+
+        mr_info = await self.vcs.get_mr_info()
+        changed_files = self.policy.apply_for_files(mr_info.changed_files)
+
+        if not changed_files:
+            logger.info("No files to review for context review")
+            return
+
+        logger.info(f"Starting context inline review: {len(changed_files)} files changed")
+
+        rendered_files = self.diff.render_files(
+            git=self.git,
+            files=changed_files,
+            base_sha=mr_info.base_sha,
+            head_sha=mr_info.head_sha,
+        )
+        prompt_context = build_prompt_context_from_mr_info(mr_info)
+        prompt = self.prompt.build_context_request(rendered_files, prompt_context)
+        prompt_system = self.prompt.build_system_context_request(prompt_context)
+        prompt_result = await self.ask_llm(prompt, prompt_system)
+
+        comments = self.inline.parse_model_output(prompt_result).dedupe()
+        comments.root = self.policy.apply_for_context_comments(comments.root)
+        if not comments.root:
+            logger.info("No inline comments from context review")
+            return
+
+        logger.info(f"Posting {len(comments.root)} inline comments (context review)")
+        await self.process_discussions(flow="context", comments=comments)
+
+    async def run_summary_review(self) -> None:
+        if await self.has_existing_summary_comments():
+            return
+
+        mr_info = await self.vcs.get_mr_info()
+        changed_files = self.policy.apply_for_files(mr_info.changed_files)
+
+        if not changed_files:
+            logger.info("No files to review for summary")
+            return
+
+        logger.info(f"Starting summary review: {len(changed_files)} files changed")
+
+        rendered_files = self.diff.render_files(
+            git=self.git,
+            files=changed_files,
+            base_sha=mr_info.base_sha,
+            head_sha=mr_info.head_sha,
+        )
+        prompt_context = build_prompt_context_from_mr_info(mr_info)
+        prompt = self.prompt.build_summary_request(rendered_files, prompt_context)
+        prompt_system = self.prompt.build_system_summary_request(prompt_context)
+        prompt_result = await self.ask_llm(prompt, prompt_system)
+
+        summary = self.summary.parse_model_output(prompt_result)
+        if not summary.text.strip():
+            logger.warning("Summary LLM output was empty, skipping comment")
+            return
+
+        logger.info(f"Posting summary review comment ({len(summary.text)} chars)")
+        await self.vcs.create_comment(summary.body_with_tag)
+
+    def report_total_cost(self):
+        total_report = self.cost.aggregate()
+        if total_report:
+            logger.info(
+                "\n=== TOTAL REVIEW COST ===\n"
+                f"{total_report.pretty()}\n"
+                "========================="
+            )
+        else:
+            logger.info("No cost data collected for this review")
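
process_discussions above zips the bounded_gather results back onto the comments and treats Exception instances as per-item failures, which implies the helper keeps result order and returns exceptions instead of raising. The real ai_review.libs.asynchronous.gather.bounded_gather is not shown in this diff; a hypothetical stand-in with that contract could look like this:

```python
# Hypothetical stand-in for bounded_gather (the packaged implementation is not
# shown in this diff): bounded concurrency, order-preserving, exceptions as values.
import asyncio
from collections.abc import Awaitable
from typing import Any

async def bounded_gather(aws: list[Awaitable[Any]], limit: int = 5) -> list[Any]:
    semaphore = asyncio.Semaphore(limit)

    async def run(aw: Awaitable[Any]) -> Any:
        async with semaphore:
            return await aw

    return await asyncio.gather(*(run(aw) for aw in aws), return_exceptions=True)
```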

ai_review/services/review/summary/schema.py

@@ -0,0 +1,15 @@
+from pydantic import BaseModel, field_validator
+
+from ai_review.config import settings
+
+
+class SummaryCommentSchema(BaseModel):
+    text: str
+
+    @field_validator("text")
+    def normalize_text(cls, value: str) -> str:
+        return (value or "").strip()
+
+    @property
+    def body_with_tag(self):
+        return f"{self.text}\n\n{settings.review.summary_tag}"

ai_review/services/review/summary/service.py

@@ -0,0 +1,14 @@
+from ai_review.libs.logger import get_logger
+from ai_review.services.review.summary.schema import SummaryCommentSchema
+
+logger = get_logger("SUMMARY_COMMENT_SERVICE")
+
+
+class SummaryCommentService:
+    @classmethod
+    def parse_model_output(cls, output: str) -> SummaryCommentSchema:
+        text = (output or "").strip()
+        if not text:
+            logger.warning("LLM returned empty summary")
+
+        return SummaryCommentSchema(text=text)
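
Unlike the inline parser, the summary parser treats the whole reply as free text. A short editorial sketch (assuming the package's settings are configured so the schema module imports cleanly):

```python
# Editorial sketch, not shipped with the package.
from ai_review.services.review.summary.service import SummaryCommentService

summary = SummaryCommentService.parse_model_output("  Overall the change looks safe.  ")
print(summary.text)           # "Overall the change looks safe."
print(summary.body_with_tag)  # the text with settings.review.summary_tag appended
```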

ai_review/services/vcs/factory.py

@@ -0,0 +1,12 @@
+from ai_review.config import settings
+from ai_review.libs.constants.vcs_provider import VCSProvider
+from ai_review.services.vcs.gitlab.client import GitLabVCSClient
+from ai_review.services.vcs.types import VCSClient
+
+
+def get_vcs_client() -> VCSClient:
+    match settings.vcs.provider:
+        case VCSProvider.GITLAB:
+            return GitLabVCSClient()
+        case _:
+            raise ValueError(f"Unsupported provider: {settings.llm.provider}")
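
A short usage sketch (editorial; it assumes settings.vcs.provider is set to GitLab and the pipeline variables such as project_id and merge_request_id are configured): callers only see the VCSClient protocol, never the concrete GitLab client.

```python
# Editorial sketch, not shipped with the package.
import asyncio

from ai_review.services.vcs.factory import get_vcs_client

async def main() -> None:
    vcs = get_vcs_client()          # GitLabVCSClient behind the VCSClient protocol
    mr_info = await vcs.get_mr_info()
    print(mr_info.title, mr_info.changed_files)

asyncio.run(main())
```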

ai_review/services/vcs/gitlab/client.py

@@ -0,0 +1,152 @@
+from ai_review.clients.gitlab.client import get_gitlab_http_client
+from ai_review.clients.gitlab.mr.schema.discussions import (
+    GitLabDiscussionPositionSchema,
+    GitLabCreateMRDiscussionRequestSchema
+)
+from ai_review.config import settings
+from ai_review.libs.logger import get_logger
+from ai_review.services.vcs.types import (
+    VCSClient,
+    MRUserSchema,
+    MRInfoSchema,
+    MRNoteSchema,
+    MRCommentSchema,
+    MRDiscussionSchema,
+)
+
+logger = get_logger("GITLAB_VCS_CLIENT")
+
+
+class GitLabVCSClient(VCSClient):
+    def __init__(self):
+        self.http_client = get_gitlab_http_client()
+        self.project_id = settings.vcs.pipeline.project_id
+        self.merge_request_id = settings.vcs.pipeline.merge_request_id
+
+    async def get_mr_info(self) -> MRInfoSchema:
+        try:
+            response = await self.http_client.mr.get_changes(
+                project_id=self.project_id,
+                merge_request_id=self.merge_request_id,
+            )
+            logger.info(f"Fetched MR info for project_id={self.project_id} merge_request_id={self.merge_request_id}")
+
+            return MRInfoSchema(
+                title=response.title,
+                author=MRUserSchema(
+                    name=response.author.name,
+                    username=response.author.username
+                ),
+                labels=response.labels,
+                base_sha=response.diff_refs.base_sha,
+                head_sha=response.diff_refs.head_sha,
+                start_sha=response.diff_refs.start_sha,
+                reviewers=[
+                    MRUserSchema(name=reviewer.name, username=reviewer.username)
+                    for reviewer in response.reviewers
+                ],
+                assignees=[
+                    MRUserSchema(name=assignee.name, username=assignee.username)
+                    for assignee in response.assignees
+                ],
+                description=response.description,
+                source_branch=response.source_branch,
+                target_branch=response.target_branch,
+                changed_files=[change.new_path for change in response.changes if change.new_path],
+            )
+        except Exception as error:
+            logger.exception(
+                f"Failed to fetch MR info project_id={self.project_id} "
+                f"merge_request_id={self.merge_request_id}: {error}"
+            )
+            return MRInfoSchema()
+
+    async def get_comments(self) -> list[MRCommentSchema]:
+        try:
+            response = await self.http_client.mr.get_comments(
+                project_id=self.project_id,
+                merge_request_id=self.merge_request_id,
+            )
+            logger.info(
+                f"Fetched comments for project_id={self.project_id} merge_request_id={self.merge_request_id}"
+            )
+
+            return [MRCommentSchema(id=comment.id, body=comment.body) for comment in response.root]
+        except Exception as error:
+            logger.exception(
+                f"Failed to fetch comments project_id={self.project_id} "
+                f"merge_request_id={self.merge_request_id}: {error}"
+            )
+            return []
+
+    async def get_discussions(self) -> list[MRDiscussionSchema]:
+        try:
+            response = await self.http_client.mr.get_discussions(
+                project_id=self.project_id,
+                merge_request_id=self.merge_request_id,
+            )
+            logger.info(
+                f"Fetched discussions for project_id={self.project_id} merge_request_id={self.merge_request_id}"
+            )
+
+            return [
+                MRDiscussionSchema(
+                    id=discussion.id,
+                    notes=[MRNoteSchema(id=note.id, body=note.body or "") for note in discussion.notes],
+                )
+                for discussion in response.root
+            ]
+        except Exception as error:
+            logger.exception(
+                f"Failed to fetch discussions project_id={self.project_id} "
+                f"merge_request_id={self.merge_request_id}: {error}"
+            )
+            return []
+
+    async def create_comment(self, message: str) -> None:
+        try:
+            logger.info(
+                f"Posting comment to merge_request_id={self.merge_request_id}: {message}",
+            )
+            await self.http_client.mr.create_comment(
+                comment=message,
+                project_id=self.project_id,
+                merge_request_id=self.merge_request_id,
+            )
+            logger.info(f"Created comment in {self.merge_request_id=}")
+        except Exception as error:
+            logger.exception(f"Failed to create comment in merge_request_id={self.merge_request_id}: {error}")
+
+    async def create_discussion(self, file: str, line: int, message: str) -> None:
+        try:
+            logger.info(
+                f"Posting discussion to merge_request_id={self.merge_request_id} at {file}:{line}: {message}"
+            )
+
+            response = await self.http_client.mr.get_changes(
+                project_id=self.project_id,
+                merge_request_id=self.merge_request_id,
+            )
+
+            request = GitLabCreateMRDiscussionRequestSchema(
+                body=message,
+                position=GitLabDiscussionPositionSchema(
+                    position_type="text",
+                    base_sha=response.diff_refs.base_sha,
+                    head_sha=response.diff_refs.head_sha,
+                    start_sha=response.diff_refs.start_sha,
+                    new_path=file,
+                    new_line=line,
+                )
+            )
+            await self.http_client.mr.create_discussion(
+                request=request,
+                project_id=self.project_id,
+                merge_request_id=self.merge_request_id,
+            )
+            logger.info(f"Created discussion in merge_request_id={self.merge_request_id} at {file}:{line}")
+        except Exception as error:
+            logger.exception(
+                f"Failed to create discussion in merge_request_id={self.merge_request_id} "
+                f"at {file}:{line}: {error}"
+            )

ai_review/services/vcs/types.py

@@ -0,0 +1,55 @@
+from typing import Protocol
+
+from pydantic import BaseModel, Field
+
+
+class MRUserSchema(BaseModel):
+    name: str = ""
+    username: str = ""
+
+
+class MRInfoSchema(BaseModel):
+    title: str = ""
+    author: MRUserSchema = Field(default_factory=MRUserSchema)
+    labels: list[str] = Field(default_factory=list)
+    base_sha: str = ""
+    head_sha: str = ""
+    assignees: list[MRUserSchema] = Field(default_factory=list)
+    reviewers: list[MRUserSchema] = Field(default_factory=list)
+    start_sha: str = ""
+    description: str = ""
+    source_branch: str = ""
+    target_branch: str = ""
+    changed_files: list[str] = Field(default_factory=list)
+
+
+class MRNoteSchema(BaseModel):
+    id: int | str
+    body: str
+
+
+class MRDiscussionSchema(BaseModel):
+    id: str
+    notes: list[MRNoteSchema]
+
+
+class MRCommentSchema(BaseModel):
+    id: int | str
+    body: str
+
+
+class VCSClient(Protocol):
+    async def get_mr_info(self) -> MRInfoSchema:
+        ...
+
+    async def get_comments(self) -> list[MRCommentSchema]:
+        ...
+
+    async def get_discussions(self) -> list[MRDiscussionSchema]:
+        ...
+
+    async def create_comment(self, message: str) -> None:
+        ...
+
+    async def create_discussion(self, file: str, line: int, message: str) -> None:
+        ...
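
Because VCSClient is a typing.Protocol, any object exposing these five async methods satisfies it structurally; inheritance is optional (the packaged GitLabVCSClient happens to subclass it explicitly). A hypothetical dry-run client as an editorial sketch:

```python
# Hypothetical dry-run client; an editorial sketch, not shipped with the package.
from ai_review.services.vcs.types import (
    MRCommentSchema,
    MRDiscussionSchema,
    MRInfoSchema,
    VCSClient,
)

class DryRunVCSClient:
    async def get_mr_info(self) -> MRInfoSchema:
        return MRInfoSchema(title="demo", changed_files=["src/app.py"])

    async def get_comments(self) -> list[MRCommentSchema]:
        return []

    async def get_discussions(self) -> list[MRDiscussionSchema]:
        return []

    async def create_comment(self, message: str) -> None:
        print(f"[dry-run] comment: {message}")

    async def create_discussion(self, file: str, line: int, message: str) -> None:
        print(f"[dry-run] discussion at {file}:{line}: {message}")

client: VCSClient = DryRunVCSClient()  # passes structural type checks
```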

ai_review/tests/fixtures/git.py

@@ -0,0 +1,31 @@
+# ai_review/tests/conftest.py
+from typing import Any
+
+import pytest
+
+from ai_review.services.git.types import GitServiceProtocol
+
+
+class FakeGitService(GitServiceProtocol):
+    """Simple fake for GitService used in tests."""
+
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.responses = responses or {}
+
+    def get_diff(self, base_sha: str, head_sha: str, unified: int = 3) -> str:
+        return self.responses.get("get_diff", "")
+
+    def get_diff_for_file(self, base_sha: str, head_sha: str, file: str, unified: int = 3) -> str:
+        return self.responses.get("get_diff_for_file", "")
+
+    def get_changed_files(self, base_sha: str, head_sha: str) -> list[str]:
+        return self.responses.get("get_changed_files", [])
+
+    def get_file_at_commit(self, file_path: str, sha: str) -> str | None:
+        return self.responses.get("get_file_at_commit", None)
+
+
+@pytest.fixture
+def fake_git() -> FakeGitService:
+    """Default fake GitService with empty responses."""
+    return FakeGitService()
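
A small sketch (editorial) of how the fake above might be used in tests; the fake_git fixture variant assumes the fixture module is registered with pytest, for example through a conftest.py, as the header comment in the file hints.

```python
# Editorial sketch, not shipped with the package.
from ai_review.tests.fixtures.git import FakeGitService

def test_returns_canned_diff() -> None:
    git = FakeGitService(responses={"get_diff_for_file": "+++ b/src/app.py"})
    assert git.get_diff_for_file("base", "head", "src/app.py") == "+++ b/src/app.py"

def test_defaults_are_empty(fake_git: FakeGitService) -> None:
    # Requires the fake_git fixture to be registered with pytest (e.g. via conftest.py).
    assert fake_git.get_changed_files("base", "head") == []
```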