xai-review 0.19.0__py3-none-any.whl → 0.21.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xai-review might be problematic. Click here for more details.

Files changed (54) hide show
  1. ai_review/services/artifacts/service.py +2 -1
  2. ai_review/services/artifacts/types.py +20 -0
  3. ai_review/services/cost/service.py +2 -1
  4. ai_review/services/cost/types.py +12 -0
  5. ai_review/services/diff/service.py +2 -1
  6. ai_review/services/diff/types.py +28 -0
  7. ai_review/services/hook/__init__.py +5 -0
  8. ai_review/services/hook/constants.py +24 -0
  9. ai_review/services/hook/service.py +162 -0
  10. ai_review/services/hook/types.py +28 -0
  11. ai_review/services/llm/claude/client.py +2 -2
  12. ai_review/services/llm/factory.py +2 -2
  13. ai_review/services/llm/gemini/client.py +2 -2
  14. ai_review/services/llm/openai/client.py +2 -2
  15. ai_review/services/llm/types.py +1 -1
  16. ai_review/services/prompt/service.py +2 -1
  17. ai_review/services/prompt/types.py +27 -0
  18. ai_review/services/review/gateway/__init__.py +0 -0
  19. ai_review/services/review/gateway/comment.py +65 -0
  20. ai_review/services/review/gateway/llm.py +40 -0
  21. ai_review/services/review/inline/schema.py +2 -2
  22. ai_review/services/review/inline/service.py +2 -1
  23. ai_review/services/review/inline/types.py +11 -0
  24. ai_review/services/review/service.py +23 -74
  25. ai_review/services/review/summary/service.py +2 -1
  26. ai_review/services/review/summary/types.py +8 -0
  27. ai_review/services/vcs/factory.py +2 -2
  28. ai_review/services/vcs/github/client.py +4 -2
  29. ai_review/services/vcs/gitlab/client.py +4 -2
  30. ai_review/services/vcs/types.py +1 -1
  31. ai_review/tests/fixtures/artifacts.py +51 -0
  32. ai_review/tests/fixtures/cost.py +48 -0
  33. ai_review/tests/fixtures/diff.py +46 -0
  34. ai_review/tests/fixtures/git.py +11 -5
  35. ai_review/tests/fixtures/llm.py +26 -0
  36. ai_review/tests/fixtures/prompt.py +43 -0
  37. ai_review/tests/fixtures/review/__init__.py +0 -0
  38. ai_review/tests/fixtures/review/inline.py +25 -0
  39. ai_review/tests/fixtures/review/summary.py +19 -0
  40. ai_review/tests/fixtures/vcs.py +49 -0
  41. ai_review/tests/suites/services/diff/test_service.py +3 -3
  42. ai_review/tests/suites/services/diff/test_tools.py +9 -9
  43. ai_review/tests/suites/services/hook/__init__.py +0 -0
  44. ai_review/tests/suites/services/hook/test_service.py +93 -0
  45. ai_review/tests/suites/services/review/inline/test_schema.py +10 -9
  46. ai_review/tests/suites/services/review/summary/test_schema.py +0 -1
  47. ai_review/tests/suites/services/review/summary/test_service.py +10 -7
  48. ai_review/tests/suites/services/review/test_service.py +126 -0
  49. {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/METADATA +10 -7
  50. {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/RECORD +54 -29
  51. {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/WHEEL +0 -0
  52. {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/entry_points.txt +0 -0
  53. {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/licenses/LICENSE +0 -0
  54. {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/top_level.txt +0 -0
@@ -1,16 +1,15 @@
1
- from typing import Literal
2
-
3
- from ai_review.config import settings
4
1
  from ai_review.libs.asynchronous.gather import bounded_gather
5
2
  from ai_review.libs.logger import get_logger
6
3
  from ai_review.services.artifacts.service import ArtifactsService
7
4
  from ai_review.services.cost.service import CostService
8
5
  from ai_review.services.diff.service import DiffService
9
6
  from ai_review.services.git.service import GitService
7
+ from ai_review.services.hook import hook
10
8
  from ai_review.services.llm.factory import get_llm_client
11
9
  from ai_review.services.prompt.adapter import build_prompt_context_from_mr_info
12
10
  from ai_review.services.prompt.service import PromptService
13
- from ai_review.services.review.inline.schema import InlineCommentListSchema
11
+ from ai_review.services.review.gateway.comment import ReviewCommentGateway
12
+ from ai_review.services.review.gateway.llm import ReviewLLMGateway
14
13
  from ai_review.services.review.inline.service import InlineCommentService
15
14
  from ai_review.services.review.policy.service import ReviewPolicyService
16
15
  from ai_review.services.review.summary.service import SummaryCommentService
@@ -33,68 +32,12 @@ class ReviewService:
33
32
  self.summary = SummaryCommentService()
34
33
  self.artifacts = ArtifactsService()
35
34
 
36
- async def ask_llm(self, prompt: str, prompt_system: str) -> str:
37
- try:
38
- result = await self.llm.chat(prompt, prompt_system)
39
- if not result.text:
40
- logger.warning(
41
- f"LLM returned an empty response (prompt length={len(prompt)} chars)"
42
- )
43
-
44
- report = self.cost.calculate(result)
45
- if report:
46
- logger.info(report.pretty())
47
-
48
- await self.artifacts.save_llm_interaction(prompt, prompt_system, result.text)
49
-
50
- return result.text
51
- except Exception as error:
52
- logger.exception(f"LLM request failed: {error}")
53
- raise
54
-
55
- async def has_existing_inline_comments(self) -> bool:
56
- comments = await self.vcs.get_inline_comments()
57
- has_comments = any(
58
- settings.review.inline_tag in comment.body
59
- for comment in comments
60
- )
61
- if has_comments:
62
- logger.info("Skipping inline review: AI inline comments already exist")
63
-
64
- return has_comments
65
-
66
- async def has_existing_summary_comments(self) -> bool:
67
- comments = await self.vcs.get_general_comments()
68
- has_comments = any(
69
- settings.review.summary_tag in comment.body for comment in comments
35
+ self.llm_gateway = ReviewLLMGateway(
36
+ llm=self.llm,
37
+ cost=self.cost,
38
+ artifacts=self.artifacts
70
39
  )
71
- if has_comments:
72
- logger.info("Skipping summary review: AI summary comment already exists")
73
-
74
- return has_comments
75
-
76
- async def process_inline_comments(
77
- self,
78
- flow: Literal["inline", "context"],
79
- comments: InlineCommentListSchema
80
- ) -> None:
81
- results = await bounded_gather([
82
- self.vcs.create_inline_comment(
83
- file=comment.file,
84
- line=comment.line,
85
- message=comment.body_with_tag
86
- )
87
- for comment in comments.root
88
- ])
89
-
90
- fallbacks = [
91
- self.vcs.create_general_comment(comment.fallback_body_with_tag)
92
- for comment, result in zip(comments.root, results)
93
- if isinstance(result, Exception)
94
- ]
95
- if fallbacks:
96
- logger.warning(f"Falling back to {len(fallbacks)} general comments ({flow} review)")
97
- await bounded_gather(fallbacks)
40
+ self.comment_gateway = ReviewCommentGateway(vcs=self.vcs)
98
41
 
99
42
  async def process_file_inline(self, file: str, review_info: ReviewInfoSchema) -> None:
100
43
  raw_diff = self.git.get_diff_for_file(review_info.base_sha, review_info.head_sha, file)
@@ -111,7 +54,7 @@ class ReviewService:
111
54
  prompt_context = build_prompt_context_from_mr_info(review_info)
112
55
  prompt = self.prompt.build_inline_request(rendered_file, prompt_context)
113
56
  prompt_system = self.prompt.build_system_inline_request(prompt_context)
114
- prompt_result = await self.ask_llm(prompt, prompt_system)
57
+ prompt_result = await self.llm_gateway.ask(prompt, prompt_system)
115
58
 
116
59
  comments = self.inline.parse_model_output(prompt_result).dedupe()
117
60
  comments.root = self.policy.apply_for_inline_comments(comments.root)
@@ -120,10 +63,11 @@ class ReviewService:
120
63
  return
121
64
 
122
65
  logger.info(f"Posting {len(comments.root)} inline comments to {file}")
123
- await self.process_inline_comments(flow="inline", comments=comments)
66
+ await self.comment_gateway.process_inline_comments(comments)
124
67
 
125
68
  async def run_inline_review(self) -> None:
126
- if await self.has_existing_inline_comments():
69
+ await hook.emit_inline_review_start()
70
+ if await self.comment_gateway.has_existing_inline_comments():
127
71
  return
128
72
 
129
73
  review_info = await self.vcs.get_review_info()
@@ -134,9 +78,11 @@ class ReviewService:
134
78
  self.process_file_inline(changed_file, review_info)
135
79
  for changed_file in changed_files
136
80
  ])
81
+ await hook.emit_inline_review_complete(self.cost.aggregate())
137
82
 
138
83
  async def run_context_review(self) -> None:
139
- if await self.has_existing_inline_comments():
84
+ await hook.emit_context_review_start()
85
+ if await self.comment_gateway.has_existing_inline_comments():
140
86
  return
141
87
 
142
88
  review_info = await self.vcs.get_review_info()
@@ -156,7 +102,7 @@ class ReviewService:
156
102
  prompt_context = build_prompt_context_from_mr_info(review_info)
157
103
  prompt = self.prompt.build_context_request(rendered_files, prompt_context)
158
104
  prompt_system = self.prompt.build_system_context_request(prompt_context)
159
- prompt_result = await self.ask_llm(prompt, prompt_system)
105
+ prompt_result = await self.llm_gateway.ask(prompt, prompt_system)
160
106
 
161
107
  comments = self.inline.parse_model_output(prompt_result).dedupe()
162
108
  comments.root = self.policy.apply_for_context_comments(comments.root)
@@ -165,10 +111,12 @@ class ReviewService:
165
111
  return
166
112
 
167
113
  logger.info(f"Posting {len(comments.root)} inline comments (context review)")
168
- await self.process_inline_comments(flow="context", comments=comments)
114
+ await self.comment_gateway.process_inline_comments(comments)
115
+ await hook.emit_context_review_complete(self.cost.aggregate())
169
116
 
170
117
  async def run_summary_review(self) -> None:
171
- if await self.has_existing_summary_comments():
118
+ await hook.emit_summary_review_start()
119
+ if await self.comment_gateway.has_existing_summary_comments():
172
120
  return
173
121
 
174
122
  review_info = await self.vcs.get_review_info()
@@ -188,7 +136,7 @@ class ReviewService:
188
136
  prompt_context = build_prompt_context_from_mr_info(review_info)
189
137
  prompt = self.prompt.build_summary_request(rendered_files, prompt_context)
190
138
  prompt_system = self.prompt.build_system_summary_request(prompt_context)
191
- prompt_result = await self.ask_llm(prompt, prompt_system)
139
+ prompt_result = await self.llm_gateway.ask(prompt, prompt_system)
192
140
 
193
141
  summary = self.summary.parse_model_output(prompt_result)
194
142
  if not summary.text.strip():
@@ -196,7 +144,8 @@ class ReviewService:
196
144
  return
197
145
 
198
146
  logger.info(f"Posting summary review comment ({len(summary.text)} chars)")
199
- await self.vcs.create_general_comment(summary.body_with_tag)
147
+ await self.comment_gateway.process_summary_comment(summary)
148
+ await hook.emit_summary_review_complete(self.cost.aggregate())
200
149
 
201
150
  def report_total_cost(self):
202
151
  total_report = self.cost.aggregate()
@@ -1,10 +1,11 @@
1
1
  from ai_review.libs.logger import get_logger
2
2
  from ai_review.services.review.summary.schema import SummaryCommentSchema
3
+ from ai_review.services.review.summary.types import SummaryCommentServiceProtocol
3
4
 
4
5
  logger = get_logger("SUMMARY_COMMENT_SERVICE")
5
6
 
6
7
 
7
- class SummaryCommentService:
8
+ class SummaryCommentService(SummaryCommentServiceProtocol):
8
9
  @classmethod
9
10
  def parse_model_output(cls, output: str) -> SummaryCommentSchema:
10
11
  text = (output or "").strip()
@@ -0,0 +1,8 @@
1
+ from typing import Protocol
2
+
3
+ from ai_review.services.review.summary.schema import SummaryCommentSchema
4
+
5
+
6
+ class SummaryCommentServiceProtocol(Protocol):
7
+ def parse_model_output(self, output: str) -> SummaryCommentSchema:
8
+ ...
@@ -2,10 +2,10 @@ from ai_review.config import settings
2
2
  from ai_review.libs.constants.vcs_provider import VCSProvider
3
3
  from ai_review.services.vcs.github.client import GitHubVCSClient
4
4
  from ai_review.services.vcs.gitlab.client import GitLabVCSClient
5
- from ai_review.services.vcs.types import VCSClient
5
+ from ai_review.services.vcs.types import VCSClientProtocol
6
6
 
7
7
 
8
- def get_vcs_client() -> VCSClient:
8
+ def get_vcs_client() -> VCSClientProtocol:
9
9
  match settings.vcs.provider:
10
10
  case VCSProvider.GITLAB:
11
11
  return GitLabVCSClient()
@@ -3,7 +3,7 @@ from ai_review.clients.github.pr.schema.comments import GitHubCreateReviewCommen
3
3
  from ai_review.config import settings
4
4
  from ai_review.libs.logger import get_logger
5
5
  from ai_review.services.vcs.types import (
6
- VCSClient,
6
+ VCSClientProtocol,
7
7
  UserSchema,
8
8
  BranchRefSchema,
9
9
  ReviewInfoSchema,
@@ -13,7 +13,7 @@ from ai_review.services.vcs.types import (
13
13
  logger = get_logger("GITHUB_VCS_CLIENT")
14
14
 
15
15
 
16
- class GitHubVCSClient(VCSClient):
16
+ class GitHubVCSClient(VCSClientProtocol):
17
17
  def __init__(self):
18
18
  self.http_client = get_github_http_client()
19
19
  self.owner = settings.vcs.pipeline.owner
@@ -135,6 +135,7 @@ class GitHubVCSClient(VCSClient):
135
135
  logger.exception(
136
136
  f"Failed to create general comment in PR {self.owner}/{self.repo}#{self.pull_number}: {error}"
137
137
  )
138
+ raise
138
139
 
139
140
  async def create_inline_comment(self, file: str, line: int, message: str) -> None:
140
141
  try:
@@ -167,3 +168,4 @@ class GitHubVCSClient(VCSClient):
167
168
  f"Failed to create inline comment in {self.owner}/{self.repo}#{self.pull_number} "
168
169
  f"at {file}:{line}: {error}"
169
170
  )
171
+ raise
@@ -6,7 +6,7 @@ from ai_review.clients.gitlab.mr.schema.discussions import (
6
6
  from ai_review.config import settings
7
7
  from ai_review.libs.logger import get_logger
8
8
  from ai_review.services.vcs.types import (
9
- VCSClient,
9
+ VCSClientProtocol,
10
10
  UserSchema,
11
11
  BranchRefSchema,
12
12
  ReviewInfoSchema,
@@ -16,7 +16,7 @@ from ai_review.services.vcs.types import (
16
16
  logger = get_logger("GITLAB_VCS_CLIENT")
17
17
 
18
18
 
19
- class GitLabVCSClient(VCSClient):
19
+ class GitLabVCSClient(VCSClientProtocol):
20
20
  def __init__(self):
21
21
  self.http_client = get_gitlab_http_client()
22
22
  self.project_id = settings.vcs.pipeline.project_id
@@ -132,6 +132,7 @@ class GitLabVCSClient(VCSClient):
132
132
  logger.exception(
133
133
  f"Failed to create general comment in merge_request_id={self.merge_request_id}: {error}"
134
134
  )
135
+ raise
135
136
 
136
137
  async def create_inline_comment(self, file: str, line: int, message: str) -> None:
137
138
  try:
@@ -168,3 +169,4 @@ class GitLabVCSClient(VCSClient):
168
169
  f"Failed to create inline comment in merge_request_id={self.merge_request_id} "
169
170
  f"at {file}:{line}: {error}"
170
171
  )
172
+ raise
@@ -42,7 +42,7 @@ class ReviewThreadSchema(BaseModel):
42
42
  comments: list[ReviewCommentSchema]
43
43
 
44
44
 
45
- class VCSClient(Protocol):
45
+ class VCSClientProtocol(Protocol):
46
46
  """
47
47
  Unified interface for version control system integrations (GitHub, GitLab, Bitbucket, etc.).
48
48
  Designed for code review automation: fetching review info, comments, and posting feedback.
@@ -0,0 +1,51 @@
1
+ from pathlib import Path
2
+
3
+ import pytest
4
+
5
+ from ai_review.services.artifacts.types import ArtifactsServiceProtocol
6
+
7
+
8
+ class FakeArtifactsService(ArtifactsServiceProtocol):
9
+ def __init__(self):
10
+ self.calls: list[tuple[str, dict]] = []
11
+ self.saved_artifacts: list[tuple[Path, str, str]] = []
12
+ self.saved_llm_interactions: list[dict[str, str | None]] = []
13
+
14
+ async def save_llm_interaction(
15
+ self,
16
+ prompt: str,
17
+ prompt_system: str,
18
+ response: str | None = None
19
+ ) -> str:
20
+ self.calls.append((
21
+ "save_llm_interaction",
22
+ {"prompt": prompt, "prompt_system": prompt_system, "response": response},
23
+ ))
24
+
25
+ artifact_id = f"fake-{len(self.saved_llm_interactions) + 1}"
26
+ self.saved_llm_interactions.append({
27
+ "id": artifact_id,
28
+ "prompt": prompt,
29
+ "prompt_system": prompt_system,
30
+ "response": response,
31
+ })
32
+ return artifact_id
33
+
34
+ async def save_artifact(
35
+ self,
36
+ file: Path,
37
+ content: str,
38
+ kind: str = "artifact"
39
+ ) -> Path:
40
+ self.calls.append((
41
+ "save_artifact",
42
+ {"file": str(file), "content": content, "kind": kind},
43
+ ))
44
+
45
+ self.saved_artifacts.append((file, content, kind))
46
+ return file
47
+
48
+
49
+ @pytest.fixture
50
+ def fake_artifacts_service() -> FakeArtifactsService:
51
+ return FakeArtifactsService()
@@ -0,0 +1,48 @@
1
+ import pytest
2
+
3
+ from ai_review.services.cost.schema import CostReportSchema
4
+ from ai_review.services.cost.types import CostServiceProtocol
5
+ from ai_review.services.llm.types import ChatResultSchema
6
+
7
+
8
+ class FakeCostService(CostServiceProtocol):
9
+ def __init__(self):
10
+ self.calls: list[tuple[str, dict]] = []
11
+ self.reports: list[CostReportSchema] = []
12
+ self.calculated_results: list[ChatResultSchema] = []
13
+
14
+ def calculate(self, result: ChatResultSchema) -> CostReportSchema:
15
+ self.calls.append(("calculate", {"result": result}))
16
+ self.calculated_results.append(result)
17
+
18
+ report = CostReportSchema(
19
+ model="fake-model",
20
+ prompt_tokens=result.prompt_tokens or 10,
21
+ completion_tokens=result.completion_tokens or 5,
22
+ input_cost=0.001,
23
+ output_cost=0.002,
24
+ total_cost=0.003,
25
+ )
26
+ self.reports.append(report)
27
+ return report
28
+
29
+ def aggregate(self) -> CostReportSchema | None:
30
+ self.calls.append(("aggregate", {}))
31
+
32
+ if not self.reports:
33
+ return None
34
+
35
+ total_cost = sum(r.total_cost for r in self.reports)
36
+ return CostReportSchema(
37
+ model="fake-model",
38
+ total_cost=total_cost,
39
+ input_cost=0.001 * len(self.reports),
40
+ output_cost=0.002 * len(self.reports),
41
+ prompt_tokens=sum(r.prompt_tokens for r in self.reports),
42
+ completion_tokens=sum(r.completion_tokens for r in self.reports),
43
+ )
44
+
45
+
46
+ @pytest.fixture
47
+ def fake_cost_service() -> "FakeCostService":
48
+ return FakeCostService()
@@ -0,0 +1,46 @@
1
+ import pytest
2
+
3
+ from ai_review.libs.diff.models import Diff
4
+ from ai_review.services.diff.schema import DiffFileSchema
5
+ from ai_review.services.diff.types import DiffServiceProtocol
6
+ from ai_review.services.git.types import GitServiceProtocol
7
+
8
+
9
+ class FakeDiffService(DiffServiceProtocol):
10
+ def __init__(self):
11
+ self.calls: list[tuple[str, dict]] = []
12
+
13
+ def parse(self, raw_diff: str) -> Diff:
14
+ self.calls.append(("parse", {"raw_diff": raw_diff}))
15
+ return Diff(files=[], raw=raw_diff)
16
+
17
+ def render_file(
18
+ self,
19
+ file: str,
20
+ raw_diff: str,
21
+ base_sha: str | None = None,
22
+ head_sha: str | None = None,
23
+ ) -> DiffFileSchema:
24
+ self.calls.append((
25
+ "render_file",
26
+ {"file": file, "raw_diff": raw_diff, "base_sha": base_sha, "head_sha": head_sha},
27
+ ))
28
+ return DiffFileSchema(file=file, diff=f"FAKE_DIFF_CONTENT for {file}")
29
+
30
+ def render_files(
31
+ self,
32
+ git: GitServiceProtocol,
33
+ files: list[str],
34
+ base_sha: str,
35
+ head_sha: str,
36
+ ) -> list[DiffFileSchema]:
37
+ self.calls.append((
38
+ "render_files",
39
+ {"git": git, "files": files, "base_sha": base_sha, "head_sha": head_sha},
40
+ ))
41
+ return [DiffFileSchema(file=file, diff=f"FAKE_DIFF for {file}") for file in files]
42
+
43
+
44
+ @pytest.fixture
45
+ def fake_diff_service() -> FakeDiffService:
46
+ return FakeDiffService()
@@ -1,4 +1,3 @@
1
- # ai_review/tests/conftest.py
2
1
  from typing import Any
3
2
 
4
3
  import pytest
@@ -7,25 +6,32 @@ from ai_review.services.git.types import GitServiceProtocol
7
6
 
8
7
 
9
8
  class FakeGitService(GitServiceProtocol):
10
- """Simple fake for GitService used in tests."""
11
-
12
9
  def __init__(self, responses: dict[str, Any] | None = None) -> None:
10
+ self.calls: list[tuple[str, dict]] = []
13
11
  self.responses = responses or {}
14
12
 
15
13
  def get_diff(self, base_sha: str, head_sha: str, unified: int = 3) -> str:
14
+ self.calls.append(("get_diff", {"base_sha": base_sha, "head_sha": head_sha, "unified": unified}))
16
15
  return self.responses.get("get_diff", "")
17
16
 
18
17
  def get_diff_for_file(self, base_sha: str, head_sha: str, file: str, unified: int = 3) -> str:
18
+ self.calls.append(
19
+ (
20
+ "get_diff_for_file",
21
+ {"base_sha": base_sha, "head_sha": head_sha, "file": file, "unified": unified}
22
+ )
23
+ )
19
24
  return self.responses.get("get_diff_for_file", "")
20
25
 
21
26
  def get_changed_files(self, base_sha: str, head_sha: str) -> list[str]:
27
+ self.calls.append(("get_changed_files", {"base_sha": base_sha, "head_sha": head_sha}))
22
28
  return self.responses.get("get_changed_files", [])
23
29
 
24
30
  def get_file_at_commit(self, file_path: str, sha: str) -> str | None:
31
+ self.calls.append(("get_file_at_commit", {"file_path": file_path, "sha": sha}))
25
32
  return self.responses.get("get_file_at_commit", None)
26
33
 
27
34
 
28
35
  @pytest.fixture
29
- def fake_git() -> FakeGitService:
30
- """Default fake GitService with empty responses."""
36
+ def fake_git_service() -> FakeGitService:
31
37
  return FakeGitService()
@@ -0,0 +1,26 @@
1
+ from typing import Any
2
+
3
+ import pytest
4
+
5
+ from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
6
+
7
+
8
+ class FakeLLMClient(LLMClientProtocol):
9
+ def __init__(self, responses: dict[str, Any] | None = None) -> None:
10
+ self.calls: list[tuple[str, dict]] = []
11
+ self.responses = responses or {}
12
+
13
+ async def chat(self, prompt: str, prompt_system: str) -> ChatResultSchema:
14
+ self.calls.append(("chat", {"prompt": prompt, "prompt_system": prompt_system}))
15
+
16
+ return ChatResultSchema(
17
+ text=self.responses.get("text", "FAKE_RESPONSE"),
18
+ total_tokens=self.responses.get("total_tokens", 42),
19
+ prompt_tokens=self.responses.get("prompt_tokens", 21),
20
+ completion_tokens=self.responses.get("completion_tokens", 21),
21
+ )
22
+
23
+
24
+ @pytest.fixture
25
+ def fake_llm_client() -> FakeLLMClient:
26
+ return FakeLLMClient()
@@ -0,0 +1,43 @@
1
+ import pytest
2
+
3
+ from ai_review.services.diff.schema import DiffFileSchema
4
+ from ai_review.services.prompt.schema import PromptContextSchema
5
+ from ai_review.services.prompt.types import PromptServiceProtocol
6
+
7
+
8
+ class FakePromptService(PromptServiceProtocol):
9
+ def __init__(self):
10
+ self.calls: list[tuple[str, dict]] = []
11
+
12
+ def prepare_prompt(self, prompts: list[str], context: PromptContextSchema) -> str:
13
+ self.calls.append(("prepare_prompt", {"prompts": prompts, "context": context}))
14
+ return "FAKE_PROMPT"
15
+
16
+ def build_inline_request(self, diff: DiffFileSchema, context: PromptContextSchema) -> str:
17
+ self.calls.append(("build_inline_request", {"diff": diff, "context": context}))
18
+ return f"INLINE_PROMPT_FOR_{diff.file}"
19
+
20
+ def build_summary_request(self, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
21
+ self.calls.append(("build_summary_request", {"diffs": diffs, "context": context}))
22
+ return "SUMMARY_PROMPT"
23
+
24
+ def build_context_request(self, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
25
+ self.calls.append(("build_context_request", {"diffs": diffs, "context": context}))
26
+ return "CONTEXT_PROMPT"
27
+
28
+ def build_system_inline_request(self, context: PromptContextSchema) -> str:
29
+ self.calls.append(("build_system_inline_request", {"context": context}))
30
+ return "SYSTEM_INLINE_PROMPT"
31
+
32
+ def build_system_context_request(self, context: PromptContextSchema) -> str:
33
+ self.calls.append(("build_system_context_request", {"context": context}))
34
+ return "SYSTEM_CONTEXT_PROMPT"
35
+
36
+ def build_system_summary_request(self, context: PromptContextSchema) -> str:
37
+ self.calls.append(("build_system_summary_request", {"context": context}))
38
+ return "SYSTEM_SUMMARY_PROMPT"
39
+
40
+
41
+ @pytest.fixture
42
+ def fake_prompt_service() -> FakePromptService:
43
+ return FakePromptService()
File without changes
@@ -0,0 +1,25 @@
1
+ import pytest
2
+
3
+ from ai_review.services.review.inline.schema import InlineCommentListSchema, InlineCommentSchema
4
+ from ai_review.services.review.inline.types import InlineCommentServiceProtocol
5
+
6
+
7
+ class FakeInlineCommentService(InlineCommentServiceProtocol):
8
+ def __init__(self, comments: list[InlineCommentSchema] | None = None):
9
+ self.calls: list[tuple[str, dict]] = []
10
+ self.comments = comments or [
11
+ InlineCommentSchema(file="main.py", line=1, message="Test comment"),
12
+ ]
13
+
14
+ def parse_model_output(self, output: str) -> InlineCommentListSchema:
15
+ self.calls.append(("parse_model_output", {"output": output}))
16
+ return InlineCommentListSchema(root=self.comments)
17
+
18
+ def try_parse_model_output(self, raw: str) -> InlineCommentListSchema | None:
19
+ self.calls.append(("try_parse_model_output", {"raw": raw}))
20
+ return InlineCommentListSchema(root=self.comments)
21
+
22
+
23
+ @pytest.fixture
24
+ def fake_inline_comment_service() -> FakeInlineCommentService:
25
+ return FakeInlineCommentService()
@@ -0,0 +1,19 @@
1
+ import pytest
2
+
3
+ from ai_review.services.review.summary.schema import SummaryCommentSchema
4
+ from ai_review.services.review.summary.types import SummaryCommentServiceProtocol
5
+
6
+
7
+ class FakeSummaryCommentService(SummaryCommentServiceProtocol):
8
+ def __init__(self, text: str = "This is a summary comment"):
9
+ self.text = text
10
+ self.calls: list[tuple[str, dict]] = []
11
+
12
+ def parse_model_output(self, output: str) -> SummaryCommentSchema:
13
+ self.calls.append(("parse_model_output", {"output": output}))
14
+ return SummaryCommentSchema(text=self.text)
15
+
16
+
17
+ @pytest.fixture
18
+ def fake_summary_comment_service() -> FakeSummaryCommentService:
19
+ return FakeSummaryCommentService()
@@ -0,0 +1,49 @@
1
+ from typing import Any
2
+
3
+ import pytest
4
+
5
+ from ai_review.services.vcs.types import (
6
+ VCSClientProtocol,
7
+ ReviewInfoSchema,
8
+ ReviewCommentSchema,
9
+ )
10
+
11
+
12
+ class FakeVCSClient(VCSClientProtocol):
13
+ def __init__(self, responses: dict[str, Any] | None = None) -> None:
14
+ self.calls: list[tuple[str, tuple, dict]] = []
15
+ self.responses = responses or {}
16
+
17
+ async def get_review_info(self) -> ReviewInfoSchema:
18
+ self.calls.append(("get_review_info", (), {}))
19
+ return self.responses.get(
20
+ "get_review_info",
21
+ ReviewInfoSchema(changed_files=["file.py"], base_sha="A", head_sha="B")
22
+ )
23
+
24
+ async def get_general_comments(self) -> list[ReviewCommentSchema]:
25
+ self.calls.append(("get_general_comments", (), {}))
26
+ return self.responses.get("get_general_comments", [])
27
+
28
+ async def get_inline_comments(self) -> list[ReviewCommentSchema]:
29
+ self.calls.append(("get_inline_comments", (), {}))
30
+ return self.responses.get("get_inline_comments", [])
31
+
32
+ async def create_general_comment(self, message: str) -> None:
33
+ self.calls.append(("create_general_comment", (message,), {}))
34
+ if error := self.responses.get("create_general_comment_error"):
35
+ raise error
36
+
37
+ return self.responses.get("create_general_comment_result", None)
38
+
39
+ async def create_inline_comment(self, file: str, line: int, message: str) -> None:
40
+ self.calls.append(("create_inline_comment", (file, line, message), {}))
41
+ if error := self.responses.get("create_inline_comment_error"):
42
+ raise error
43
+
44
+ return self.responses.get("create_inline_comment_result", None)
45
+
46
+
47
+ @pytest.fixture
48
+ def fake_vcs_client() -> FakeVCSClient:
49
+ return FakeVCSClient()
@@ -69,16 +69,16 @@ def test_render_file_returns_unsupported(monkeypatch: pytest.MonkeyPatch, fake_d
69
69
 
70
70
 
71
71
  def test_render_files_invokes_render_file(
72
- fake_git: FakeGitService,
73
72
  fake_diff: Diff,
74
73
  monkeypatch: pytest.MonkeyPatch,
74
+ fake_git_service: FakeGitService,
75
75
  ) -> None:
76
76
  monkeypatch.setattr("ai_review.services.diff.service.DiffParser.parse", lambda _: fake_diff)
77
77
  monkeypatch.setattr(config.settings.review, "mode", ReviewMode.FULL_FILE_DIFF)
78
78
 
79
- fake_git.responses["get_diff_for_file"] = "fake-diff"
79
+ fake_git_service.responses["get_diff_for_file"] = "fake-diff"
80
80
 
81
- out = DiffService.render_files(git=fake_git, base_sha="A", head_sha="B", files=["b/x"])
81
+ out = DiffService.render_files(git=fake_git_service, base_sha="A", head_sha="B", files=["b/x"])
82
82
  assert out
83
83
  assert out[0].file == "b/x"
84
84
  assert out[0].diff.startswith("# No matching lines for mode")