xai-review 0.20.0__py3-none-any.whl → 0.22.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in the supported public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of xai-review might be problematic. Click here for more details.

Files changed (95)
  1. ai_review/clients/claude/client.py +1 -1
  2. ai_review/clients/gemini/client.py +1 -1
  3. ai_review/clients/github/client.py +1 -1
  4. ai_review/clients/github/pr/client.py +64 -16
  5. ai_review/clients/github/pr/schema/comments.py +4 -0
  6. ai_review/clients/github/pr/schema/files.py +4 -0
  7. ai_review/clients/github/pr/schema/reviews.py +4 -0
  8. ai_review/clients/github/pr/types.py +49 -0
  9. ai_review/clients/gitlab/client.py +1 -1
  10. ai_review/clients/gitlab/mr/client.py +25 -8
  11. ai_review/clients/gitlab/mr/schema/discussions.py +4 -0
  12. ai_review/clients/gitlab/mr/schema/notes.py +4 -0
  13. ai_review/clients/gitlab/mr/types.py +35 -0
  14. ai_review/clients/openai/client.py +1 -1
  15. ai_review/config.py +2 -0
  16. ai_review/libs/asynchronous/gather.py +6 -3
  17. ai_review/libs/config/core.py +5 -0
  18. ai_review/libs/http/event_hooks/logger.py +5 -2
  19. ai_review/libs/http/transports/retry.py +23 -6
  20. ai_review/services/artifacts/service.py +2 -1
  21. ai_review/services/artifacts/types.py +20 -0
  22. ai_review/services/cost/service.py +2 -1
  23. ai_review/services/cost/types.py +12 -0
  24. ai_review/services/diff/service.py +2 -1
  25. ai_review/services/diff/types.py +28 -0
  26. ai_review/services/hook/__init__.py +5 -0
  27. ai_review/services/hook/constants.py +24 -0
  28. ai_review/services/hook/service.py +162 -0
  29. ai_review/services/hook/types.py +28 -0
  30. ai_review/services/llm/claude/client.py +2 -2
  31. ai_review/services/llm/factory.py +2 -2
  32. ai_review/services/llm/gemini/client.py +2 -2
  33. ai_review/services/llm/openai/client.py +2 -2
  34. ai_review/services/llm/types.py +1 -1
  35. ai_review/services/prompt/service.py +2 -1
  36. ai_review/services/prompt/types.py +27 -0
  37. ai_review/services/review/gateway/__init__.py +0 -0
  38. ai_review/services/review/gateway/comment.py +65 -0
  39. ai_review/services/review/gateway/llm.py +40 -0
  40. ai_review/services/review/inline/schema.py +2 -2
  41. ai_review/services/review/inline/service.py +2 -1
  42. ai_review/services/review/inline/types.py +11 -0
  43. ai_review/services/review/service.py +23 -74
  44. ai_review/services/review/summary/service.py +2 -1
  45. ai_review/services/review/summary/types.py +8 -0
  46. ai_review/services/vcs/factory.py +2 -2
  47. ai_review/services/vcs/github/client.py +4 -2
  48. ai_review/services/vcs/gitlab/client.py +4 -2
  49. ai_review/services/vcs/types.py +1 -1
  50. ai_review/tests/fixtures/clients/__init__.py +0 -0
  51. ai_review/tests/fixtures/clients/claude.py +22 -0
  52. ai_review/tests/fixtures/clients/gemini.py +21 -0
  53. ai_review/tests/fixtures/clients/github.py +181 -0
  54. ai_review/tests/fixtures/clients/gitlab.py +150 -0
  55. ai_review/tests/fixtures/clients/openai.py +21 -0
  56. ai_review/tests/fixtures/services/__init__.py +0 -0
  57. ai_review/tests/fixtures/services/artifacts.py +51 -0
  58. ai_review/tests/fixtures/services/cost.py +48 -0
  59. ai_review/tests/fixtures/services/diff.py +46 -0
  60. ai_review/tests/fixtures/{git.py → services/git.py} +11 -5
  61. ai_review/tests/fixtures/services/llm.py +26 -0
  62. ai_review/tests/fixtures/services/prompt.py +43 -0
  63. ai_review/tests/fixtures/services/review/__init__.py +0 -0
  64. ai_review/tests/fixtures/services/review/inline.py +25 -0
  65. ai_review/tests/fixtures/services/review/summary.py +19 -0
  66. ai_review/tests/fixtures/services/vcs.py +49 -0
  67. ai_review/tests/suites/clients/claude/test_client.py +1 -20
  68. ai_review/tests/suites/clients/gemini/test_client.py +1 -19
  69. ai_review/tests/suites/clients/github/test_client.py +1 -23
  70. ai_review/tests/suites/clients/gitlab/test_client.py +1 -22
  71. ai_review/tests/suites/clients/openai/test_client.py +1 -19
  72. ai_review/tests/suites/libs/asynchronous/__init__.py +0 -0
  73. ai_review/tests/suites/libs/asynchronous/test_gather.py +46 -0
  74. ai_review/tests/suites/services/diff/test_service.py +4 -4
  75. ai_review/tests/suites/services/diff/test_tools.py +10 -10
  76. ai_review/tests/suites/services/hook/__init__.py +0 -0
  77. ai_review/tests/suites/services/hook/test_service.py +93 -0
  78. ai_review/tests/suites/services/llm/__init__.py +0 -0
  79. ai_review/tests/suites/services/llm/test_factory.py +30 -0
  80. ai_review/tests/suites/services/review/inline/test_schema.py +10 -9
  81. ai_review/tests/suites/services/review/summary/test_schema.py +0 -1
  82. ai_review/tests/suites/services/review/summary/test_service.py +10 -7
  83. ai_review/tests/suites/services/review/test_service.py +126 -0
  84. ai_review/tests/suites/services/vcs/__init__.py +0 -0
  85. ai_review/tests/suites/services/vcs/github/__init__.py +0 -0
  86. ai_review/tests/suites/services/vcs/github/test_service.py +114 -0
  87. ai_review/tests/suites/services/vcs/gitlab/__init__.py +0 -0
  88. ai_review/tests/suites/services/vcs/gitlab/test_service.py +123 -0
  89. ai_review/tests/suites/services/vcs/test_factory.py +23 -0
  90. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/METADATA +5 -2
  91. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/RECORD +95 -50
  92. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/WHEEL +0 -0
  93. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/entry_points.txt +0 -0
  94. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/licenses/LICENSE +0 -0
  95. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,26 @@
1
+ from typing import Any
2
+
3
+ import pytest
4
+
5
+ from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
6
+
7
+
8
+ class FakeLLMClient(LLMClientProtocol):
9
+ def __init__(self, responses: dict[str, Any] | None = None) -> None:
10
+ self.calls: list[tuple[str, dict]] = []
11
+ self.responses = responses or {}
12
+
13
+ async def chat(self, prompt: str, prompt_system: str) -> ChatResultSchema:
14
+ self.calls.append(("chat", {"prompt": prompt, "prompt_system": prompt_system}))
15
+
16
+ return ChatResultSchema(
17
+ text=self.responses.get("text", "FAKE_RESPONSE"),
18
+ total_tokens=self.responses.get("total_tokens", 42),
19
+ prompt_tokens=self.responses.get("prompt_tokens", 21),
20
+ completion_tokens=self.responses.get("completion_tokens", 21),
21
+ )
22
+
23
+
24
+ @pytest.fixture
25
+ def fake_llm_client() -> FakeLLMClient:
26
+ return FakeLLMClient()
@@ -0,0 +1,43 @@
1
+ import pytest
2
+
3
+ from ai_review.services.diff.schema import DiffFileSchema
4
+ from ai_review.services.prompt.schema import PromptContextSchema
5
+ from ai_review.services.prompt.types import PromptServiceProtocol
6
+
7
+
8
+ class FakePromptService(PromptServiceProtocol):
9
+ def __init__(self):
10
+ self.calls: list[tuple[str, dict]] = []
11
+
12
+ def prepare_prompt(self, prompts: list[str], context: PromptContextSchema) -> str:
13
+ self.calls.append(("prepare_prompt", {"prompts": prompts, "context": context}))
14
+ return "FAKE_PROMPT"
15
+
16
+ def build_inline_request(self, diff: DiffFileSchema, context: PromptContextSchema) -> str:
17
+ self.calls.append(("build_inline_request", {"diff": diff, "context": context}))
18
+ return f"INLINE_PROMPT_FOR_{diff.file}"
19
+
20
+ def build_summary_request(self, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
21
+ self.calls.append(("build_summary_request", {"diffs": diffs, "context": context}))
22
+ return "SUMMARY_PROMPT"
23
+
24
+ def build_context_request(self, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
25
+ self.calls.append(("build_context_request", {"diffs": diffs, "context": context}))
26
+ return "CONTEXT_PROMPT"
27
+
28
+ def build_system_inline_request(self, context: PromptContextSchema) -> str:
29
+ self.calls.append(("build_system_inline_request", {"context": context}))
30
+ return "SYSTEM_INLINE_PROMPT"
31
+
32
+ def build_system_context_request(self, context: PromptContextSchema) -> str:
33
+ self.calls.append(("build_system_context_request", {"context": context}))
34
+ return "SYSTEM_CONTEXT_PROMPT"
35
+
36
+ def build_system_summary_request(self, context: PromptContextSchema) -> str:
37
+ self.calls.append(("build_system_summary_request", {"context": context}))
38
+ return "SYSTEM_SUMMARY_PROMPT"
39
+
40
+
41
+ @pytest.fixture
42
+ def fake_prompt_service() -> FakePromptService:
43
+ return FakePromptService()
File without changes
@@ -0,0 +1,25 @@
1
+ import pytest
2
+
3
+ from ai_review.services.review.inline.schema import InlineCommentListSchema, InlineCommentSchema
4
+ from ai_review.services.review.inline.types import InlineCommentServiceProtocol
5
+
6
+
7
+ class FakeInlineCommentService(InlineCommentServiceProtocol):
8
+ def __init__(self, comments: list[InlineCommentSchema] | None = None):
9
+ self.calls: list[tuple[str, dict]] = []
10
+ self.comments = comments or [
11
+ InlineCommentSchema(file="main.py", line=1, message="Test comment"),
12
+ ]
13
+
14
+ def parse_model_output(self, output: str) -> InlineCommentListSchema:
15
+ self.calls.append(("parse_model_output", {"output": output}))
16
+ return InlineCommentListSchema(root=self.comments)
17
+
18
+ def try_parse_model_output(self, raw: str) -> InlineCommentListSchema | None:
19
+ self.calls.append(("try_parse_model_output", {"raw": raw}))
20
+ return InlineCommentListSchema(root=self.comments)
21
+
22
+
23
+ @pytest.fixture
24
+ def fake_inline_comment_service() -> FakeInlineCommentService:
25
+ return FakeInlineCommentService()
@@ -0,0 +1,19 @@
1
+ import pytest
2
+
3
+ from ai_review.services.review.summary.schema import SummaryCommentSchema
4
+ from ai_review.services.review.summary.types import SummaryCommentServiceProtocol
5
+
6
+
7
+ class FakeSummaryCommentService(SummaryCommentServiceProtocol):
8
+ def __init__(self, text: str = "This is a summary comment"):
9
+ self.text = text
10
+ self.calls: list[tuple[str, dict]] = []
11
+
12
+ def parse_model_output(self, output: str) -> SummaryCommentSchema:
13
+ self.calls.append(("parse_model_output", {"output": output}))
14
+ return SummaryCommentSchema(text=self.text)
15
+
16
+
17
+ @pytest.fixture
18
+ def fake_summary_comment_service() -> FakeSummaryCommentService:
19
+ return FakeSummaryCommentService()
@@ -0,0 +1,49 @@
1
+ from typing import Any
2
+
3
+ import pytest
4
+
5
+ from ai_review.services.vcs.types import (
6
+ VCSClientProtocol,
7
+ ReviewInfoSchema,
8
+ ReviewCommentSchema,
9
+ )
10
+
11
+
12
+ class FakeVCSClient(VCSClientProtocol):
13
+ def __init__(self, responses: dict[str, Any] | None = None) -> None:
14
+ self.calls: list[tuple[str, tuple, dict]] = []
15
+ self.responses = responses or {}
16
+
17
+ async def get_review_info(self) -> ReviewInfoSchema:
18
+ self.calls.append(("get_review_info", (), {}))
19
+ return self.responses.get(
20
+ "get_review_info",
21
+ ReviewInfoSchema(changed_files=["file.py"], base_sha="A", head_sha="B")
22
+ )
23
+
24
+ async def get_general_comments(self) -> list[ReviewCommentSchema]:
25
+ self.calls.append(("get_general_comments", (), {}))
26
+ return self.responses.get("get_general_comments", [])
27
+
28
+ async def get_inline_comments(self) -> list[ReviewCommentSchema]:
29
+ self.calls.append(("get_inline_comments", (), {}))
30
+ return self.responses.get("get_inline_comments", [])
31
+
32
+ async def create_general_comment(self, message: str) -> None:
33
+ self.calls.append(("create_general_comment", (message,), {}))
34
+ if error := self.responses.get("create_general_comment_error"):
35
+ raise error
36
+
37
+ return self.responses.get("create_general_comment_result", None)
38
+
39
+ async def create_inline_comment(self, file: str, line: int, message: str) -> None:
40
+ self.calls.append(("create_inline_comment", (file, line, message), {}))
41
+ if error := self.responses.get("create_inline_comment_error"):
42
+ raise error
43
+
44
+ return self.responses.get("create_inline_comment_result", None)
45
+
46
+
47
+ @pytest.fixture
48
+ def fake_vcs_client() -> FakeVCSClient:
49
+ return FakeVCSClient()
@@ -1,29 +1,10 @@
1
1
  import pytest
2
2
  from httpx import AsyncClient
3
- from pydantic import HttpUrl, SecretStr
4
3
 
5
4
  from ai_review.clients.claude.client import get_claude_http_client, ClaudeHTTPClient
6
- from ai_review.config import settings
7
- from ai_review.libs.config.claude import ClaudeMetaConfig
8
- from ai_review.libs.config.llm import ClaudeLLMConfig, ClaudeHTTPClientConfig
9
- from ai_review.libs.constants.llm_provider import LLMProvider
10
-
11
-
12
- @pytest.fixture(autouse=True)
13
- def claude_http_client_config(monkeypatch):
14
- fake_config = ClaudeLLMConfig(
15
- meta=ClaudeMetaConfig(),
16
- provider=LLMProvider.CLAUDE,
17
- http_client=ClaudeHTTPClientConfig(
18
- timeout=10,
19
- api_url=HttpUrl("https://api.anthropic.com"),
20
- api_token=SecretStr("fake-token"),
21
- api_version="2023-06-01",
22
- )
23
- )
24
- monkeypatch.setattr(settings, "llm", fake_config)
25
5
 
26
6
 
7
+ @pytest.mark.usefixtures('claude_http_client_config')
27
8
  def test_get_claude_http_client_builds_ok():
28
9
  claude_http_client = get_claude_http_client()
29
10
 
@@ -1,28 +1,10 @@
1
1
  import pytest
2
2
  from httpx import AsyncClient
3
- from pydantic import HttpUrl, SecretStr
4
3
 
5
4
  from ai_review.clients.gemini.client import get_gemini_http_client, GeminiHTTPClient
6
- from ai_review.config import settings
7
- from ai_review.libs.config.gemini import GeminiMetaConfig, GeminiHTTPClientConfig
8
- from ai_review.libs.config.llm import GeminiLLMConfig
9
- from ai_review.libs.constants.llm_provider import LLMProvider
10
-
11
-
12
- @pytest.fixture(autouse=True)
13
- def gemini_http_client_config(monkeypatch):
14
- fake_config = GeminiLLMConfig(
15
- meta=GeminiMetaConfig(),
16
- provider=LLMProvider.GEMINI,
17
- http_client=GeminiHTTPClientConfig(
18
- timeout=10,
19
- api_url=HttpUrl("https://generativelanguage.googleapis.com"),
20
- api_token=SecretStr("fake-token"),
21
- )
22
- )
23
- monkeypatch.setattr(settings, "llm", fake_config)
24
5
 
25
6
 
7
+ @pytest.mark.usefixtures('gemini_http_client_config')
26
8
  def test_get_gemini_http_client_builds_ok():
27
9
  gemini_http_client = get_gemini_http_client()
28
10
 
@@ -1,33 +1,11 @@
1
1
  import pytest
2
2
  from httpx import AsyncClient
3
- from pydantic import HttpUrl, SecretStr
4
3
 
5
4
  from ai_review.clients.github.client import get_github_http_client, GitHubHTTPClient
6
5
  from ai_review.clients.github.pr.client import GitHubPullRequestsHTTPClient
7
- from ai_review.config import settings
8
- from ai_review.libs.config.github import GitHubPipelineConfig, GitHubHTTPClientConfig
9
- from ai_review.libs.config.vcs import GitHubVCSConfig
10
- from ai_review.libs.constants.vcs_provider import VCSProvider
11
-
12
-
13
- @pytest.fixture(autouse=True)
14
- def github_http_client_config(monkeypatch: pytest.MonkeyPatch):
15
- fake_config = GitHubVCSConfig(
16
- provider=VCSProvider.GITHUB,
17
- pipeline=GitHubPipelineConfig(
18
- repo="repo",
19
- owner="owner",
20
- pull_number="pull_number"
21
- ),
22
- http_client=GitHubHTTPClientConfig(
23
- timeout=10,
24
- api_url=HttpUrl("https://github.com"),
25
- api_token=SecretStr("fake-token"),
26
- )
27
- )
28
- monkeypatch.setattr(settings, "vcs", fake_config)
29
6
 
30
7
 
8
+ @pytest.mark.usefixtures("github_http_client_config")
31
9
  def test_get_github_http_client_builds_ok():
32
10
  github_http_client = get_github_http_client()
33
11
 
@@ -1,32 +1,11 @@
1
1
  import pytest
2
2
  from httpx import AsyncClient
3
- from pydantic import HttpUrl, SecretStr
4
3
 
5
4
  from ai_review.clients.gitlab.client import get_gitlab_http_client, GitLabHTTPClient
6
5
  from ai_review.clients.gitlab.mr.client import GitLabMergeRequestsHTTPClient
7
- from ai_review.config import settings
8
- from ai_review.libs.config.gitlab import GitLabPipelineConfig, GitLabHTTPClientConfig
9
- from ai_review.libs.config.vcs import GitLabVCSConfig
10
- from ai_review.libs.constants.vcs_provider import VCSProvider
11
-
12
-
13
- @pytest.fixture(autouse=True)
14
- def gitlab_http_client_config(monkeypatch: pytest.MonkeyPatch):
15
- fake_config = GitLabVCSConfig(
16
- provider=VCSProvider.GITLAB,
17
- pipeline=GitLabPipelineConfig(
18
- project_id="project-id",
19
- merge_request_id="merge-request-id"
20
- ),
21
- http_client=GitLabHTTPClientConfig(
22
- timeout=10,
23
- api_url=HttpUrl("https://gitlab.com"),
24
- api_token=SecretStr("fake-token"),
25
- )
26
- )
27
- monkeypatch.setattr(settings, "vcs", fake_config)
28
6
 
29
7
 
8
+ @pytest.mark.usefixtures("gitlab_http_client_config")
30
9
  def test_get_gitlab_http_client_builds_ok():
31
10
  gitlab_http_client = get_gitlab_http_client()
32
11
 
@@ -1,28 +1,10 @@
1
1
  import pytest
2
2
  from httpx import AsyncClient
3
- from pydantic import HttpUrl, SecretStr
4
3
 
5
4
  from ai_review.clients.openai.client import get_openai_http_client, OpenAIHTTPClient
6
- from ai_review.config import settings
7
- from ai_review.libs.config.llm import OpenAILLMConfig
8
- from ai_review.libs.config.openai import OpenAIMetaConfig, OpenAIHTTPClientConfig
9
- from ai_review.libs.constants.llm_provider import LLMProvider
10
-
11
-
12
- @pytest.fixture(autouse=True)
13
- def openai_http_client_config(monkeypatch):
14
- fake_config = OpenAILLMConfig(
15
- meta=OpenAIMetaConfig(),
16
- provider=LLMProvider.OPENAI,
17
- http_client=OpenAIHTTPClientConfig(
18
- timeout=10,
19
- api_url=HttpUrl("https://api.openai.com/v1"),
20
- api_token=SecretStr("fake-token"),
21
- )
22
- )
23
- monkeypatch.setattr(settings, "llm", fake_config)
24
5
 
25
6
 
7
+ @pytest.mark.usefixtures('openai_http_client_config')
26
8
  def test_get_openai_http_client_builds_ok():
27
9
  openai_http_client = get_openai_http_client()
28
10
 
File without changes
@@ -0,0 +1,46 @@
1
+ import asyncio
2
+
3
+ import pytest
4
+
5
+ from ai_review.config import settings
6
+ from ai_review.libs.asynchronous.gather import bounded_gather
7
+
8
+
9
+ @pytest.mark.asyncio
10
+ async def test_bounded_gather_limits_concurrency(monkeypatch: pytest.MonkeyPatch):
11
+ concurrency_limit = 3
12
+ monkeypatch.setattr(settings.core, "concurrency", concurrency_limit)
13
+
14
+ active = 0
15
+ max_active = 0
16
+
17
+ async def task(number: int):
18
+ nonlocal active, max_active
19
+ active += 1
20
+ max_active = max(max_active, active)
21
+ await asyncio.sleep(0.05)
22
+ active -= 1
23
+ return number * 2
24
+
25
+ results = await bounded_gather(task(index) for index in range(10))
26
+
27
+ assert max_active <= concurrency_limit
28
+ assert results == tuple(index * 2 for index in range(10))
29
+
30
+
31
+ @pytest.mark.asyncio
32
+ async def test_bounded_gather_returns_exceptions(monkeypatch: pytest.MonkeyPatch):
33
+ monkeypatch.setattr(settings.core, "concurrency", 2)
34
+
35
+ async def ok_task():
36
+ await asyncio.sleep(0.01)
37
+ return "ok"
38
+
39
+ async def fail_task():
40
+ raise ValueError("boom")
41
+
42
+ results = await bounded_gather([ok_task(), fail_task(), ok_task()])
43
+
44
+ assert len(results) == 3
45
+ assert any(isinstance(result, Exception) for result in results)
46
+ assert any(r == "ok" for r in results)
@@ -4,7 +4,7 @@ from ai_review import config
4
4
  from ai_review.libs.config.review import ReviewMode
5
5
  from ai_review.libs.diff.models import Diff, DiffFile, FileMode
6
6
  from ai_review.services.diff.service import DiffService
7
- from ai_review.tests.fixtures.git import FakeGitService
7
+ from ai_review.tests.fixtures.services.git import FakeGitService
8
8
 
9
9
 
10
10
  @pytest.fixture
@@ -69,16 +69,16 @@ def test_render_file_returns_unsupported(monkeypatch: pytest.MonkeyPatch, fake_d
69
69
 
70
70
 
71
71
  def test_render_files_invokes_render_file(
72
- fake_git: FakeGitService,
73
72
  fake_diff: Diff,
74
73
  monkeypatch: pytest.MonkeyPatch,
74
+ fake_git_service: FakeGitService,
75
75
  ) -> None:
76
76
  monkeypatch.setattr("ai_review.services.diff.service.DiffParser.parse", lambda _: fake_diff)
77
77
  monkeypatch.setattr(config.settings.review, "mode", ReviewMode.FULL_FILE_DIFF)
78
78
 
79
- fake_git.responses["get_diff_for_file"] = "fake-diff"
79
+ fake_git_service.responses["get_diff_for_file"] = "fake-diff"
80
80
 
81
- out = DiffService.render_files(git=fake_git, base_sha="A", head_sha="B", files=["b/x"])
81
+ out = DiffService.render_files(git=fake_git_service, base_sha="A", head_sha="B", files=["b/x"])
82
82
  assert out
83
83
  assert out[0].file == "b/x"
84
84
  assert out[0].diff.startswith("# No matching lines for mode")
@@ -5,7 +5,7 @@ import pytest
5
5
 
6
6
  from ai_review.libs.diff.models import Diff, DiffFile, DiffHunk, DiffRange, DiffLineType, FileMode
7
7
  from ai_review.services.diff import tools
8
- from ai_review.tests.fixtures.git import FakeGitService
8
+ from ai_review.tests.fixtures.services.git import FakeGitService
9
9
 
10
10
 
11
11
  # ---------- normalize_file_path ----------
@@ -55,23 +55,23 @@ def test_find_diff_file_not_found_returns_none() -> None:
55
55
 
56
56
  # ---------- read_snapshot ----------
57
57
 
58
- def test_read_snapshot_prefers_git(monkeypatch: pytest.MonkeyPatch, fake_git: FakeGitService) -> None:
59
- fake_git.responses["get_file_at_commit"] = "from git"
60
- monkeypatch.setattr(tools, "GitService", lambda: fake_git)
58
+ def test_read_snapshot_prefers_git(monkeypatch: pytest.MonkeyPatch, fake_git_service: FakeGitService) -> None:
59
+ fake_git_service.responses["get_file_at_commit"] = "from git"
60
+ monkeypatch.setattr(tools, "GitService", lambda: fake_git_service)
61
61
 
62
62
  assert tools.read_snapshot("foo.py", head_sha="HEAD") == "from git"
63
63
 
64
64
 
65
65
  def test_read_snapshot_fallback_to_filesystem(
66
66
  tmp_path: Path,
67
- fake_git: FakeGitService,
68
67
  monkeypatch: pytest.MonkeyPatch,
68
+ fake_git_service: FakeGitService,
69
69
  ) -> None:
70
70
  file = tmp_path / "file.txt"
71
71
  file.write_text("hello")
72
72
 
73
- fake_git.responses["get_file_at_commit"] = None
74
- monkeypatch.setattr(tools, "GitService", lambda: fake_git)
73
+ fake_git_service.responses["get_file_at_commit"] = None
74
+ monkeypatch.setattr(tools, "GitService", lambda: fake_git_service)
75
75
 
76
76
  result = tools.read_snapshot(str(file))
77
77
  assert result == "hello"
@@ -79,11 +79,11 @@ def test_read_snapshot_fallback_to_filesystem(
79
79
 
80
80
  def test_read_snapshot_returns_none_if_missing(
81
81
  tmp_path: Path,
82
- fake_git: FakeGitService,
83
82
  monkeypatch: pytest.MonkeyPatch,
83
+ fake_git_service: FakeGitService,
84
84
  ) -> None:
85
- fake_git.responses["get_file_at_commit"] = None
86
- monkeypatch.setattr(tools, "GitService", lambda: fake_git)
85
+ fake_git_service.responses["get_file_at_commit"] = None
86
+ monkeypatch.setattr(tools, "GitService", lambda: fake_git_service)
87
87
 
88
88
  assert tools.read_snapshot(str(tmp_path / "nope.txt")) is None
89
89
 
File without changes
@@ -0,0 +1,93 @@
1
+ import pytest
2
+
3
+ from ai_review.services.cost.schema import CostReportSchema
4
+ from ai_review.services.hook.constants import HookType
5
+ from ai_review.services.hook.service import HookService
6
+
7
+
8
+ @pytest.fixture
9
+ def hook_service() -> HookService:
10
+ """Return a fresh HookService instance for each test."""
11
+ return HookService()
12
+
13
+
14
+ @pytest.mark.asyncio
15
+ async def test_inject_and_emit_simple(hook_service: HookService):
16
+ """
17
+ Should register hook and invoke it with emitted args.
18
+ """
19
+ results = []
20
+
21
+ async def sample_hook(arg1: str, arg2: int):
22
+ results.append((arg1, arg2))
23
+
24
+ hook_service.inject_hook(HookType.ON_CHAT_START, sample_hook)
25
+ await hook_service.emit(HookType.ON_CHAT_START, "hi", 42)
26
+
27
+ assert results == [("hi", 42)]
28
+
29
+
30
+ @pytest.mark.asyncio
31
+ async def test_emit_without_hooks_does_nothing(hook_service: HookService):
32
+ """
33
+ If no hooks are registered, emit should silently return.
34
+ """
35
+ await hook_service.emit(HookType.ON_CHAT_COMPLETE, "text")
36
+
37
+
38
+ @pytest.mark.asyncio
39
+ async def test_emit_handles_hook_exception(monkeypatch: pytest.MonkeyPatch, hook_service: HookService):
40
+ """
41
+ Should catch exceptions in hook and log them, without breaking flow.
42
+ """
43
+ errors = []
44
+
45
+ async def failing_hook():
46
+ raise ValueError("Boom!")
47
+
48
+ def fake_logger_exception(message: str):
49
+ errors.append(message)
50
+
51
+ monkeypatch.setattr("ai_review.services.hook.service.logger.exception", fake_logger_exception)
52
+ hook_service.inject_hook(HookType.ON_CHAT_COMPLETE, failing_hook)
53
+
54
+ await hook_service.emit(HookType.ON_CHAT_COMPLETE)
55
+ assert any("Boom!" in message for message in errors)
56
+
57
+
58
+ @pytest.mark.asyncio
59
+ async def test_on_chat_start_decorator_registers_hook(hook_service: HookService):
60
+ """
61
+ Using @on_chat_start should register the callback.
62
+ """
63
+ results = []
64
+
65
+ @hook_service.on_chat_start
66
+ async def chat_start_hook(prompt: str, prompt_system: str):
67
+ results.append((prompt, prompt_system))
68
+
69
+ await hook_service.emit_chat_start("Hello", "SYS")
70
+ assert results == [("Hello", "SYS")]
71
+
72
+
73
+ @pytest.mark.asyncio
74
+ async def test_on_chat_complete_decorator_registers_hook(hook_service: HookService):
75
+ """
76
+ Using @on_chat_complete should register and trigger hook.
77
+ """
78
+ results = []
79
+
80
+ @hook_service.on_chat_complete
81
+ async def chat_complete_hook(result: str, report: CostReportSchema | None):
82
+ results.append((result, report))
83
+
84
+ cost_report = CostReportSchema(
85
+ model="gpt",
86
+ prompt_tokens=10,
87
+ completion_tokens=100,
88
+ total_cost=26,
89
+ input_cost=10.5,
90
+ output_cost=15.5
91
+ )
92
+ await hook_service.emit_chat_complete("done", cost_report)
93
+ assert results == [("done", cost_report)]
File without changes
@@ -0,0 +1,30 @@
1
+ import pytest
2
+
3
+ from ai_review.services.llm.claude.client import ClaudeLLMClient
4
+ from ai_review.services.llm.factory import get_llm_client
5
+ from ai_review.services.llm.gemini.client import GeminiLLMClient
6
+ from ai_review.services.llm.openai.client import OpenAILLMClient
7
+
8
+
9
+ @pytest.mark.usefixtures("openai_http_client_config")
10
+ def test_get_llm_client_returns_openai(monkeypatch: pytest.MonkeyPatch):
11
+ client = get_llm_client()
12
+ assert isinstance(client, OpenAILLMClient)
13
+
14
+
15
+ @pytest.mark.usefixtures("gemini_http_client_config")
16
+ def test_get_llm_client_returns_gemini(monkeypatch: pytest.MonkeyPatch):
17
+ client = get_llm_client()
18
+ assert isinstance(client, GeminiLLMClient)
19
+
20
+
21
+ @pytest.mark.usefixtures("claude_http_client_config")
22
+ def test_get_llm_client_returns_claude(monkeypatch: pytest.MonkeyPatch):
23
+ client = get_llm_client()
24
+ assert isinstance(client, ClaudeLLMClient)
25
+
26
+
27
+ def test_get_llm_client_unsupported_provider(monkeypatch: pytest.MonkeyPatch):
28
+ monkeypatch.setattr("ai_review.services.llm.factory.settings.llm.provider", "DEEPSEEK")
29
+ with pytest.raises(ValueError):
30
+ get_llm_client()
@@ -1,3 +1,5 @@
1
+ import pytest
2
+
1
3
  from ai_review.config import settings
2
4
  from ai_review.services.review.inline.schema import (
3
5
  InlineCommentSchema,
@@ -7,14 +9,14 @@ from ai_review.services.review.inline.schema import (
7
9
 
8
10
  def test_normalize_file_and_message():
9
11
  comment = InlineCommentSchema(file=" \\src\\main.py ", line=10, message=" fix bug ")
10
- assert comment.file == "src/main.py" # нормализуется и слеши, и пробелы
11
- assert comment.message == "fix bug" # пробелы убраны
12
+ assert comment.file == "src/main.py"
13
+ assert comment.message == "fix bug"
12
14
 
13
15
 
14
16
  def test_body_without_suggestion():
15
17
  comment = InlineCommentSchema(file="a.py", line=1, message="use f-string")
16
18
  assert comment.body == "use f-string"
17
- assert settings.review.inline_tag not in comment.body # тег ещё не добавлен
19
+ assert settings.review.inline_tag not in comment.body
18
20
 
19
21
 
20
22
  def test_body_with_suggestion():
@@ -31,18 +33,17 @@ def test_body_with_suggestion():
31
33
  assert comment.body == expected
32
34
 
33
35
 
34
- def test_body_with_tag(monkeypatch):
36
+ def test_body_with_tag(monkeypatch: pytest.MonkeyPatch):
35
37
  monkeypatch.setattr(settings.review, "inline_tag", "#ai-inline")
36
38
  comment = InlineCommentSchema(file="a.py", line=3, message="something")
37
39
  assert comment.body_with_tag.endswith("\n\n#ai-inline")
40
+ assert settings.review.inline_tag not in comment.body
38
41
 
39
42
 
40
- def test_fallback_body_with_tag(monkeypatch):
43
+ def test_fallback_body(monkeypatch: pytest.MonkeyPatch):
41
44
  monkeypatch.setattr(settings.review, "inline_tag", "#ai-inline")
42
45
  comment = InlineCommentSchema(file="a.py", line=42, message="missing check")
43
- body = comment.fallback_body_with_tag
44
- assert body.startswith("**a.py:42** — missing check")
45
- assert "#ai-inline" in body
46
+ assert comment.fallback_body.startswith("**a.py:42** — missing check")
46
47
 
47
48
 
48
49
  def test_dedup_key_differs_on_message_and_suggestion():
@@ -53,7 +54,7 @@ def test_dedup_key_differs_on_message_and_suggestion():
53
54
 
54
55
  def test_list_dedupe_removes_duplicates():
55
56
  c1 = InlineCommentSchema(file="a.py", line=1, message="msg one")
56
- c2 = InlineCommentSchema(file="a.py", line=1, message="msg one") # дубликат
57
+ c2 = InlineCommentSchema(file="a.py", line=1, message="msg one")
57
58
  c3 = InlineCommentSchema(file="a.py", line=2, message="msg two")
58
59
 
59
60
  comment_list = InlineCommentListSchema(root=[c1, c2, c3])
@@ -18,5 +18,4 @@ def test_body_with_tag_appends_tag(monkeypatch):
18
18
  body = comment.body_with_tag
19
19
  assert body.startswith("Review passed")
20
20
  assert body.endswith("\n\n#ai-summary")
21
- # убедимся, что перенос строки присутствует
22
21
  assert "\n\n#ai-summary" in body