xai-review 0.27.0__py3-none-any.whl → 0.28.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xai-review might be problematic; see the registry's advisory page for more details.

Files changed (147)
  1. ai_review/cli/commands/run_inline_reply_review.py +7 -0
  2. ai_review/cli/commands/run_summary_reply_review.py +7 -0
  3. ai_review/cli/main.py +17 -0
  4. ai_review/clients/bitbucket/pr/schema/comments.py +14 -0
  5. ai_review/clients/bitbucket/pr/schema/pull_request.py +1 -5
  6. ai_review/clients/bitbucket/pr/schema/user.py +7 -0
  7. ai_review/clients/github/pr/client.py +35 -4
  8. ai_review/clients/github/pr/schema/comments.py +21 -0
  9. ai_review/clients/github/pr/schema/pull_request.py +1 -4
  10. ai_review/clients/github/pr/schema/user.py +6 -0
  11. ai_review/clients/github/pr/types.py +11 -1
  12. ai_review/clients/gitlab/mr/client.py +32 -1
  13. ai_review/clients/gitlab/mr/schema/changes.py +1 -5
  14. ai_review/clients/gitlab/mr/schema/discussions.py +17 -7
  15. ai_review/clients/gitlab/mr/schema/notes.py +3 -0
  16. ai_review/clients/gitlab/mr/schema/user.py +7 -0
  17. ai_review/clients/gitlab/mr/types.py +16 -7
  18. ai_review/libs/config/prompt.py +96 -64
  19. ai_review/libs/config/review.py +2 -0
  20. ai_review/libs/llm/output_json_parser.py +60 -0
  21. ai_review/prompts/default_inline_reply.md +10 -0
  22. ai_review/prompts/default_summary_reply.md +14 -0
  23. ai_review/prompts/default_system_inline_reply.md +31 -0
  24. ai_review/prompts/default_system_summary_reply.md +13 -0
  25. ai_review/services/artifacts/schema.py +2 -2
  26. ai_review/services/hook/constants.py +14 -0
  27. ai_review/services/hook/service.py +95 -4
  28. ai_review/services/hook/types.py +18 -2
  29. ai_review/services/prompt/adapter.py +1 -1
  30. ai_review/services/prompt/service.py +49 -3
  31. ai_review/services/prompt/tools.py +21 -0
  32. ai_review/services/prompt/types.py +23 -0
  33. ai_review/services/review/gateway/comment.py +45 -6
  34. ai_review/services/review/gateway/llm.py +2 -1
  35. ai_review/services/review/gateway/types.py +50 -0
  36. ai_review/services/review/internal/inline/service.py +40 -0
  37. ai_review/services/review/internal/inline/types.py +8 -0
  38. ai_review/services/review/internal/inline_reply/schema.py +23 -0
  39. ai_review/services/review/internal/inline_reply/service.py +20 -0
  40. ai_review/services/review/internal/inline_reply/types.py +8 -0
  41. ai_review/services/review/{policy → internal/policy}/service.py +2 -1
  42. ai_review/services/review/internal/policy/types.py +15 -0
  43. ai_review/services/review/{summary → internal/summary}/service.py +2 -2
  44. ai_review/services/review/{summary → internal/summary}/types.py +1 -1
  45. ai_review/services/review/internal/summary_reply/__init__.py +0 -0
  46. ai_review/services/review/internal/summary_reply/schema.py +8 -0
  47. ai_review/services/review/internal/summary_reply/service.py +15 -0
  48. ai_review/services/review/internal/summary_reply/types.py +8 -0
  49. ai_review/services/review/runner/__init__.py +0 -0
  50. ai_review/services/review/runner/context.py +72 -0
  51. ai_review/services/review/runner/inline.py +80 -0
  52. ai_review/services/review/runner/inline_reply.py +80 -0
  53. ai_review/services/review/runner/summary.py +71 -0
  54. ai_review/services/review/runner/summary_reply.py +79 -0
  55. ai_review/services/review/runner/types.py +6 -0
  56. ai_review/services/review/service.py +78 -110
  57. ai_review/services/vcs/bitbucket/adapter.py +24 -0
  58. ai_review/services/vcs/bitbucket/client.py +107 -42
  59. ai_review/services/vcs/github/adapter.py +35 -0
  60. ai_review/services/vcs/github/client.py +105 -44
  61. ai_review/services/vcs/gitlab/adapter.py +26 -0
  62. ai_review/services/vcs/gitlab/client.py +91 -38
  63. ai_review/services/vcs/types.py +34 -0
  64. ai_review/tests/fixtures/clients/bitbucket.py +2 -2
  65. ai_review/tests/fixtures/clients/github.py +35 -6
  66. ai_review/tests/fixtures/clients/gitlab.py +42 -3
  67. ai_review/tests/fixtures/libs/__init__.py +0 -0
  68. ai_review/tests/fixtures/libs/llm/__init__.py +0 -0
  69. ai_review/tests/fixtures/libs/llm/output_json_parser.py +13 -0
  70. ai_review/tests/fixtures/services/hook.py +8 -0
  71. ai_review/tests/fixtures/services/llm.py +8 -5
  72. ai_review/tests/fixtures/services/prompt.py +70 -0
  73. ai_review/tests/fixtures/services/review/base.py +41 -0
  74. ai_review/tests/fixtures/services/review/gateway/__init__.py +0 -0
  75. ai_review/tests/fixtures/services/review/gateway/comment.py +98 -0
  76. ai_review/tests/fixtures/services/review/gateway/llm.py +17 -0
  77. ai_review/tests/fixtures/services/review/internal/__init__.py +0 -0
  78. ai_review/tests/fixtures/services/review/{inline.py → internal/inline.py} +8 -6
  79. ai_review/tests/fixtures/services/review/internal/inline_reply.py +25 -0
  80. ai_review/tests/fixtures/services/review/internal/policy.py +28 -0
  81. ai_review/tests/fixtures/services/review/internal/summary.py +21 -0
  82. ai_review/tests/fixtures/services/review/internal/summary_reply.py +19 -0
  83. ai_review/tests/fixtures/services/review/runner/__init__.py +0 -0
  84. ai_review/tests/fixtures/services/review/runner/context.py +50 -0
  85. ai_review/tests/fixtures/services/review/runner/inline.py +50 -0
  86. ai_review/tests/fixtures/services/review/runner/inline_reply.py +50 -0
  87. ai_review/tests/fixtures/services/review/runner/summary.py +50 -0
  88. ai_review/tests/fixtures/services/review/runner/summary_reply.py +50 -0
  89. ai_review/tests/fixtures/services/vcs.py +23 -0
  90. ai_review/tests/suites/cli/__init__.py +0 -0
  91. ai_review/tests/suites/cli/test_main.py +54 -0
  92. ai_review/tests/suites/libs/config/test_prompt.py +108 -28
  93. ai_review/tests/suites/libs/llm/__init__.py +0 -0
  94. ai_review/tests/suites/libs/llm/test_output_json_parser.py +155 -0
  95. ai_review/tests/suites/services/hook/test_service.py +88 -4
  96. ai_review/tests/suites/services/prompt/test_adapter.py +3 -3
  97. ai_review/tests/suites/services/prompt/test_service.py +102 -58
  98. ai_review/tests/suites/services/prompt/test_tools.py +86 -1
  99. ai_review/tests/suites/services/review/gateway/__init__.py +0 -0
  100. ai_review/tests/suites/services/review/gateway/test_comment.py +253 -0
  101. ai_review/tests/suites/services/review/gateway/test_llm.py +82 -0
  102. ai_review/tests/suites/services/review/internal/__init__.py +0 -0
  103. ai_review/tests/suites/services/review/internal/inline/__init__.py +0 -0
  104. ai_review/tests/suites/services/review/{inline → internal/inline}/test_schema.py +1 -1
  105. ai_review/tests/suites/services/review/internal/inline/test_service.py +81 -0
  106. ai_review/tests/suites/services/review/internal/inline_reply/__init__.py +0 -0
  107. ai_review/tests/suites/services/review/internal/inline_reply/test_schema.py +57 -0
  108. ai_review/tests/suites/services/review/internal/inline_reply/test_service.py +72 -0
  109. ai_review/tests/suites/services/review/internal/policy/__init__.py +0 -0
  110. ai_review/tests/suites/services/review/{policy → internal/policy}/test_service.py +1 -1
  111. ai_review/tests/suites/services/review/internal/summary/__init__.py +0 -0
  112. ai_review/tests/suites/services/review/{summary → internal/summary}/test_schema.py +1 -1
  113. ai_review/tests/suites/services/review/{summary → internal/summary}/test_service.py +2 -2
  114. ai_review/tests/suites/services/review/internal/summary_reply/__init__.py +0 -0
  115. ai_review/tests/suites/services/review/internal/summary_reply/test_schema.py +19 -0
  116. ai_review/tests/suites/services/review/internal/summary_reply/test_service.py +21 -0
  117. ai_review/tests/suites/services/review/runner/__init__.py +0 -0
  118. ai_review/tests/suites/services/review/runner/test_context.py +89 -0
  119. ai_review/tests/suites/services/review/runner/test_inline.py +100 -0
  120. ai_review/tests/suites/services/review/runner/test_inline_reply.py +109 -0
  121. ai_review/tests/suites/services/review/runner/test_summary.py +87 -0
  122. ai_review/tests/suites/services/review/runner/test_summary_reply.py +97 -0
  123. ai_review/tests/suites/services/review/test_service.py +64 -97
  124. ai_review/tests/suites/services/vcs/bitbucket/test_adapter.py +109 -0
  125. ai_review/tests/suites/services/vcs/bitbucket/{test_service.py → test_client.py} +88 -1
  126. ai_review/tests/suites/services/vcs/github/test_adapter.py +162 -0
  127. ai_review/tests/suites/services/vcs/github/{test_service.py → test_client.py} +102 -2
  128. ai_review/tests/suites/services/vcs/gitlab/test_adapter.py +105 -0
  129. ai_review/tests/suites/services/vcs/gitlab/{test_service.py → test_client.py} +99 -1
  130. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/METADATA +8 -5
  131. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/RECORD +143 -70
  132. ai_review/services/review/inline/service.py +0 -54
  133. ai_review/services/review/inline/types.py +0 -11
  134. ai_review/tests/fixtures/services/review/summary.py +0 -19
  135. ai_review/tests/suites/services/review/inline/test_service.py +0 -107
  136. /ai_review/{services/review/inline → libs/llm}/__init__.py +0 -0
  137. /ai_review/services/review/{policy → internal}/__init__.py +0 -0
  138. /ai_review/services/review/{summary → internal/inline}/__init__.py +0 -0
  139. /ai_review/services/review/{inline → internal/inline}/schema.py +0 -0
  140. /ai_review/{tests/suites/services/review/inline → services/review/internal/inline_reply}/__init__.py +0 -0
  141. /ai_review/{tests/suites/services/review → services/review/internal}/policy/__init__.py +0 -0
  142. /ai_review/{tests/suites/services/review → services/review/internal}/summary/__init__.py +0 -0
  143. /ai_review/services/review/{summary → internal/summary}/schema.py +0 -0
  144. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/WHEEL +0 -0
  145. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/entry_points.txt +0 -0
  146. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/licenses/LICENSE +0 -0
  147. {xai_review-0.27.0.dist-info → xai_review-0.28.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,50 @@
1
+ import pytest
2
+
3
+ from ai_review.services.cost.types import CostServiceProtocol
4
+ from ai_review.services.diff.types import DiffServiceProtocol
5
+ from ai_review.services.git.types import GitServiceProtocol
6
+ from ai_review.services.prompt.types import PromptServiceProtocol
7
+ from ai_review.services.review.gateway.types import ReviewCommentGatewayProtocol, ReviewLLMGatewayProtocol
8
+ from ai_review.services.review.internal.policy.types import ReviewPolicyServiceProtocol
9
+ from ai_review.services.review.internal.summary.types import SummaryCommentServiceProtocol
10
+ from ai_review.services.review.runner.summary import SummaryReviewRunner
11
+ from ai_review.services.review.runner.types import ReviewRunnerProtocol
12
+ from ai_review.services.vcs.types import VCSClientProtocol
13
+
14
+
15
+ class FakeSummaryReviewRunner(ReviewRunnerProtocol):
16
+ def __init__(self):
17
+ self.calls = []
18
+
19
+ async def run(self) -> None:
20
+ self.calls.append(("run", {}))
21
+
22
+
23
+ @pytest.fixture
24
+ def fake_summary_review_runner() -> FakeSummaryReviewRunner:
25
+ return FakeSummaryReviewRunner()
26
+
27
+
28
+ @pytest.fixture
29
+ def summary_review_runner(
30
+ fake_vcs_client: VCSClientProtocol,
31
+ fake_git_service: GitServiceProtocol,
32
+ fake_diff_service: DiffServiceProtocol,
33
+ fake_cost_service: CostServiceProtocol,
34
+ fake_prompt_service: PromptServiceProtocol,
35
+ fake_review_llm_gateway: ReviewLLMGatewayProtocol,
36
+ fake_review_policy_service: ReviewPolicyServiceProtocol,
37
+ fake_review_comment_gateway: ReviewCommentGatewayProtocol,
38
+ fake_summary_comment_service: SummaryCommentServiceProtocol,
39
+ ) -> SummaryReviewRunner:
40
+ return SummaryReviewRunner(
41
+ vcs=fake_vcs_client,
42
+ git=fake_git_service,
43
+ diff=fake_diff_service,
44
+ cost=fake_cost_service,
45
+ prompt=fake_prompt_service,
46
+ review_policy=fake_review_policy_service,
47
+ summary_comment=fake_summary_comment_service,
48
+ review_llm_gateway=fake_review_llm_gateway,
49
+ review_comment_gateway=fake_review_comment_gateway,
50
+ )
@@ -0,0 +1,50 @@
1
+ import pytest
2
+
3
+ from ai_review.services.cost.types import CostServiceProtocol
4
+ from ai_review.services.diff.types import DiffServiceProtocol
5
+ from ai_review.services.git.types import GitServiceProtocol
6
+ from ai_review.services.prompt.types import PromptServiceProtocol
7
+ from ai_review.services.review.gateway.types import ReviewLLMGatewayProtocol, ReviewCommentGatewayProtocol
8
+ from ai_review.services.review.internal.policy.types import ReviewPolicyServiceProtocol
9
+ from ai_review.services.review.internal.summary_reply.types import SummaryCommentReplyServiceProtocol
10
+ from ai_review.services.review.runner.summary_reply import SummaryReplyReviewRunner
11
+ from ai_review.services.review.runner.types import ReviewRunnerProtocol
12
+ from ai_review.services.vcs.types import VCSClientProtocol
13
+
14
+
15
+ class FakeSummaryReplyReviewRunner(ReviewRunnerProtocol):
16
+ def __init__(self):
17
+ self.calls = []
18
+
19
+ async def run(self) -> None:
20
+ self.calls.append(("run", {}))
21
+
22
+
23
+ @pytest.fixture
24
+ def fake_summary_reply_review_runner() -> FakeSummaryReplyReviewRunner:
25
+ return FakeSummaryReplyReviewRunner()
26
+
27
+
28
+ @pytest.fixture
29
+ def summary_reply_review_runner(
30
+ fake_vcs_client: VCSClientProtocol,
31
+ fake_git_service: GitServiceProtocol,
32
+ fake_diff_service: DiffServiceProtocol,
33
+ fake_cost_service: CostServiceProtocol,
34
+ fake_prompt_service: PromptServiceProtocol,
35
+ fake_review_llm_gateway: ReviewLLMGatewayProtocol,
36
+ fake_review_policy_service: ReviewPolicyServiceProtocol,
37
+ fake_review_comment_gateway: ReviewCommentGatewayProtocol,
38
+ fake_summary_comment_reply_service: SummaryCommentReplyServiceProtocol,
39
+ ) -> SummaryReplyReviewRunner:
40
+ return SummaryReplyReviewRunner(
41
+ vcs=fake_vcs_client,
42
+ git=fake_git_service,
43
+ diff=fake_diff_service,
44
+ cost=fake_cost_service,
45
+ prompt=fake_prompt_service,
46
+ review_policy=fake_review_policy_service,
47
+ review_llm_gateway=fake_review_llm_gateway,
48
+ summary_comment_reply=fake_summary_comment_reply_service,
49
+ review_comment_gateway=fake_review_comment_gateway,
50
+ )
@@ -5,6 +5,7 @@ import pytest
5
5
  from ai_review.services.vcs.types import (
6
6
  VCSClientProtocol,
7
7
  ReviewInfoSchema,
8
+ ReviewThreadSchema,
8
9
  ReviewCommentSchema,
9
10
  )
10
11
 
@@ -43,6 +44,28 @@ class FakeVCSClient(VCSClientProtocol):
43
44
 
44
45
  return self.responses.get("create_inline_comment_result", None)
45
46
 
47
+ async def create_inline_reply(self, thread_id: int | str, message: str) -> None:
48
+ self.calls.append(("create_inline_reply", (thread_id, message), {}))
49
+ if error := self.responses.get("create_inline_reply_error"):
50
+ raise error
51
+
52
+ return self.responses.get("create_inline_reply_result", None)
53
+
54
+ async def create_summary_reply(self, thread_id: int | str, message: str) -> None:
55
+ self.calls.append(("create_summary_reply", (thread_id, message), {}))
56
+ if error := self.responses.get("create_summary_reply_error"):
57
+ raise error
58
+
59
+ return self.responses.get("create_summary_reply_result", None)
60
+
61
+ async def get_inline_threads(self) -> list[ReviewThreadSchema]:
62
+ self.calls.append(("get_inline_threads", (), {}))
63
+ return self.responses.get("get_inline_threads", [])
64
+
65
+ async def get_general_threads(self) -> list[ReviewThreadSchema]:
66
+ self.calls.append(("get_general_threads", (), {}))
67
+ return self.responses.get("get_general_threads", [])
68
+
46
69
 
47
70
  @pytest.fixture
48
71
  def fake_vcs_client() -> FakeVCSClient:
File without changes
@@ -0,0 +1,54 @@
1
+ import pytest
2
+ from typer.testing import CliRunner
3
+
4
+ from ai_review.cli.main import app
5
+ from ai_review.services.review.service import ReviewService
6
+
7
+ runner = CliRunner()
8
+
9
+
10
+ @pytest.fixture(autouse=True)
11
+ def dummy_review_service(monkeypatch: pytest.MonkeyPatch, review_service: ReviewService):
12
+ monkeypatch.setattr("ai_review.cli.commands.run_review.ReviewService", lambda: review_service)
13
+ monkeypatch.setattr("ai_review.cli.commands.run_inline_review.ReviewService", lambda: review_service)
14
+ monkeypatch.setattr("ai_review.cli.commands.run_context_review.ReviewService", lambda: review_service)
15
+ monkeypatch.setattr("ai_review.cli.commands.run_summary_review.ReviewService", lambda: review_service)
16
+ monkeypatch.setattr("ai_review.cli.commands.run_inline_reply_review.ReviewService", lambda: review_service)
17
+ monkeypatch.setattr("ai_review.cli.commands.run_summary_reply_review.ReviewService", lambda: review_service)
18
+
19
+
20
+ @pytest.mark.parametrize(
21
+ "args, expected_output",
22
+ [
23
+ (["run"], "Starting full AI review..."),
24
+ (["run-inline"], "Starting inline AI review..."),
25
+ (["run-context"], "Starting context AI review..."),
26
+ (["run-summary"], "Starting summary AI review..."),
27
+ (["run-inline-reply"], "Starting inline reply AI review..."),
28
+ (["run-summary-reply"], "Starting summary reply AI review..."),
29
+ ],
30
+ )
31
+ def test_cli_commands_invoke_review_service_successfully(args: list[str], expected_output: str):
32
+ """
33
+ Ensure CLI commands correctly call the ReviewService with fake dependencies.
34
+ """
35
+ result = runner.invoke(app, args)
36
+
37
+ assert result.exit_code == 0
38
+ assert expected_output in result.output
39
+ assert "AI review completed successfully!" in result.output
40
+
41
+
42
+ def test_show_config_outputs_json(monkeypatch: pytest.MonkeyPatch):
43
+ """
44
+ Validate that the 'show-config' command prints settings as JSON.
45
+ """
46
+ monkeypatch.setattr(
47
+ "ai_review.cli.main.settings.model_dump_json",
48
+ lambda **_: '{"debug": true}'
49
+ )
50
+
51
+ result = runner.invoke(app, ["show-config"])
52
+ assert result.exit_code == 0
53
+ assert "Loaded AI Review configuration" in result.output
54
+ assert '{"debug": true}' in result.output
@@ -2,56 +2,136 @@ from pathlib import Path
2
2
 
3
3
  import pytest
4
4
 
5
- from ai_review.libs.config.prompt import PromptConfig
5
+ from ai_review.libs.config.prompt import PromptConfig, resolve_prompt_files, resolve_system_prompt_files
6
6
 
7
7
 
8
- def test_inline_prompt_files_or_default_uses_defaults(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
9
- dummy_file = tmp_path / "dummy.md"
10
- dummy_file.write_text("DUMMY")
11
- monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
12
-
13
- config = PromptConfig()
14
- result = config.inline_prompt_files_or_default
8
+ # ---------- resolve_prompt_files ----------
15
9
 
10
+ def test_resolve_prompt_files_returns_given_list(tmp_path: Path):
11
+ dummy_file = tmp_path / "file.md"
12
+ result = resolve_prompt_files([dummy_file], "default_inline.md")
16
13
  assert result == [dummy_file]
17
- assert config.load_inline() == ["DUMMY"]
18
14
 
19
15
 
20
- def test_system_inline_prompts_none_returns_global(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
21
- dummy_file = tmp_path / "global.md"
22
- dummy_file.write_text("GLOBAL")
16
+ def test_resolve_prompt_files_loads_default_when_none(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
17
+ dummy_file = tmp_path / "inline_default.md"
18
+ dummy_file.write_text("INLINE_DEFAULT")
23
19
  monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
24
20
 
25
- config = PromptConfig(system_inline_prompt_files=None)
26
- result = config.system_inline_prompt_files_or_default
21
+ result = resolve_prompt_files(None, "default_inline.md")
22
+ assert result == [dummy_file]
27
23
 
24
+
25
+ # ---------- resolve_system_prompt_files ----------
26
+
27
+ def test_resolve_system_prompt_files_none_returns_global(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
28
+ dummy_file = tmp_path / "sys.md"
29
+ dummy_file.write_text("SYS")
30
+ monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
31
+
32
+ result = resolve_system_prompt_files(None, include=True, default_file="default_system_inline.md")
28
33
  assert result == [dummy_file]
29
- assert config.load_system_inline() == ["GLOBAL"]
30
34
 
31
35
 
32
- def test_system_inline_prompts_include_true(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
36
+ def test_resolve_system_prompt_files_include_true(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
33
37
  global_file = tmp_path / "global.md"
34
38
  global_file.write_text("GLOBAL")
35
39
  custom_file = tmp_path / "custom.md"
36
- custom_file.write_text("CUSTOM")
37
40
  monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: global_file)
38
41
 
39
- config = PromptConfig(system_inline_prompt_files=[custom_file], include_inline_system_prompts=True)
40
- result = config.system_inline_prompt_files_or_default
41
-
42
- assert global_file in result and custom_file in result
43
- assert config.load_system_inline() == ["GLOBAL", "CUSTOM"]
42
+ result = resolve_system_prompt_files([custom_file], include=True, default_file="default_system_inline.md")
43
+ assert result == [global_file, custom_file]
44
44
 
45
45
 
46
- def test_system_inline_prompts_include_false(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
46
+ def test_resolve_system_prompt_files_include_false(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
47
47
  global_file = tmp_path / "global.md"
48
48
  global_file.write_text("GLOBAL")
49
49
  custom_file = tmp_path / "custom.md"
50
- custom_file.write_text("CUSTOM")
51
50
  monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: global_file)
52
51
 
53
- config = PromptConfig(system_inline_prompt_files=[custom_file], include_inline_system_prompts=False)
54
- result = config.system_inline_prompt_files_or_default
55
-
52
+ result = resolve_system_prompt_files([custom_file], include=False, default_file="default_system_inline.md")
56
53
  assert result == [custom_file]
57
- assert config.load_system_inline() == ["CUSTOM"]
54
+
55
+
56
+ # ---------- Prompts ---------
57
+
58
+ def test_load_context_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
59
+ dummy_file = tmp_path / "context.md"
60
+ dummy_file.write_text("CTX")
61
+ monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
62
+
63
+ config = PromptConfig()
64
+ assert config.context_prompt_files_or_default == [dummy_file]
65
+ assert config.load_context() == ["CTX"]
66
+
67
+
68
+ def test_load_summary_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
69
+ dummy_file = tmp_path / "summary.md"
70
+ dummy_file.write_text("SUM")
71
+ monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
72
+
73
+ config = PromptConfig()
74
+ assert config.summary_prompt_files_or_default == [dummy_file]
75
+ assert config.load_summary() == ["SUM"]
76
+
77
+
78
+ def test_load_inline_reply_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
79
+ dummy_file = tmp_path / "inline_reply.md"
80
+ dummy_file.write_text("INL_R")
81
+ monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
82
+
83
+ config = PromptConfig()
84
+ assert config.inline_reply_prompt_files_or_default == [dummy_file]
85
+ assert config.load_inline_reply() == ["INL_R"]
86
+
87
+
88
+ def test_load_summary_reply_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
89
+ dummy_file = tmp_path / "summary_reply.md"
90
+ dummy_file.write_text("SUM_R")
91
+ monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
92
+
93
+ config = PromptConfig()
94
+ assert config.summary_reply_prompt_files_or_default == [dummy_file]
95
+ assert config.load_summary_reply() == ["SUM_R"]
96
+
97
+
98
+ # ---------- System Prompts ----------
99
+
100
+ def test_load_system_context_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
101
+ dummy_file = tmp_path / "sys_context.md"
102
+ dummy_file.write_text("SYS_CTX")
103
+ monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
104
+
105
+ config = PromptConfig()
106
+ assert config.system_context_prompt_files_or_default == [dummy_file]
107
+ assert config.load_system_context() == ["SYS_CTX"]
108
+
109
+
110
+ def test_load_system_summary_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
111
+ dummy_file = tmp_path / "sys_summary.md"
112
+ dummy_file.write_text("SYS_SUM")
113
+ monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
114
+
115
+ config = PromptConfig()
116
+ assert config.system_summary_prompt_files_or_default == [dummy_file]
117
+ assert config.load_system_summary() == ["SYS_SUM"]
118
+
119
+
120
+ def test_load_system_inline_reply_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
121
+ dummy_file = tmp_path / "sys_inline_reply.md"
122
+ dummy_file.write_text("SYS_IR")
123
+ monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
124
+
125
+ config = PromptConfig()
126
+ assert config.system_inline_reply_prompt_files_or_default == [dummy_file]
127
+ assert config.load_system_inline_reply() == ["SYS_IR"]
128
+
129
+
130
+ def test_load_system_summary_reply_prompts(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
131
+ dummy_file = tmp_path / "sys_summary_reply.md"
132
+ dummy_file.write_text("SYS_SR")
133
+ monkeypatch.setattr("ai_review.libs.config.prompt.load_resource", lambda **_: dummy_file)
134
+
135
+ config = PromptConfig()
136
+ assert config.system_summary_reply_prompt_files_or_default == [dummy_file]
137
+ assert config.load_system_summary_reply() == ["SYS_SR"]
File without changes
@@ -0,0 +1,155 @@
1
+ from ai_review.libs.llm.output_json_parser import LLMOutputJSONParser, CLEAN_JSON_BLOCK_RE
2
+ from ai_review.tests.fixtures.libs.llm.output_json_parser import DummyModel
3
+
4
+
5
+ # ---------- try_parse ----------
6
+
7
+ def test_try_parse_happy_path(llm_output_json_parser: LLMOutputJSONParser):
8
+ """Should successfully parse valid JSON string."""
9
+ raw = '{"text": "hello"}'
10
+ result = llm_output_json_parser.try_parse(raw)
11
+
12
+ assert isinstance(result, DummyModel)
13
+ assert result.text == "hello"
14
+
15
+
16
+ def test_try_parse_with_sanitization_success(llm_output_json_parser: LLMOutputJSONParser):
17
+ """Should retry and parse after sanitization fixes minor issues."""
18
+ raw = '{text: "hi"}'
19
+ result = llm_output_json_parser.try_parse(raw)
20
+
21
+ assert result is None
22
+
23
+
24
+ def test_try_parse_with_sanitization_still_invalid(llm_output_json_parser: LLMOutputJSONParser):
25
+ """Should return None if even sanitized JSON invalid."""
26
+ raw = '{"wrong_field": "hi"}'
27
+ result = llm_output_json_parser.try_parse(raw)
28
+
29
+ assert result is None
30
+
31
+
32
+ def test_try_parse_with_control_characters(llm_output_json_parser: LLMOutputJSONParser):
33
+ """Should sanitize and parse JSON containing control characters (e.g., newlines, tabs)."""
34
+ raw = '{\n\t"text": "multi-line\nvalue"\r}'
35
+ result = llm_output_json_parser.try_parse(raw)
36
+
37
+ assert result is None
38
+
39
+
40
+ def test_try_parse_with_unicode_and_escaped_symbols(llm_output_json_parser: LLMOutputJSONParser):
41
+ """Should handle escaped unicode and symbols correctly."""
42
+ raw = '{"text": "Привет 👋 \\n new line"}'
43
+ result = llm_output_json_parser.try_parse(raw)
44
+
45
+ assert isinstance(result, DummyModel)
46
+ assert "Привет" in result.text
47
+ assert "\\n" in result.text or "\n" in result.text
48
+
49
+
50
+ # ---------- parse_output ----------
51
+
52
+ def test_parse_output_happy_path(llm_output_json_parser: LLMOutputJSONParser):
53
+ """Should parse plain JSON output successfully."""
54
+ output = '{"text": "parsed"}'
55
+ result = llm_output_json_parser.parse_output(output)
56
+
57
+ assert isinstance(result, DummyModel)
58
+ assert result.text == "parsed"
59
+
60
+
61
+ def test_parse_output_with_fenced_block(llm_output_json_parser: LLMOutputJSONParser):
62
+ """Should extract JSON from fenced block and parse successfully."""
63
+ output = "```json\n{\"text\": \"inside block\"}\n```"
64
+ result = llm_output_json_parser.parse_output(output)
65
+
66
+ assert isinstance(result, DummyModel)
67
+ assert result.text == "inside block"
68
+
69
+
70
+ def test_parse_output_with_non_json_fence(llm_output_json_parser: LLMOutputJSONParser):
71
+ """Should extract even from ``` block without explicit json tag."""
72
+ output = "```{\"text\": \"inside fence\"}```"
73
+ result = llm_output_json_parser.parse_output(output)
74
+
75
+ assert isinstance(result, DummyModel)
76
+ assert result.text == "inside fence"
77
+
78
+
79
+ def test_parse_output_empty_string(llm_output_json_parser: LLMOutputJSONParser):
80
+ """Should return None and log warning when output empty."""
81
+ result = llm_output_json_parser.parse_output("")
82
+ assert result is None
83
+
84
+
85
+ def test_parse_output_invalid_json(llm_output_json_parser: LLMOutputJSONParser):
86
+ """Should return None if JSON invalid and cannot be sanitized."""
87
+ output = "```json\n{\"wrong_field\": \"oops\"}\n```"
88
+ result = llm_output_json_parser.parse_output(output)
89
+ assert result is None
90
+
91
+
92
+ def test_clean_json_block_regex_extracts_content():
93
+ """Should correctly extract JSON body from fenced block."""
94
+ text = "Some intro ```json\n{\"x\": 42}\n``` and trailing"
95
+ match = CLEAN_JSON_BLOCK_RE.search(text)
96
+ assert match
97
+ assert "{\"x\": 42}" in match.group(1)
98
+
99
+
100
+ def test_parse_output_with_extra_text_around_json(llm_output_json_parser: LLMOutputJSONParser):
101
+ """Should extract and parse JSON when surrounded by extra LLM chatter."""
102
+ output = "Here's what I found:\n```json\n{\"text\": \"valid\"}\n```Hope that helps!"
103
+ result = llm_output_json_parser.parse_output(output)
104
+
105
+ assert isinstance(result, DummyModel)
106
+ assert result.text == "valid"
107
+
108
+
109
+ def test_parse_output_with_broken_json_then_valid_block(llm_output_json_parser: LLMOutputJSONParser):
110
+ """Should skip broken JSON and parse valid fenced one."""
111
+ output = '{"text": invalid}\n```json\n{"text": "fixed"}\n```'
112
+ result = llm_output_json_parser.parse_output(output)
113
+
114
+ assert isinstance(result, DummyModel)
115
+ assert result.text == "fixed"
116
+
117
+
118
+ def test_parse_output_with_code_fence_but_extra_backticks(llm_output_json_parser: LLMOutputJSONParser):
119
+ """Should correctly handle fenced block even with multiple triple-backticks."""
120
+ output = "``````json\n{\"text\": \"messy fences\"}\n``````"
121
+ result = llm_output_json_parser.parse_output(output)
122
+
123
+ assert result is None
124
+
125
+
126
+ def test_parse_output_with_llm_style_json(llm_output_json_parser: LLMOutputJSONParser):
127
+ """Should handle LLM output containing pseudo-JSON like 'text: value'."""
128
+ output = '```json\n{text: "approximate JSON"}\n```'
129
+ result = llm_output_json_parser.parse_output(output)
130
+
131
+ assert result is None
132
+
133
+
134
+ def test_parse_output_with_multiple_json_blocks(llm_output_json_parser: LLMOutputJSONParser):
135
+ """Should parse first valid fenced JSON block."""
136
+ output = """
137
+ ```json
138
+ {"text": "first"}
139
+ ```
140
+ ```json
141
+ {"text": "second"}
142
+ ```
143
+ """
144
+ result = llm_output_json_parser.parse_output(output)
145
+
146
+ assert isinstance(result, DummyModel)
147
+ assert result.text == "first"
148
+
149
+
150
+ def test_parse_output_with_extra_control_chars(llm_output_json_parser: LLMOutputJSONParser):
151
+ """Should handle JSON polluted by invisible control characters."""
152
+ raw = '{\x00"text": "ok\x07"}'
153
+ result = llm_output_json_parser.try_parse(raw)
154
+
155
+ assert result is None
@@ -3,12 +3,96 @@ import pytest
3
3
  from ai_review.services.cost.schema import CostReportSchema
4
4
  from ai_review.services.hook.constants import HookType
5
5
  from ai_review.services.hook.service import HookService
6
+ from ai_review.services.review.internal.inline.schema import InlineCommentSchema
7
+ from ai_review.services.review.internal.inline_reply.schema import InlineCommentReplySchema
8
+ from ai_review.services.review.internal.summary.schema import SummaryCommentSchema
9
+ from ai_review.services.review.internal.summary_reply.schema import SummaryCommentReplySchema
10
+
11
+ cost_report = CostReportSchema(
12
+ model="gpt",
13
+ prompt_tokens=1,
14
+ completion_tokens=2,
15
+ total_cost=0.3,
16
+ input_cost=0.1,
17
+ output_cost=0.2
18
+ )
19
+ inline_comment = InlineCommentSchema(file="a.py", line=1, message="fix this")
20
+ inline_reply = InlineCommentReplySchema(message="ok", suggestion="use helper()")
21
+ summary_comment = SummaryCommentSchema(text="summary text")
22
+ summary_reply = SummaryCommentReplySchema(text="reply summary")
23
+
24
+ HOOK_CASES = [
25
+ # Chat
26
+ ("on_chat_start", "emit_chat_start", dict(prompt="hi", prompt_system="sys")),
27
+ ("on_chat_error", "emit_chat_error", dict(prompt="oops", prompt_system="sys")),
28
+ ("on_chat_complete", "emit_chat_complete", dict(result="done", report=cost_report)),
29
+
30
+ # Inline Review
31
+ ("on_inline_review_start", "emit_inline_review_start", {}),
32
+ ("on_inline_review_complete", "emit_inline_review_complete", dict(report=cost_report)),
33
+
34
+ # Context Review
35
+ ("on_context_review_start", "emit_context_review_start", {}),
36
+ ("on_context_review_complete", "emit_context_review_complete", dict(report=cost_report)),
37
+
38
+ # Summary Review
39
+ ("on_summary_review_start", "emit_summary_review_start", {}),
40
+ ("on_summary_review_complete", "emit_summary_review_complete", dict(report=cost_report)),
41
+
42
+ # Inline Reply Review
43
+ ("on_inline_reply_review_start", "emit_inline_reply_review_start", {}),
44
+ ("on_inline_reply_review_complete", "emit_inline_reply_review_complete", dict(report=cost_report)),
45
+
46
+ # Summary Reply Review
47
+ ("on_summary_reply_review_start", "emit_summary_reply_review_start", {}),
48
+ ("on_summary_reply_review_complete", "emit_summary_reply_review_complete", dict(report=cost_report)),
49
+
50
+ # Inline Comment
51
+ ("on_inline_comment_start", "emit_inline_comment_start", dict(comment=inline_comment)),
52
+ ("on_inline_comment_error", "emit_inline_comment_error", dict(comment=inline_comment)),
53
+ ("on_inline_comment_complete", "emit_inline_comment_complete", dict(comment=inline_comment)),
54
+
55
+ # Summary Comment
56
+ ("on_summary_comment_start", "emit_summary_comment_start", dict(comment=summary_comment)),
57
+ ("on_summary_comment_error", "emit_summary_comment_error", dict(comment=summary_comment)),
58
+ ("on_summary_comment_complete", "emit_summary_comment_complete", dict(comment=summary_comment)),
59
+
60
+ # Inline Comment Reply
61
+ ("on_inline_comment_reply_start", "emit_inline_comment_reply_start", dict(comment=inline_reply)),
62
+ ("on_inline_comment_reply_error", "emit_inline_comment_reply_error", dict(comment=inline_reply)),
63
+ ("on_inline_comment_reply_complete", "emit_inline_comment_reply_complete", dict(comment=inline_reply)),
64
+
65
+ # Summary Comment Reply
66
+ ("on_summary_comment_reply_start", "emit_summary_comment_reply_start", dict(comment=summary_reply)),
67
+ ("on_summary_comment_reply_error", "emit_summary_comment_reply_error", dict(comment=summary_reply)),
68
+ ("on_summary_comment_reply_complete", "emit_summary_comment_reply_complete", dict(comment=summary_reply)),
69
+ ]
6
70
 
7
71
 
8
- @pytest.fixture
9
- def hook_service() -> HookService:
10
- """Return a fresh HookService instance for each test."""
11
- return HookService()
72
+ @pytest.mark.asyncio
73
+ @pytest.mark.parametrize("inject_method, emit_method, args", HOOK_CASES)
74
+ async def test_all_hooks_trigger_correctly(
75
+ hook_service: HookService,
76
+ inject_method: str,
77
+ emit_method: str,
78
+ args: dict,
79
+ ):
80
+ """
81
+ Ensure every hook registration + emit combination works correctly.
82
+ Each hook should receive the emitted arguments without raising.
83
+ """
84
+ called = {}
85
+
86
+ async def sample_hook(**kwargs):
87
+ called.update(kwargs)
88
+
89
+ emit_func = getattr(hook_service, emit_method)
90
+ inject_func = getattr(hook_service, inject_method)
91
+
92
+ inject_func(sample_hook)
93
+ await emit_func(**args)
94
+
95
+ assert called == args
12
96
 
13
97
 
14
98
  @pytest.mark.asyncio
@@ -1,4 +1,4 @@
1
- from ai_review.services.prompt.adapter import build_prompt_context_from_mr_info
1
+ from ai_review.services.prompt.adapter import build_prompt_context_from_review_info
2
2
  from ai_review.services.vcs.types import (
3
3
  ReviewInfoSchema,
4
4
  UserSchema,
@@ -23,7 +23,7 @@ def test_build_prompt_context_from_full_review_info() -> None:
23
23
  changed_files=["api/views.py", "api/tests.py"],
24
24
  )
25
25
 
26
- context = build_prompt_context_from_mr_info(review_info)
26
+ context = build_prompt_context_from_review_info(review_info)
27
27
 
28
28
  assert context.review_title == "Fix API bug"
29
29
  assert context.review_description == "Refactored endpoint"
@@ -52,7 +52,7 @@ def test_build_prompt_context_handles_no_reviewers() -> None:
52
52
  reviewers=[],
53
53
  )
54
54
 
55
- context = build_prompt_context_from_mr_info(review_info)
55
+ context = build_prompt_context_from_review_info(review_info)
56
56
 
57
57
  assert context.review_reviewer == ""
58
58
  assert context.review_reviewers == []