xai-review 0.20.0__py3-none-any.whl → 0.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xai-review might be problematic. Click here for more details.

Files changed (95) hide show
  1. ai_review/clients/claude/client.py +1 -1
  2. ai_review/clients/gemini/client.py +1 -1
  3. ai_review/clients/github/client.py +1 -1
  4. ai_review/clients/github/pr/client.py +64 -16
  5. ai_review/clients/github/pr/schema/comments.py +4 -0
  6. ai_review/clients/github/pr/schema/files.py +4 -0
  7. ai_review/clients/github/pr/schema/reviews.py +4 -0
  8. ai_review/clients/github/pr/types.py +49 -0
  9. ai_review/clients/gitlab/client.py +1 -1
  10. ai_review/clients/gitlab/mr/client.py +25 -8
  11. ai_review/clients/gitlab/mr/schema/discussions.py +4 -0
  12. ai_review/clients/gitlab/mr/schema/notes.py +4 -0
  13. ai_review/clients/gitlab/mr/types.py +35 -0
  14. ai_review/clients/openai/client.py +1 -1
  15. ai_review/config.py +2 -0
  16. ai_review/libs/asynchronous/gather.py +6 -3
  17. ai_review/libs/config/core.py +5 -0
  18. ai_review/libs/http/event_hooks/logger.py +5 -2
  19. ai_review/libs/http/transports/retry.py +23 -6
  20. ai_review/services/artifacts/service.py +2 -1
  21. ai_review/services/artifacts/types.py +20 -0
  22. ai_review/services/cost/service.py +2 -1
  23. ai_review/services/cost/types.py +12 -0
  24. ai_review/services/diff/service.py +2 -1
  25. ai_review/services/diff/types.py +28 -0
  26. ai_review/services/hook/__init__.py +5 -0
  27. ai_review/services/hook/constants.py +24 -0
  28. ai_review/services/hook/service.py +162 -0
  29. ai_review/services/hook/types.py +28 -0
  30. ai_review/services/llm/claude/client.py +2 -2
  31. ai_review/services/llm/factory.py +2 -2
  32. ai_review/services/llm/gemini/client.py +2 -2
  33. ai_review/services/llm/openai/client.py +2 -2
  34. ai_review/services/llm/types.py +1 -1
  35. ai_review/services/prompt/service.py +2 -1
  36. ai_review/services/prompt/types.py +27 -0
  37. ai_review/services/review/gateway/__init__.py +0 -0
  38. ai_review/services/review/gateway/comment.py +65 -0
  39. ai_review/services/review/gateway/llm.py +40 -0
  40. ai_review/services/review/inline/schema.py +2 -2
  41. ai_review/services/review/inline/service.py +2 -1
  42. ai_review/services/review/inline/types.py +11 -0
  43. ai_review/services/review/service.py +23 -74
  44. ai_review/services/review/summary/service.py +2 -1
  45. ai_review/services/review/summary/types.py +8 -0
  46. ai_review/services/vcs/factory.py +2 -2
  47. ai_review/services/vcs/github/client.py +4 -2
  48. ai_review/services/vcs/gitlab/client.py +4 -2
  49. ai_review/services/vcs/types.py +1 -1
  50. ai_review/tests/fixtures/clients/__init__.py +0 -0
  51. ai_review/tests/fixtures/clients/claude.py +22 -0
  52. ai_review/tests/fixtures/clients/gemini.py +21 -0
  53. ai_review/tests/fixtures/clients/github.py +181 -0
  54. ai_review/tests/fixtures/clients/gitlab.py +150 -0
  55. ai_review/tests/fixtures/clients/openai.py +21 -0
  56. ai_review/tests/fixtures/services/__init__.py +0 -0
  57. ai_review/tests/fixtures/services/artifacts.py +51 -0
  58. ai_review/tests/fixtures/services/cost.py +48 -0
  59. ai_review/tests/fixtures/services/diff.py +46 -0
  60. ai_review/tests/fixtures/{git.py → services/git.py} +11 -5
  61. ai_review/tests/fixtures/services/llm.py +26 -0
  62. ai_review/tests/fixtures/services/prompt.py +43 -0
  63. ai_review/tests/fixtures/services/review/__init__.py +0 -0
  64. ai_review/tests/fixtures/services/review/inline.py +25 -0
  65. ai_review/tests/fixtures/services/review/summary.py +19 -0
  66. ai_review/tests/fixtures/services/vcs.py +49 -0
  67. ai_review/tests/suites/clients/claude/test_client.py +1 -20
  68. ai_review/tests/suites/clients/gemini/test_client.py +1 -19
  69. ai_review/tests/suites/clients/github/test_client.py +1 -23
  70. ai_review/tests/suites/clients/gitlab/test_client.py +1 -22
  71. ai_review/tests/suites/clients/openai/test_client.py +1 -19
  72. ai_review/tests/suites/libs/asynchronous/__init__.py +0 -0
  73. ai_review/tests/suites/libs/asynchronous/test_gather.py +46 -0
  74. ai_review/tests/suites/services/diff/test_service.py +4 -4
  75. ai_review/tests/suites/services/diff/test_tools.py +10 -10
  76. ai_review/tests/suites/services/hook/__init__.py +0 -0
  77. ai_review/tests/suites/services/hook/test_service.py +93 -0
  78. ai_review/tests/suites/services/llm/__init__.py +0 -0
  79. ai_review/tests/suites/services/llm/test_factory.py +30 -0
  80. ai_review/tests/suites/services/review/inline/test_schema.py +10 -9
  81. ai_review/tests/suites/services/review/summary/test_schema.py +0 -1
  82. ai_review/tests/suites/services/review/summary/test_service.py +10 -7
  83. ai_review/tests/suites/services/review/test_service.py +126 -0
  84. ai_review/tests/suites/services/vcs/__init__.py +0 -0
  85. ai_review/tests/suites/services/vcs/github/__init__.py +0 -0
  86. ai_review/tests/suites/services/vcs/github/test_service.py +114 -0
  87. ai_review/tests/suites/services/vcs/gitlab/__init__.py +0 -0
  88. ai_review/tests/suites/services/vcs/gitlab/test_service.py +123 -0
  89. ai_review/tests/suites/services/vcs/test_factory.py +23 -0
  90. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/METADATA +5 -2
  91. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/RECORD +95 -50
  92. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/WHEEL +0 -0
  93. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/entry_points.txt +0 -0
  94. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/licenses/LICENSE +0 -0
  95. {xai_review-0.20.0.dist-info → xai_review-0.22.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,24 @@
1
+ from enum import StrEnum
2
+
3
+
4
+ class HookType(StrEnum):
5
+ ON_CHAT_START = "ON_CHAT_START"
6
+ ON_CHAT_ERROR = "ON_CHAT_ERROR"
7
+ ON_CHAT_COMPLETE = "ON_CHAT_COMPLETE"
8
+
9
+ ON_INLINE_REVIEW_START = "ON_INLINE_REVIEW_START"
10
+ ON_INLINE_REVIEW_COMPLETE = "ON_INLINE_REVIEW_COMPLETE"
11
+
12
+ ON_CONTEXT_REVIEW_START = "ON_CONTEXT_REVIEW_START"
13
+ ON_CONTEXT_REVIEW_COMPLETE = "ON_CONTEXT_REVIEW_COMPLETE"
14
+
15
+ ON_SUMMARY_REVIEW_START = "ON_SUMMARY_REVIEW_START"
16
+ ON_SUMMARY_REVIEW_COMPLETE = "ON_SUMMARY_REVIEW_COMPLETE"
17
+
18
+ ON_INLINE_COMMENT_START = "ON_INLINE_COMMENT_START"
19
+ ON_INLINE_COMMENT_ERROR = "ON_INLINE_COMMENT_ERROR"
20
+ ON_INLINE_COMMENT_COMPLETE = "ON_INLINE_COMMENT_COMPLETE"
21
+
22
+ ON_SUMMARY_COMMENT_START = "ON_SUMMARY_COMMENT_START"
23
+ ON_SUMMARY_COMMENT_ERROR = "ON_SUMMARY_COMMENT_ERROR"
24
+ ON_SUMMARY_COMMENT_COMPLETE = "ON_SUMMARY_COMMENT_COMPLETE"
@@ -0,0 +1,162 @@
1
from collections import defaultdict
from typing import Any

from ai_review.libs.logger import get_logger
from ai_review.services.cost.schema import CostReportSchema
from ai_review.services.hook.constants import HookType
from ai_review.services.hook.types import (
    HookFunc,
    # --- Chat ---
    ChatStartHookFunc,
    ChatErrorHookFunc,
    ChatCompleteHookFunc,
    # --- Inline Review ---
    InlineReviewStartHookFunc,
    InlineReviewCompleteHookFunc,
    # --- Context Review ---
    ContextReviewStartHookFunc,
    ContextReviewCompleteHookFunc,
    # --- Summary Review ---
    SummaryReviewStartHookFunc,
    SummaryReviewCompleteHookFunc,
    # --- Inline Comment ---
    InlineCommentStartHookFunc,
    InlineCommentErrorHookFunc,
    InlineCommentCompleteHookFunc,
    # --- Summary Comment ---
    SummaryCommentStartHookFunc,
    SummaryCommentErrorHookFunc,
    SummaryCommentCompleteHookFunc,
)
from ai_review.services.review.inline.schema import InlineCommentSchema
from ai_review.services.review.summary.schema import SummaryCommentSchema

logger = get_logger("HOOK_SERVICE")


class HookService:
    """Registry and dispatcher for async review-lifecycle hooks.

    Callbacks are registered per ``HookType`` — via ``inject_hook`` or the
    ``on_*`` decorator-style helpers — and fired with ``emit``/``emit_*``.
    A callback that raises is logged and swallowed so a faulty hook can
    never interrupt the review flow.
    """

    def __init__(self):
        # One callback list per hook type; defaultdict keeps registration simple.
        self.hooks: dict[HookType, list[HookFunc]] = defaultdict(list)

    def inject_hook(self, name: HookType, func: HookFunc):
        """Register *func* to be awaited whenever *name* is emitted."""
        self.hooks[name].append(func)

    async def emit(self, name: HookType, *args: Any, **kwargs: Any):
        """Invoke every callback registered for *name*, swallowing errors."""
        # .get() avoids materializing an empty list in the defaultdict.
        if not self.hooks.get(name):
            return

        for callback in self.hooks[name]:
            try:
                await callback(*args, **kwargs)
            except Exception as error:
                logger.exception(f"Error in {name} hook: {error}")

    # --- Chat ---
    def on_chat_start(self, func: ChatStartHookFunc):
        self.inject_hook(HookType.ON_CHAT_START, func)
        return func

    def on_chat_error(self, func: ChatErrorHookFunc):
        self.inject_hook(HookType.ON_CHAT_ERROR, func)
        return func

    def on_chat_complete(self, func: ChatCompleteHookFunc):
        self.inject_hook(HookType.ON_CHAT_COMPLETE, func)
        return func

    async def emit_chat_start(self, prompt: str, prompt_system: str):
        await self.emit(HookType.ON_CHAT_START, prompt=prompt, prompt_system=prompt_system)

    async def emit_chat_error(self, prompt: str, prompt_system: str):
        await self.emit(HookType.ON_CHAT_ERROR, prompt=prompt, prompt_system=prompt_system)

    async def emit_chat_complete(self, result: str, report: CostReportSchema | None):
        await self.emit(HookType.ON_CHAT_COMPLETE, result=result, report=report)

    # --- Inline Review ---
    def on_inline_review_start(self, func: InlineReviewStartHookFunc):
        self.inject_hook(HookType.ON_INLINE_REVIEW_START, func)
        return func

    def on_inline_review_complete(self, func: InlineReviewCompleteHookFunc):
        self.inject_hook(HookType.ON_INLINE_REVIEW_COMPLETE, func)
        return func

    async def emit_inline_review_start(self):
        await self.emit(HookType.ON_INLINE_REVIEW_START)

    async def emit_inline_review_complete(self, report: CostReportSchema | None):
        await self.emit(HookType.ON_INLINE_REVIEW_COMPLETE, report=report)

    # --- Context Review ---
    def on_context_review_start(self, func: ContextReviewStartHookFunc):
        self.inject_hook(HookType.ON_CONTEXT_REVIEW_START, func)
        return func

    def on_context_review_complete(self, func: ContextReviewCompleteHookFunc):
        self.inject_hook(HookType.ON_CONTEXT_REVIEW_COMPLETE, func)
        return func

    async def emit_context_review_start(self):
        await self.emit(HookType.ON_CONTEXT_REVIEW_START)

    async def emit_context_review_complete(self, report: CostReportSchema | None):
        await self.emit(HookType.ON_CONTEXT_REVIEW_COMPLETE, report=report)

    # --- Summary Review ---
    def on_summary_review_start(self, func: SummaryReviewStartHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_REVIEW_START, func)
        return func

    def on_summary_review_complete(self, func: SummaryReviewCompleteHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_REVIEW_COMPLETE, func)
        return func

    async def emit_summary_review_start(self):
        await self.emit(HookType.ON_SUMMARY_REVIEW_START)

    async def emit_summary_review_complete(self, report: CostReportSchema | None):
        await self.emit(HookType.ON_SUMMARY_REVIEW_COMPLETE, report=report)

    # --- Inline Comment ---
    def on_inline_comment_start(self, func: InlineCommentStartHookFunc):
        self.inject_hook(HookType.ON_INLINE_COMMENT_START, func)
        return func

    def on_inline_comment_error(self, func: InlineCommentErrorHookFunc):
        self.inject_hook(HookType.ON_INLINE_COMMENT_ERROR, func)
        return func

    def on_inline_comment_complete(self, func: InlineCommentCompleteHookFunc):
        self.inject_hook(HookType.ON_INLINE_COMMENT_COMPLETE, func)
        return func

    async def emit_inline_comment_start(self, comment: InlineCommentSchema):
        await self.emit(HookType.ON_INLINE_COMMENT_START, comment=comment)

    async def emit_inline_comment_error(self, comment: InlineCommentSchema):
        await self.emit(HookType.ON_INLINE_COMMENT_ERROR, comment=comment)

    async def emit_inline_comment_complete(self, comment: InlineCommentSchema):
        await self.emit(HookType.ON_INLINE_COMMENT_COMPLETE, comment=comment)

    # --- Summary Comment ---
    def on_summary_comment_start(self, func: SummaryCommentStartHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_COMMENT_START, func)
        return func

    # Fixed: was annotated InlineCommentErrorHookFunc, but this hook receives
    # a SummaryCommentSchema (see emit_summary_comment_error below).
    def on_summary_comment_error(self, func: SummaryCommentErrorHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_COMMENT_ERROR, func)
        return func

    def on_summary_comment_complete(self, func: SummaryCommentCompleteHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_COMMENT_COMPLETE, func)
        return func

    async def emit_summary_comment_start(self, comment: SummaryCommentSchema):
        await self.emit(HookType.ON_SUMMARY_COMMENT_START, comment=comment)

    async def emit_summary_comment_error(self, comment: SummaryCommentSchema):
        await self.emit(HookType.ON_SUMMARY_COMMENT_ERROR, comment=comment)

    async def emit_summary_comment_complete(self, comment: SummaryCommentSchema):
        await self.emit(HookType.ON_SUMMARY_COMMENT_COMPLETE, comment=comment)
@@ -0,0 +1,28 @@
1
from typing import Callable, Awaitable

from ai_review.services.cost.schema import CostReportSchema
from ai_review.services.review.inline.schema import InlineCommentSchema
from ai_review.services.review.summary.schema import SummaryCommentSchema

# Catch-all signature accepted by HookService.inject_hook: any async callable.
HookFunc = Callable[..., Awaitable[None]]

# Chat round-trip hooks: start/error receive (prompt, prompt_system);
# complete receives the chat result and the (optional) cost report.
ChatStartHookFunc = Callable[[str, str], Awaitable[None]]
ChatErrorHookFunc = Callable[[str, str], Awaitable[None]]
ChatCompleteHookFunc = Callable[[str, CostReportSchema | None], Awaitable[None]]

# Inline review flow: start takes no fixed arguments; complete gets the cost report.
InlineReviewStartHookFunc = Callable[..., Awaitable[None]]
InlineReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[None]]

# Context (cross-file) review flow.
ContextReviewStartHookFunc = Callable[..., Awaitable[None]]
ContextReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[None]]

# Summary review flow.
SummaryReviewStartHookFunc = Callable[..., Awaitable[None]]
SummaryReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[None]]

# Per-inline-comment posting hooks; each receives the comment being posted.
InlineCommentStartHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]
InlineCommentErrorHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]
InlineCommentCompleteHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]

# Summary-comment posting hooks; each receives the summary comment.
SummaryCommentStartHookFunc = Callable[[SummaryCommentSchema], Awaitable[None]]
SummaryCommentErrorHookFunc = Callable[[SummaryCommentSchema], Awaitable[None]]
SummaryCommentCompleteHookFunc = Callable[[SummaryCommentSchema], Awaitable[None]]
@@ -1,10 +1,10 @@
1
1
  from ai_review.clients.claude.client import get_claude_http_client
2
2
  from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeMessageSchema
3
3
  from ai_review.config import settings
4
- from ai_review.services.llm.types import LLMClient, ChatResultSchema
4
+ from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
5
5
 
6
6
 
7
- class ClaudeLLMClient(LLMClient):
7
+ class ClaudeLLMClient(LLMClientProtocol):
8
8
  def __init__(self):
9
9
  self.http_client = get_claude_http_client()
10
10
 
@@ -3,10 +3,10 @@ from ai_review.libs.constants.llm_provider import LLMProvider
3
3
  from ai_review.services.llm.claude.client import ClaudeLLMClient
4
4
  from ai_review.services.llm.gemini.client import GeminiLLMClient
5
5
  from ai_review.services.llm.openai.client import OpenAILLMClient
6
- from ai_review.services.llm.types import LLMClient
6
+ from ai_review.services.llm.types import LLMClientProtocol
7
7
 
8
8
 
9
- def get_llm_client() -> LLMClient:
9
+ def get_llm_client() -> LLMClientProtocol:
10
10
  match settings.llm.provider:
11
11
  case LLMProvider.OPENAI:
12
12
  return OpenAILLMClient()
@@ -6,10 +6,10 @@ from ai_review.clients.gemini.schema import (
6
6
  GeminiGenerationConfigSchema,
7
7
  )
8
8
  from ai_review.config import settings
9
- from ai_review.services.llm.types import LLMClient, ChatResultSchema
9
+ from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
10
10
 
11
11
 
12
- class GeminiLLMClient(LLMClient):
12
+ class GeminiLLMClient(LLMClientProtocol):
13
13
  def __init__(self):
14
14
  self.http_client = get_gemini_http_client()
15
15
 
@@ -1,10 +1,10 @@
1
1
  from ai_review.clients.openai.client import get_openai_http_client
2
2
  from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIMessageSchema
3
3
  from ai_review.config import settings
4
- from ai_review.services.llm.types import LLMClient, ChatResultSchema
4
+ from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
5
5
 
6
6
 
7
- class OpenAILLMClient(LLMClient):
7
+ class OpenAILLMClient(LLMClientProtocol):
8
8
  def __init__(self):
9
9
  self.http_client = get_openai_http_client()
10
10
 
@@ -10,6 +10,6 @@ class ChatResultSchema(BaseModel):
10
10
  completion_tokens: int | None = None
11
11
 
12
12
 
13
- class LLMClient(Protocol):
13
+ class LLMClientProtocol(Protocol):
14
14
  async def chat(self, prompt: str, prompt_system: str) -> ChatResultSchema:
15
15
  ...
@@ -2,9 +2,10 @@ from ai_review.config import settings
2
2
  from ai_review.services.diff.schema import DiffFileSchema
3
3
  from ai_review.services.prompt.schema import PromptContextSchema
4
4
  from ai_review.services.prompt.tools import normalize_prompt, format_file
5
+ from ai_review.services.prompt.types import PromptServiceProtocol
5
6
 
6
7
 
7
- class PromptService:
8
+ class PromptService(PromptServiceProtocol):
8
9
  @classmethod
9
10
  def prepare_prompt(cls, prompts: list[str], context: PromptContextSchema) -> str:
10
11
  prompt = "\n\n".join(prompts)
@@ -0,0 +1,27 @@
1
from typing import Protocol

from ai_review.services.diff.schema import DiffFileSchema
from ai_review.services.prompt.schema import PromptContextSchema


class PromptServiceProtocol(Protocol):
    """Structural interface for prompt builders (implemented by PromptService)."""

    def prepare_prompt(self, prompts: list[str], context: PromptContextSchema) -> str:
        """Combine the prompt fragments into a single prompt rendered for *context*."""
        ...

    def build_inline_request(self, diff: DiffFileSchema, context: PromptContextSchema) -> str:
        """Build the user prompt for an inline review of one diff file."""
        ...

    def build_summary_request(self, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
        """Build the user prompt for a summary review over all diff files."""
        ...

    def build_context_request(self, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
        """Build the user prompt for a context (cross-file) review."""
        ...

    def build_system_inline_request(self, context: PromptContextSchema) -> str:
        """Build the system prompt for the inline review flow."""
        ...

    def build_system_context_request(self, context: PromptContextSchema) -> str:
        """Build the system prompt for the context review flow."""
        ...

    def build_system_summary_request(self, context: PromptContextSchema) -> str:
        """Build the system prompt for the summary review flow."""
        ...
File without changes
@@ -0,0 +1,65 @@
1
from ai_review.config import settings
from ai_review.libs.asynchronous.gather import bounded_gather
from ai_review.libs.logger import get_logger
from ai_review.services.review.inline.schema import InlineCommentListSchema, InlineCommentSchema
from ai_review.services.review.summary.schema import SummaryCommentSchema
from ai_review.services.vcs.types import VCSClientProtocol
from ai_review.services.hook import hook

logger = get_logger("REVIEW_COMMENT_GATEWAY")


class ReviewCommentGateway:
    """Posts inline and summary review comments through a VCS client.

    Each posting step is bracketed by hook emissions; a failed inline
    comment degrades to a general (summary-style) comment.
    """

    def __init__(self, vcs: VCSClientProtocol):
        self.vcs = vcs

    async def has_existing_inline_comments(self) -> bool:
        """True when an AI-tagged inline comment is already on the change."""
        existing = await self.vcs.get_inline_comments()
        for item in existing:
            if settings.review.inline_tag in item.body:
                logger.info("Skipping inline review: AI inline comments already exist")
                return True
        return False

    async def has_existing_summary_comments(self) -> bool:
        """True when an AI-tagged summary comment is already on the change."""
        existing = await self.vcs.get_general_comments()
        for item in existing:
            if settings.review.summary_tag in item.body:
                logger.info("Skipping summary review: AI summary comment already exists")
                return True
        return False

    async def process_inline_comment(self, comment: InlineCommentSchema):
        """Post one inline comment; fall back to a general comment on failure."""
        try:
            await hook.emit_inline_comment_start(comment)
            await self.vcs.create_inline_comment(
                file=comment.file,
                line=comment.line,
                message=comment.body_with_tag,
            )
            await hook.emit_inline_comment_complete(comment)
        except Exception as error:
            logger.exception(
                f"Failed to process inline comment for {comment.file}:{comment.line} — {error}"
            )
            await hook.emit_inline_comment_error(comment)

            logger.warning(f"Falling back to general comment for {comment.file}:{comment.line}")
            fallback = SummaryCommentSchema(text=comment.fallback_body)
            await self.process_summary_comment(fallback)

    async def process_summary_comment(self, comment: SummaryCommentSchema):
        """Post a general (summary) comment, bracketed by hook emissions."""
        try:
            await hook.emit_summary_comment_start(comment)
            await self.vcs.create_general_comment(comment.body_with_tag)
            await hook.emit_summary_comment_complete(comment)
        except Exception as error:
            logger.exception(f"Failed to process summary comment: {comment} — {error}")
            await hook.emit_summary_comment_error(comment)

    async def process_inline_comments(self, comments: InlineCommentListSchema) -> None:
        """Post every comment in *comments* with bounded concurrency."""
        pending = [self.process_inline_comment(item) for item in comments.root]
        await bounded_gather(pending)
@@ -0,0 +1,40 @@
1
from ai_review.libs.logger import get_logger
from ai_review.services.artifacts.types import ArtifactsServiceProtocol
from ai_review.services.cost.types import CostServiceProtocol
from ai_review.services.hook import hook
from ai_review.services.llm.types import LLMClientProtocol

logger = get_logger("REVIEW_LLM_GATEWAY")


class ReviewLLMGateway:
    """Runs LLM chat requests with cost accounting, artifacts, and hooks."""

    def __init__(
        self,
        llm: LLMClientProtocol,
        cost: CostServiceProtocol,
        artifacts: ArtifactsServiceProtocol
    ):
        self.llm = llm
        self.cost = cost
        self.artifacts = artifacts

    async def ask(self, prompt: str, prompt_system: str) -> str:
        """Send one chat request and return the response text.

        Emits chat start/complete hooks, records the cost report, and saves
        the interaction as an artifact.

        Raises:
            Exception: re-raises whatever the underlying LLM call raised,
                after logging and emitting the chat-error hook.
        """
        try:
            await hook.emit_chat_start(prompt, prompt_system)
            result = await self.llm.chat(prompt, prompt_system)
            if not result.text:
                logger.warning(
                    f"LLM returned an empty response (prompt length={len(prompt)} chars)"
                )

            report = self.cost.calculate(result)
            if report:
                logger.info(report.pretty())

            await hook.emit_chat_complete(result, report)
            await self.artifacts.save_llm_interaction(prompt, prompt_system, result.text)

            return result.text
        except Exception as error:
            logger.exception(f"LLM request failed: {error}")
            await hook.emit_chat_error(prompt, prompt_system)
            # Re-raise so callers never receive an implicit None from a
            # function annotated `-> str` (the pre-refactor ask_llm re-raised;
            # downstream parse_model_output would otherwise crash on None).
            raise
@@ -38,8 +38,8 @@ class InlineCommentSchema(BaseModel):
38
38
  return f"{self.body}\n\n{settings.review.inline_tag}"
39
39
 
40
40
  @property
41
- def fallback_body_with_tag(self) -> str:
42
- return f"**{self.file}:{self.line}** — {self.message}\n\n{settings.review.inline_tag}"
41
+ def fallback_body(self) -> str:
42
+ return f"**{self.file}:{self.line}** — {self.message}"
43
43
 
44
44
 
45
45
  class InlineCommentListSchema(RootModel[list[InlineCommentSchema]]):
@@ -5,6 +5,7 @@ from pydantic import ValidationError
5
5
  from ai_review.libs.json import sanitize_json_string
6
6
  from ai_review.libs.logger import get_logger
7
7
  from ai_review.services.review.inline.schema import InlineCommentListSchema
8
+ from ai_review.services.review.inline.types import InlineCommentServiceProtocol
8
9
 
9
10
  logger = get_logger("INLINE_COMMENT_SERVICE")
10
11
 
@@ -12,7 +13,7 @@ FIRST_JSON_ARRAY_RE = re.compile(r"\[[\s\S]*]", re.MULTILINE)
12
13
  CLEAN_JSON_BLOCK_RE = re.compile(r"```(?:json)?(.*?)```", re.DOTALL | re.IGNORECASE)
13
14
 
14
15
 
15
- class InlineCommentService:
16
+ class InlineCommentService(InlineCommentServiceProtocol):
16
17
  @classmethod
17
18
  def try_parse_model_output(cls, raw: str) -> InlineCommentListSchema | None:
18
19
  try:
@@ -0,0 +1,11 @@
1
from typing import Protocol

from ai_review.services.review.inline.schema import InlineCommentListSchema


class InlineCommentServiceProtocol(Protocol):
    """Structural interface for parsing LLM output into inline comments."""

    def parse_model_output(self, output: str) -> InlineCommentListSchema:
        """Parse raw model output into a validated comment list (non-optional return)."""
        ...

    def try_parse_model_output(self, raw: str) -> InlineCommentListSchema | None:
        """Best-effort parse; returns None when *raw* cannot be parsed."""
        ...
@@ -1,16 +1,15 @@
1
- from typing import Literal
2
-
3
- from ai_review.config import settings
4
1
  from ai_review.libs.asynchronous.gather import bounded_gather
5
2
  from ai_review.libs.logger import get_logger
6
3
  from ai_review.services.artifacts.service import ArtifactsService
7
4
  from ai_review.services.cost.service import CostService
8
5
  from ai_review.services.diff.service import DiffService
9
6
  from ai_review.services.git.service import GitService
7
+ from ai_review.services.hook import hook
10
8
  from ai_review.services.llm.factory import get_llm_client
11
9
  from ai_review.services.prompt.adapter import build_prompt_context_from_mr_info
12
10
  from ai_review.services.prompt.service import PromptService
13
- from ai_review.services.review.inline.schema import InlineCommentListSchema
11
+ from ai_review.services.review.gateway.comment import ReviewCommentGateway
12
+ from ai_review.services.review.gateway.llm import ReviewLLMGateway
14
13
  from ai_review.services.review.inline.service import InlineCommentService
15
14
  from ai_review.services.review.policy.service import ReviewPolicyService
16
15
  from ai_review.services.review.summary.service import SummaryCommentService
@@ -33,68 +32,12 @@ class ReviewService:
33
32
  self.summary = SummaryCommentService()
34
33
  self.artifacts = ArtifactsService()
35
34
 
36
- async def ask_llm(self, prompt: str, prompt_system: str) -> str:
37
- try:
38
- result = await self.llm.chat(prompt, prompt_system)
39
- if not result.text:
40
- logger.warning(
41
- f"LLM returned an empty response (prompt length={len(prompt)} chars)"
42
- )
43
-
44
- report = self.cost.calculate(result)
45
- if report:
46
- logger.info(report.pretty())
47
-
48
- await self.artifacts.save_llm_interaction(prompt, prompt_system, result.text)
49
-
50
- return result.text
51
- except Exception as error:
52
- logger.exception(f"LLM request failed: {error}")
53
- raise
54
-
55
- async def has_existing_inline_comments(self) -> bool:
56
- comments = await self.vcs.get_inline_comments()
57
- has_comments = any(
58
- settings.review.inline_tag in comment.body
59
- for comment in comments
60
- )
61
- if has_comments:
62
- logger.info("Skipping inline review: AI inline comments already exist")
63
-
64
- return has_comments
65
-
66
- async def has_existing_summary_comments(self) -> bool:
67
- comments = await self.vcs.get_general_comments()
68
- has_comments = any(
69
- settings.review.summary_tag in comment.body for comment in comments
35
+ self.llm_gateway = ReviewLLMGateway(
36
+ llm=self.llm,
37
+ cost=self.cost,
38
+ artifacts=self.artifacts
70
39
  )
71
- if has_comments:
72
- logger.info("Skipping summary review: AI summary comment already exists")
73
-
74
- return has_comments
75
-
76
- async def process_inline_comments(
77
- self,
78
- flow: Literal["inline", "context"],
79
- comments: InlineCommentListSchema
80
- ) -> None:
81
- results = await bounded_gather([
82
- self.vcs.create_inline_comment(
83
- file=comment.file,
84
- line=comment.line,
85
- message=comment.body_with_tag
86
- )
87
- for comment in comments.root
88
- ])
89
-
90
- fallbacks = [
91
- self.vcs.create_general_comment(comment.fallback_body_with_tag)
92
- for comment, result in zip(comments.root, results)
93
- if isinstance(result, Exception)
94
- ]
95
- if fallbacks:
96
- logger.warning(f"Falling back to {len(fallbacks)} general comments ({flow} review)")
97
- await bounded_gather(fallbacks)
40
+ self.comment_gateway = ReviewCommentGateway(vcs=self.vcs)
98
41
 
99
42
  async def process_file_inline(self, file: str, review_info: ReviewInfoSchema) -> None:
100
43
  raw_diff = self.git.get_diff_for_file(review_info.base_sha, review_info.head_sha, file)
@@ -111,7 +54,7 @@ class ReviewService:
111
54
  prompt_context = build_prompt_context_from_mr_info(review_info)
112
55
  prompt = self.prompt.build_inline_request(rendered_file, prompt_context)
113
56
  prompt_system = self.prompt.build_system_inline_request(prompt_context)
114
- prompt_result = await self.ask_llm(prompt, prompt_system)
57
+ prompt_result = await self.llm_gateway.ask(prompt, prompt_system)
115
58
 
116
59
  comments = self.inline.parse_model_output(prompt_result).dedupe()
117
60
  comments.root = self.policy.apply_for_inline_comments(comments.root)
@@ -120,10 +63,11 @@ class ReviewService:
120
63
  return
121
64
 
122
65
  logger.info(f"Posting {len(comments.root)} inline comments to {file}")
123
- await self.process_inline_comments(flow="inline", comments=comments)
66
+ await self.comment_gateway.process_inline_comments(comments)
124
67
 
125
68
  async def run_inline_review(self) -> None:
126
- if await self.has_existing_inline_comments():
69
+ await hook.emit_inline_review_start()
70
+ if await self.comment_gateway.has_existing_inline_comments():
127
71
  return
128
72
 
129
73
  review_info = await self.vcs.get_review_info()
@@ -134,9 +78,11 @@ class ReviewService:
134
78
  self.process_file_inline(changed_file, review_info)
135
79
  for changed_file in changed_files
136
80
  ])
81
+ await hook.emit_inline_review_complete(self.cost.aggregate())
137
82
 
138
83
  async def run_context_review(self) -> None:
139
- if await self.has_existing_inline_comments():
84
+ await hook.emit_context_review_start()
85
+ if await self.comment_gateway.has_existing_inline_comments():
140
86
  return
141
87
 
142
88
  review_info = await self.vcs.get_review_info()
@@ -156,7 +102,7 @@ class ReviewService:
156
102
  prompt_context = build_prompt_context_from_mr_info(review_info)
157
103
  prompt = self.prompt.build_context_request(rendered_files, prompt_context)
158
104
  prompt_system = self.prompt.build_system_context_request(prompt_context)
159
- prompt_result = await self.ask_llm(prompt, prompt_system)
105
+ prompt_result = await self.llm_gateway.ask(prompt, prompt_system)
160
106
 
161
107
  comments = self.inline.parse_model_output(prompt_result).dedupe()
162
108
  comments.root = self.policy.apply_for_context_comments(comments.root)
@@ -165,10 +111,12 @@ class ReviewService:
165
111
  return
166
112
 
167
113
  logger.info(f"Posting {len(comments.root)} inline comments (context review)")
168
- await self.process_inline_comments(flow="context", comments=comments)
114
+ await self.comment_gateway.process_inline_comments(comments)
115
+ await hook.emit_context_review_complete(self.cost.aggregate())
169
116
 
170
117
  async def run_summary_review(self) -> None:
171
- if await self.has_existing_summary_comments():
118
+ await hook.emit_summary_review_start()
119
+ if await self.comment_gateway.has_existing_summary_comments():
172
120
  return
173
121
 
174
122
  review_info = await self.vcs.get_review_info()
@@ -188,7 +136,7 @@ class ReviewService:
188
136
  prompt_context = build_prompt_context_from_mr_info(review_info)
189
137
  prompt = self.prompt.build_summary_request(rendered_files, prompt_context)
190
138
  prompt_system = self.prompt.build_system_summary_request(prompt_context)
191
- prompt_result = await self.ask_llm(prompt, prompt_system)
139
+ prompt_result = await self.llm_gateway.ask(prompt, prompt_system)
192
140
 
193
141
  summary = self.summary.parse_model_output(prompt_result)
194
142
  if not summary.text.strip():
@@ -196,7 +144,8 @@ class ReviewService:
196
144
  return
197
145
 
198
146
  logger.info(f"Posting summary review comment ({len(summary.text)} chars)")
199
- await self.vcs.create_general_comment(summary.body_with_tag)
147
+ await self.comment_gateway.process_summary_comment(summary)
148
+ await hook.emit_summary_review_complete(self.cost.aggregate())
200
149
 
201
150
  def report_total_cost(self):
202
151
  total_report = self.cost.aggregate()
@@ -1,10 +1,11 @@
1
1
  from ai_review.libs.logger import get_logger
2
2
  from ai_review.services.review.summary.schema import SummaryCommentSchema
3
+ from ai_review.services.review.summary.types import SummaryCommentServiceProtocol
3
4
 
4
5
  logger = get_logger("SUMMARY_COMMENT_SERVICE")
5
6
 
6
7
 
7
- class SummaryCommentService:
8
+ class SummaryCommentService(SummaryCommentServiceProtocol):
8
9
  @classmethod
9
10
  def parse_model_output(cls, output: str) -> SummaryCommentSchema:
10
11
  text = (output or "").strip()
@@ -0,0 +1,8 @@
1
from typing import Protocol

from ai_review.services.review.summary.schema import SummaryCommentSchema


class SummaryCommentServiceProtocol(Protocol):
    """Structural interface for turning LLM output into a summary comment."""

    def parse_model_output(self, output: str) -> SummaryCommentSchema:
        """Convert raw model output into a SummaryCommentSchema."""
        ...