xai-review 0.19.0-py3-none-any.whl → 0.21.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xai-review might be problematic; see the registry's advisory page for details.
- ai_review/services/artifacts/service.py +2 -1
- ai_review/services/artifacts/types.py +20 -0
- ai_review/services/cost/service.py +2 -1
- ai_review/services/cost/types.py +12 -0
- ai_review/services/diff/service.py +2 -1
- ai_review/services/diff/types.py +28 -0
- ai_review/services/hook/__init__.py +5 -0
- ai_review/services/hook/constants.py +24 -0
- ai_review/services/hook/service.py +162 -0
- ai_review/services/hook/types.py +28 -0
- ai_review/services/llm/claude/client.py +2 -2
- ai_review/services/llm/factory.py +2 -2
- ai_review/services/llm/gemini/client.py +2 -2
- ai_review/services/llm/openai/client.py +2 -2
- ai_review/services/llm/types.py +1 -1
- ai_review/services/prompt/service.py +2 -1
- ai_review/services/prompt/types.py +27 -0
- ai_review/services/review/gateway/__init__.py +0 -0
- ai_review/services/review/gateway/comment.py +65 -0
- ai_review/services/review/gateway/llm.py +40 -0
- ai_review/services/review/inline/schema.py +2 -2
- ai_review/services/review/inline/service.py +2 -1
- ai_review/services/review/inline/types.py +11 -0
- ai_review/services/review/service.py +23 -74
- ai_review/services/review/summary/service.py +2 -1
- ai_review/services/review/summary/types.py +8 -0
- ai_review/services/vcs/factory.py +2 -2
- ai_review/services/vcs/github/client.py +4 -2
- ai_review/services/vcs/gitlab/client.py +4 -2
- ai_review/services/vcs/types.py +1 -1
- ai_review/tests/fixtures/artifacts.py +51 -0
- ai_review/tests/fixtures/cost.py +48 -0
- ai_review/tests/fixtures/diff.py +46 -0
- ai_review/tests/fixtures/git.py +11 -5
- ai_review/tests/fixtures/llm.py +26 -0
- ai_review/tests/fixtures/prompt.py +43 -0
- ai_review/tests/fixtures/review/__init__.py +0 -0
- ai_review/tests/fixtures/review/inline.py +25 -0
- ai_review/tests/fixtures/review/summary.py +19 -0
- ai_review/tests/fixtures/vcs.py +49 -0
- ai_review/tests/suites/services/diff/test_service.py +3 -3
- ai_review/tests/suites/services/diff/test_tools.py +9 -9
- ai_review/tests/suites/services/hook/__init__.py +0 -0
- ai_review/tests/suites/services/hook/test_service.py +93 -0
- ai_review/tests/suites/services/review/inline/test_schema.py +10 -9
- ai_review/tests/suites/services/review/summary/test_schema.py +0 -1
- ai_review/tests/suites/services/review/summary/test_service.py +10 -7
- ai_review/tests/suites/services/review/test_service.py +126 -0
- {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/METADATA +10 -7
- {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/RECORD +54 -29
- {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/WHEEL +0 -0
- {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/entry_points.txt +0 -0
- {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/licenses/LICENSE +0 -0
- {xai_review-0.19.0.dist-info → xai_review-0.21.0.dist-info}/top_level.txt +0 -0
|
@@ -6,11 +6,12 @@ from ai_review.config import settings
|
|
|
6
6
|
from ai_review.libs.logger import get_logger
|
|
7
7
|
from ai_review.services.artifacts.schema import LLMArtifactSchema
|
|
8
8
|
from ai_review.services.artifacts.tools import make_artifact_id
|
|
9
|
+
from ai_review.services.artifacts.types import ArtifactsServiceProtocol
|
|
9
10
|
|
|
10
11
|
logger = get_logger("ARTIFACTS_SERVICE")
|
|
11
12
|
|
|
12
13
|
|
|
13
|
-
class ArtifactsService:
|
|
14
|
+
class ArtifactsService(ArtifactsServiceProtocol):
|
|
14
15
|
@classmethod
|
|
15
16
|
async def save_llm_interaction(cls, prompt: str, prompt_system: str, response: str | None = None) -> str | None:
|
|
16
17
|
if not settings.artifacts.llm_enabled:
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
from typing import Protocol
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class ArtifactsServiceProtocol(Protocol):
|
|
6
|
+
async def save_llm_interaction(
|
|
7
|
+
self,
|
|
8
|
+
prompt: str,
|
|
9
|
+
prompt_system: str,
|
|
10
|
+
response: str | None = None
|
|
11
|
+
) -> str | None:
|
|
12
|
+
...
|
|
13
|
+
|
|
14
|
+
async def save_artifact(
|
|
15
|
+
self,
|
|
16
|
+
file: Path,
|
|
17
|
+
content: str,
|
|
18
|
+
kind: str = "artifact"
|
|
19
|
+
) -> Path | None:
|
|
20
|
+
...
|
|
@@ -1,12 +1,13 @@
|
|
|
1
1
|
from ai_review.config import settings
|
|
2
2
|
from ai_review.libs.logger import get_logger
|
|
3
3
|
from ai_review.services.cost.schema import CostReportSchema
|
|
4
|
+
from ai_review.services.cost.types import CostServiceProtocol
|
|
4
5
|
from ai_review.services.llm.types import ChatResultSchema
|
|
5
6
|
|
|
6
7
|
logger = get_logger("COST_SERVICE")
|
|
7
8
|
|
|
8
9
|
|
|
9
|
-
class CostService:
|
|
10
|
+
class CostService(CostServiceProtocol):
|
|
10
11
|
def __init__(self):
|
|
11
12
|
self.pricing = settings.llm.load_pricing()
|
|
12
13
|
self.reports: list[CostReportSchema] = []
|
|
from typing import Protocol

from ai_review.services.cost.schema import CostReportSchema
from ai_review.services.llm.types import ChatResultSchema


class CostServiceProtocol(Protocol):
    """Structural interface for LLM cost accounting."""

    def calculate(self, result: ChatResultSchema) -> CostReportSchema | None:
        """Build a cost report for one chat result, or None if not computable."""
        ...

    def aggregate(self) -> CostReportSchema | None:
        """Combine previously calculated reports into one, or None if empty."""
        ...
|
@@ -16,12 +16,13 @@ from ai_review.services.diff.renderers import (
|
|
|
16
16
|
)
|
|
17
17
|
from ai_review.services.diff.schema import DiffFileSchema
|
|
18
18
|
from ai_review.services.diff.tools import find_diff_file
|
|
19
|
+
from ai_review.services.diff.types import DiffServiceProtocol
|
|
19
20
|
from ai_review.services.git.types import GitServiceProtocol
|
|
20
21
|
|
|
21
22
|
logger = get_logger("DIFF_SERVICE")
|
|
22
23
|
|
|
23
24
|
|
|
24
|
-
class DiffService:
|
|
25
|
+
class DiffService(DiffServiceProtocol):
|
|
25
26
|
@classmethod
|
|
26
27
|
def parse(cls, raw_diff: str) -> Diff:
|
|
27
28
|
if not raw_diff.strip():
|
|
from typing import Protocol

from ai_review.libs.diff.models import Diff
from ai_review.services.diff.schema import DiffFileSchema
from ai_review.services.git.types import GitServiceProtocol


class DiffServiceProtocol(Protocol):
    """Structural interface for parsing and rendering VCS diffs."""

    def parse(self, raw_diff: str) -> Diff:
        """Parse a raw unified-diff string into a Diff model."""
        ...

    def render_file(
        self,
        file: str,
        raw_diff: str,
        base_sha: str | None = None,
        head_sha: str | None = None,
    ) -> DiffFileSchema:
        """Render a single file's diff into a DiffFileSchema."""
        ...

    def render_files(
        self,
        git: GitServiceProtocol,
        files: list[str],
        base_sha: str,
        head_sha: str,
    ) -> list[DiffFileSchema]:
        """Render diffs for several files between two revisions."""
        ...
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from enum import StrEnum
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class HookType(StrEnum):
|
|
5
|
+
ON_CHAT_START = "ON_CHAT_START"
|
|
6
|
+
ON_CHAT_ERROR = "ON_CHAT_ERROR"
|
|
7
|
+
ON_CHAT_COMPLETE = "ON_CHAT_COMPLETE"
|
|
8
|
+
|
|
9
|
+
ON_INLINE_REVIEW_START = "ON_INLINE_REVIEW_START"
|
|
10
|
+
ON_INLINE_REVIEW_COMPLETE = "ON_INLINE_REVIEW_COMPLETE"
|
|
11
|
+
|
|
12
|
+
ON_CONTEXT_REVIEW_START = "ON_CONTEXT_REVIEW_START"
|
|
13
|
+
ON_CONTEXT_REVIEW_COMPLETE = "ON_CONTEXT_REVIEW_COMPLETE"
|
|
14
|
+
|
|
15
|
+
ON_SUMMARY_REVIEW_START = "ON_SUMMARY_REVIEW_START"
|
|
16
|
+
ON_SUMMARY_REVIEW_COMPLETE = "ON_SUMMARY_REVIEW_COMPLETE"
|
|
17
|
+
|
|
18
|
+
ON_INLINE_COMMENT_START = "ON_INLINE_COMMENT_START"
|
|
19
|
+
ON_INLINE_COMMENT_ERROR = "ON_INLINE_COMMENT_ERROR"
|
|
20
|
+
ON_INLINE_COMMENT_COMPLETE = "ON_INLINE_COMMENT_COMPLETE"
|
|
21
|
+
|
|
22
|
+
ON_SUMMARY_COMMENT_START = "ON_SUMMARY_COMMENT_START"
|
|
23
|
+
ON_SUMMARY_COMMENT_ERROR = "ON_SUMMARY_COMMENT_ERROR"
|
|
24
|
+
ON_SUMMARY_COMMENT_COMPLETE = "ON_SUMMARY_COMMENT_COMPLETE"
|
|
from collections import defaultdict
from typing import Any

from ai_review.libs.logger import get_logger
from ai_review.services.cost.schema import CostReportSchema
from ai_review.services.hook.constants import HookType
from ai_review.services.hook.types import (
    HookFunc,
    # --- Chat ---
    ChatStartHookFunc,
    ChatErrorHookFunc,
    ChatCompleteHookFunc,
    # --- Inline Review ---
    InlineReviewStartHookFunc,
    InlineReviewCompleteHookFunc,
    # --- Context Review ---
    ContextReviewStartHookFunc,
    ContextReviewCompleteHookFunc,
    # --- Summary Review ---
    SummaryReviewStartHookFunc,
    SummaryReviewCompleteHookFunc,
    # --- Inline Comment ---
    InlineCommentStartHookFunc,
    InlineCommentErrorHookFunc,
    InlineCommentCompleteHookFunc,
    # --- Summary Comment ---
    SummaryCommentStartHookFunc,
    SummaryCommentErrorHookFunc,
    SummaryCommentCompleteHookFunc,
)
from ai_review.services.review.inline.schema import InlineCommentSchema
from ai_review.services.review.summary.schema import SummaryCommentSchema

logger = get_logger("HOOK_SERVICE")


class HookService:
    """Registry and dispatcher for review lifecycle hooks.

    ``on_*`` methods register a callback for a :class:`HookType` (they return
    the function unchanged, so they are usable as decorators); ``emit_*``
    methods await every callback registered for the matching event. A failing
    hook is logged and swallowed so it never aborts the review pipeline.
    """

    def __init__(self):
        # One callback list per hook type; absent keys default to [].
        self.hooks: dict[HookType, list[HookFunc]] = defaultdict(list)

    def inject_hook(self, name: HookType, func: HookFunc):
        """Register `func` to run whenever hook `name` is emitted."""
        self.hooks[name].append(func)

    async def emit(self, name: HookType, *args: Any, **kwargs: Any):
        """Await every callback registered for `name`, isolating failures."""
        if not self.hooks.get(name):
            return

        for callback in self.hooks[name]:
            try:
                await callback(*args, **kwargs)
            except Exception as error:
                # A broken hook must not break the review run itself.
                logger.exception(f"Error in {name} hook: {error}")

    # --- Chat ---
    def on_chat_start(self, func: ChatStartHookFunc):
        self.inject_hook(HookType.ON_CHAT_START, func)
        return func

    def on_chat_error(self, func: ChatErrorHookFunc):
        self.inject_hook(HookType.ON_CHAT_ERROR, func)
        return func

    def on_chat_complete(self, func: ChatCompleteHookFunc):
        self.inject_hook(HookType.ON_CHAT_COMPLETE, func)
        return func

    async def emit_chat_start(self, prompt: str, prompt_system: str):
        await self.emit(HookType.ON_CHAT_START, prompt=prompt, prompt_system=prompt_system)

    async def emit_chat_error(self, prompt: str, prompt_system: str):
        await self.emit(HookType.ON_CHAT_ERROR, prompt=prompt, prompt_system=prompt_system)

    async def emit_chat_complete(self, result: str, report: CostReportSchema | None):
        await self.emit(HookType.ON_CHAT_COMPLETE, result=result, report=report)

    # --- Inline Review ---
    def on_inline_review_start(self, func: InlineReviewStartHookFunc):
        self.inject_hook(HookType.ON_INLINE_REVIEW_START, func)
        return func

    def on_inline_review_complete(self, func: InlineReviewCompleteHookFunc):
        self.inject_hook(HookType.ON_INLINE_REVIEW_COMPLETE, func)
        return func

    async def emit_inline_review_start(self):
        await self.emit(HookType.ON_INLINE_REVIEW_START)

    async def emit_inline_review_complete(self, report: CostReportSchema | None):
        await self.emit(HookType.ON_INLINE_REVIEW_COMPLETE, report=report)

    # --- Context Review ---
    def on_context_review_start(self, func: ContextReviewStartHookFunc):
        self.inject_hook(HookType.ON_CONTEXT_REVIEW_START, func)
        return func

    def on_context_review_complete(self, func: ContextReviewCompleteHookFunc):
        self.inject_hook(HookType.ON_CONTEXT_REVIEW_COMPLETE, func)
        return func

    async def emit_context_review_start(self):
        await self.emit(HookType.ON_CONTEXT_REVIEW_START)

    async def emit_context_review_complete(self, report: CostReportSchema | None):
        await self.emit(HookType.ON_CONTEXT_REVIEW_COMPLETE, report=report)

    # --- Summary Review ---
    def on_summary_review_start(self, func: SummaryReviewStartHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_REVIEW_START, func)
        return func

    def on_summary_review_complete(self, func: SummaryReviewCompleteHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_REVIEW_COMPLETE, func)
        return func

    async def emit_summary_review_start(self):
        await self.emit(HookType.ON_SUMMARY_REVIEW_START)

    async def emit_summary_review_complete(self, report: CostReportSchema | None):
        await self.emit(HookType.ON_SUMMARY_REVIEW_COMPLETE, report=report)

    # --- Inline Comment ---
    def on_inline_comment_start(self, func: InlineCommentStartHookFunc):
        self.inject_hook(HookType.ON_INLINE_COMMENT_START, func)
        return func

    def on_inline_comment_error(self, func: InlineCommentErrorHookFunc):
        self.inject_hook(HookType.ON_INLINE_COMMENT_ERROR, func)
        return func

    def on_inline_comment_complete(self, func: InlineCommentCompleteHookFunc):
        self.inject_hook(HookType.ON_INLINE_COMMENT_COMPLETE, func)
        return func

    async def emit_inline_comment_start(self, comment: InlineCommentSchema):
        await self.emit(HookType.ON_INLINE_COMMENT_START, comment=comment)

    async def emit_inline_comment_error(self, comment: InlineCommentSchema):
        await self.emit(HookType.ON_INLINE_COMMENT_ERROR, comment=comment)

    async def emit_inline_comment_complete(self, comment: InlineCommentSchema):
        await self.emit(HookType.ON_INLINE_COMMENT_COMPLETE, comment=comment)

    # --- Summary Comment ---
    def on_summary_comment_start(self, func: SummaryCommentStartHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_COMMENT_START, func)
        return func

    # Fix: the original annotated this callback as InlineCommentErrorHookFunc;
    # summary-comment error hooks receive a SummaryCommentSchema (see emit below),
    # so the matching alias is SummaryCommentErrorHookFunc.
    def on_summary_comment_error(self, func: SummaryCommentErrorHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_COMMENT_ERROR, func)
        return func

    def on_summary_comment_complete(self, func: SummaryCommentCompleteHookFunc):
        self.inject_hook(HookType.ON_SUMMARY_COMMENT_COMPLETE, func)
        return func

    async def emit_summary_comment_start(self, comment: SummaryCommentSchema):
        await self.emit(HookType.ON_SUMMARY_COMMENT_START, comment=comment)

    async def emit_summary_comment_error(self, comment: SummaryCommentSchema):
        await self.emit(HookType.ON_SUMMARY_COMMENT_ERROR, comment=comment)

    async def emit_summary_comment_complete(self, comment: SummaryCommentSchema):
        await self.emit(HookType.ON_SUMMARY_COMMENT_COMPLETE, comment=comment)
"""Callable signatures for the hook system; every hook is awaited."""

from typing import Awaitable, Callable

from ai_review.services.cost.schema import CostReportSchema
from ai_review.services.review.inline.schema import InlineCommentSchema
from ai_review.services.review.summary.schema import SummaryCommentSchema

# Generic fallback: any async callable returning None.
HookFunc = Callable[..., Awaitable[None]]

# Chat: (prompt, prompt_system) for start/error; (result, cost report) on complete.
ChatStartHookFunc = Callable[[str, str], Awaitable[None]]
ChatErrorHookFunc = Callable[[str, str], Awaitable[None]]
ChatCompleteHookFunc = Callable[[str, CostReportSchema | None], Awaitable[None]]

# Inline review pass: start takes no fixed args; complete receives the cost report.
InlineReviewStartHookFunc = Callable[..., Awaitable[None]]
InlineReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[None]]

# Context review pass.
ContextReviewStartHookFunc = Callable[..., Awaitable[None]]
ContextReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[None]]

# Summary review pass.
SummaryReviewStartHookFunc = Callable[..., Awaitable[None]]
SummaryReviewCompleteHookFunc = Callable[[CostReportSchema | None], Awaitable[None]]

# Inline comment posting: each hook receives the comment being posted.
InlineCommentStartHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]
InlineCommentErrorHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]
InlineCommentCompleteHookFunc = Callable[[InlineCommentSchema], Awaitable[None]]

# Summary comment posting.
SummaryCommentStartHookFunc = Callable[[SummaryCommentSchema], Awaitable[None]]
SummaryCommentErrorHookFunc = Callable[[SummaryCommentSchema], Awaitable[None]]
SummaryCommentCompleteHookFunc = Callable[[SummaryCommentSchema], Awaitable[None]]
@@ -1,10 +1,10 @@
|
|
|
1
1
|
from ai_review.clients.claude.client import get_claude_http_client
|
|
2
2
|
from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeMessageSchema
|
|
3
3
|
from ai_review.config import settings
|
|
4
|
-
from ai_review.services.llm.types import
|
|
4
|
+
from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
|
|
5
5
|
|
|
6
6
|
|
|
7
|
-
class ClaudeLLMClient(
|
|
7
|
+
class ClaudeLLMClient(LLMClientProtocol):
|
|
8
8
|
def __init__(self):
|
|
9
9
|
self.http_client = get_claude_http_client()
|
|
10
10
|
|
|
@@ -3,10 +3,10 @@ from ai_review.libs.constants.llm_provider import LLMProvider
|
|
|
3
3
|
from ai_review.services.llm.claude.client import ClaudeLLMClient
|
|
4
4
|
from ai_review.services.llm.gemini.client import GeminiLLMClient
|
|
5
5
|
from ai_review.services.llm.openai.client import OpenAILLMClient
|
|
6
|
-
from ai_review.services.llm.types import
|
|
6
|
+
from ai_review.services.llm.types import LLMClientProtocol
|
|
7
7
|
|
|
8
8
|
|
|
9
|
-
def get_llm_client() ->
|
|
9
|
+
def get_llm_client() -> LLMClientProtocol:
|
|
10
10
|
match settings.llm.provider:
|
|
11
11
|
case LLMProvider.OPENAI:
|
|
12
12
|
return OpenAILLMClient()
|
|
@@ -6,10 +6,10 @@ from ai_review.clients.gemini.schema import (
|
|
|
6
6
|
GeminiGenerationConfigSchema,
|
|
7
7
|
)
|
|
8
8
|
from ai_review.config import settings
|
|
9
|
-
from ai_review.services.llm.types import
|
|
9
|
+
from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
|
|
10
10
|
|
|
11
11
|
|
|
12
|
-
class GeminiLLMClient(
|
|
12
|
+
class GeminiLLMClient(LLMClientProtocol):
|
|
13
13
|
def __init__(self):
|
|
14
14
|
self.http_client = get_gemini_http_client()
|
|
15
15
|
|
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
from ai_review.clients.openai.client import get_openai_http_client
|
|
2
2
|
from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIMessageSchema
|
|
3
3
|
from ai_review.config import settings
|
|
4
|
-
from ai_review.services.llm.types import
|
|
4
|
+
from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
|
|
5
5
|
|
|
6
6
|
|
|
7
|
-
class OpenAILLMClient(
|
|
7
|
+
class OpenAILLMClient(LLMClientProtocol):
|
|
8
8
|
def __init__(self):
|
|
9
9
|
self.http_client = get_openai_http_client()
|
|
10
10
|
|
ai_review/services/llm/types.py
CHANGED
|
@@ -2,9 +2,10 @@ from ai_review.config import settings
|
|
|
2
2
|
from ai_review.services.diff.schema import DiffFileSchema
|
|
3
3
|
from ai_review.services.prompt.schema import PromptContextSchema
|
|
4
4
|
from ai_review.services.prompt.tools import normalize_prompt, format_file
|
|
5
|
+
from ai_review.services.prompt.types import PromptServiceProtocol
|
|
5
6
|
|
|
6
7
|
|
|
7
|
-
class PromptService:
|
|
8
|
+
class PromptService(PromptServiceProtocol):
|
|
8
9
|
@classmethod
|
|
9
10
|
def prepare_prompt(cls, prompts: list[str], context: PromptContextSchema) -> str:
|
|
10
11
|
prompt = "\n\n".join(prompts)
|
|
from typing import Protocol

from ai_review.services.diff.schema import DiffFileSchema
from ai_review.services.prompt.schema import PromptContextSchema


class PromptServiceProtocol(Protocol):
    """Structural interface for building review prompts."""

    def prepare_prompt(self, prompts: list[str], context: PromptContextSchema) -> str:
        """Join prompt fragments and render them against `context`."""
        ...

    def build_inline_request(self, diff: DiffFileSchema, context: PromptContextSchema) -> str:
        """Build the user prompt for an inline review of one file diff."""
        ...

    def build_summary_request(self, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
        """Build the user prompt for a summary review over several diffs."""
        ...

    def build_context_request(self, diffs: list[DiffFileSchema], context: PromptContextSchema) -> str:
        """Build the user prompt for a context review over several diffs."""
        ...

    def build_system_inline_request(self, context: PromptContextSchema) -> str:
        """Build the system prompt for inline review."""
        ...

    def build_system_context_request(self, context: PromptContextSchema) -> str:
        """Build the system prompt for context review."""
        ...

    def build_system_summary_request(self, context: PromptContextSchema) -> str:
        """Build the system prompt for summary review."""
        ...
File without changes
|
|
from ai_review.config import settings
from ai_review.libs.asynchronous.gather import bounded_gather
from ai_review.libs.logger import get_logger
from ai_review.services.hook import hook
from ai_review.services.review.inline.schema import InlineCommentListSchema, InlineCommentSchema
from ai_review.services.review.summary.schema import SummaryCommentSchema
from ai_review.services.vcs.types import VCSClientProtocol

logger = get_logger("REVIEW_COMMENT_GATEWAY")


class ReviewCommentGateway:
    """Posts review comments through a VCS client, with lifecycle hooks.

    Also answers whether tagged AI comments already exist on the change,
    which the caller uses to skip a duplicate review pass.
    """

    def __init__(self, vcs: VCSClientProtocol):
        self.vcs = vcs

    async def has_existing_inline_comments(self) -> bool:
        """True when any inline comment already carries the AI inline tag."""
        existing = await self.vcs.get_inline_comments()
        found = any(settings.review.inline_tag in item.body for item in existing)
        if found:
            logger.info("Skipping inline review: AI inline comments already exist")

        return found

    async def has_existing_summary_comments(self) -> bool:
        """True when any general comment already carries the AI summary tag."""
        existing = await self.vcs.get_general_comments()
        found = any(settings.review.summary_tag in item.body for item in existing)
        if found:
            logger.info("Skipping summary review: AI summary comment already exists")

        return found

    async def process_inline_comment(self, comment: InlineCommentSchema):
        """Post one inline comment; on failure fall back to a general comment."""
        try:
            await hook.emit_inline_comment_start(comment)
            await self.vcs.create_inline_comment(
                file=comment.file,
                line=comment.line,
                message=comment.body_with_tag,
            )
        except Exception as error:
            logger.exception(
                f"Failed to process inline comment for {comment.file}:{comment.line} — {error}"
            )
            await hook.emit_inline_comment_error(comment)

            # Inline placement can fail (e.g. line not in the diff) — degrade
            # to a general comment so the finding is not lost.
            logger.warning(f"Falling back to general comment for {comment.file}:{comment.line}")
            await self.process_summary_comment(SummaryCommentSchema(text=comment.fallback_body))
        else:
            await hook.emit_inline_comment_complete(comment)

    async def process_summary_comment(self, comment: SummaryCommentSchema):
        """Post one general (summary) comment; failures are logged, not raised."""
        try:
            await hook.emit_summary_comment_start(comment)
            await self.vcs.create_general_comment(comment.body_with_tag)
        except Exception as error:
            logger.exception(f"Failed to process summary comment: {comment} — {error}")
            await hook.emit_summary_comment_error(comment)
        else:
            await hook.emit_summary_comment_complete(comment)

    async def process_inline_comments(self, comments: InlineCommentListSchema) -> None:
        """Post all inline comments concurrently (bounded)."""
        await bounded_gather([self.process_inline_comment(item) for item in comments.root])
from ai_review.libs.logger import get_logger
from ai_review.services.artifacts.types import ArtifactsServiceProtocol
from ai_review.services.cost.types import CostServiceProtocol
from ai_review.services.hook import hook
from ai_review.services.llm.types import LLMClientProtocol

logger = get_logger("REVIEW_LLM_GATEWAY")


class ReviewLLMGateway:
    """Single entry point for LLM calls.

    Wraps each chat request with start/complete/error hooks, cost reporting
    and artifact persistence.
    """

    def __init__(
        self,
        llm: LLMClientProtocol,
        cost: CostServiceProtocol,
        artifacts: ArtifactsServiceProtocol
    ):
        self.llm = llm
        self.cost = cost
        self.artifacts = artifacts

    async def ask(self, prompt: str, prompt_system: str) -> str | None:
        """Send one chat request and return the model's text.

        Returns None when the underlying LLM call raises: the error is
        logged and the chat-error hook is emitted instead of propagating.
        (Fix: the original was annotated `-> str` but fell off the end of
        the `except` branch, implicitly returning None.)
        """
        try:
            await hook.emit_chat_start(prompt, prompt_system)
            result = await self.llm.chat(prompt, prompt_system)
            if not result.text:
                logger.warning(
                    f"LLM returned an empty response (prompt length={len(prompt)} chars)"
                )

            report = self.cost.calculate(result)
            if report:
                logger.info(report.pretty())

            # NOTE(review): ChatCompleteHookFunc is declared as taking a str,
            # but the full chat result object is passed here — confirm which
            # payload hooks are meant to receive.
            await hook.emit_chat_complete(result, report)
            await self.artifacts.save_llm_interaction(prompt, prompt_system, result.text)

            return result.text
        except Exception as error:
            logger.exception(f"LLM request failed: {error}")
            await hook.emit_chat_error(prompt, prompt_system)
            return None
@@ -38,8 +38,8 @@ class InlineCommentSchema(BaseModel):
|
|
|
38
38
|
return f"{self.body}\n\n{settings.review.inline_tag}"
|
|
39
39
|
|
|
40
40
|
@property
|
|
41
|
-
def
|
|
42
|
-
return f"**{self.file}:{self.line}** — {self.message}
|
|
41
|
+
def fallback_body(self) -> str:
|
|
42
|
+
return f"**{self.file}:{self.line}** — {self.message}"
|
|
43
43
|
|
|
44
44
|
|
|
45
45
|
class InlineCommentListSchema(RootModel[list[InlineCommentSchema]]):
|
|
@@ -5,6 +5,7 @@ from pydantic import ValidationError
|
|
|
5
5
|
from ai_review.libs.json import sanitize_json_string
|
|
6
6
|
from ai_review.libs.logger import get_logger
|
|
7
7
|
from ai_review.services.review.inline.schema import InlineCommentListSchema
|
|
8
|
+
from ai_review.services.review.inline.types import InlineCommentServiceProtocol
|
|
8
9
|
|
|
9
10
|
logger = get_logger("INLINE_COMMENT_SERVICE")
|
|
10
11
|
|
|
@@ -12,7 +13,7 @@ FIRST_JSON_ARRAY_RE = re.compile(r"\[[\s\S]*]", re.MULTILINE)
|
|
|
12
13
|
CLEAN_JSON_BLOCK_RE = re.compile(r"```(?:json)?(.*?)```", re.DOTALL | re.IGNORECASE)
|
|
13
14
|
|
|
14
15
|
|
|
15
|
-
class InlineCommentService:
|
|
16
|
+
class InlineCommentService(InlineCommentServiceProtocol):
|
|
16
17
|
@classmethod
|
|
17
18
|
def try_parse_model_output(cls, raw: str) -> InlineCommentListSchema | None:
|
|
18
19
|
try:
|
|
from typing import Protocol

from ai_review.services.review.inline.schema import InlineCommentListSchema


class InlineCommentServiceProtocol(Protocol):
    """Structural interface for parsing model output into inline comments."""

    def parse_model_output(self, output: str) -> InlineCommentListSchema:
        """Parse `output` into a comment list; may raise on malformed input."""
        ...

    def try_parse_model_output(self, raw: str) -> InlineCommentListSchema | None:
        """Best-effort variant of parse_model_output; returns None on failure."""
        ...