xai-review 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xai-review might be problematic. Click here for more details.

Files changed (154)
  1. ai_review/__init__.py +0 -0
  2. ai_review/cli/__init__.py +0 -0
  3. ai_review/cli/commands/__init__.py +0 -0
  4. ai_review/cli/commands/run_context_review.py +7 -0
  5. ai_review/cli/commands/run_inline_review.py +7 -0
  6. ai_review/cli/commands/run_review.py +8 -0
  7. ai_review/cli/commands/run_summary_review.py +7 -0
  8. ai_review/cli/main.py +54 -0
  9. ai_review/clients/__init__.py +0 -0
  10. ai_review/clients/claude/__init__.py +0 -0
  11. ai_review/clients/claude/client.py +44 -0
  12. ai_review/clients/claude/schema.py +44 -0
  13. ai_review/clients/gemini/__init__.py +0 -0
  14. ai_review/clients/gemini/client.py +45 -0
  15. ai_review/clients/gemini/schema.py +78 -0
  16. ai_review/clients/gitlab/__init__.py +0 -0
  17. ai_review/clients/gitlab/client.py +31 -0
  18. ai_review/clients/gitlab/mr/__init__.py +0 -0
  19. ai_review/clients/gitlab/mr/client.py +101 -0
  20. ai_review/clients/gitlab/mr/schema/__init__.py +0 -0
  21. ai_review/clients/gitlab/mr/schema/changes.py +35 -0
  22. ai_review/clients/gitlab/mr/schema/comments.py +19 -0
  23. ai_review/clients/gitlab/mr/schema/discussions.py +34 -0
  24. ai_review/clients/openai/__init__.py +0 -0
  25. ai_review/clients/openai/client.py +42 -0
  26. ai_review/clients/openai/schema.py +37 -0
  27. ai_review/config.py +62 -0
  28. ai_review/libs/__init__.py +0 -0
  29. ai_review/libs/asynchronous/__init__.py +0 -0
  30. ai_review/libs/asynchronous/gather.py +14 -0
  31. ai_review/libs/config/__init__.py +0 -0
  32. ai_review/libs/config/artifacts.py +12 -0
  33. ai_review/libs/config/base.py +24 -0
  34. ai_review/libs/config/claude.py +13 -0
  35. ai_review/libs/config/gemini.py +13 -0
  36. ai_review/libs/config/gitlab.py +12 -0
  37. ai_review/libs/config/http.py +19 -0
  38. ai_review/libs/config/llm.py +61 -0
  39. ai_review/libs/config/logger.py +17 -0
  40. ai_review/libs/config/openai.py +13 -0
  41. ai_review/libs/config/prompt.py +121 -0
  42. ai_review/libs/config/review.py +30 -0
  43. ai_review/libs/config/vcs.py +19 -0
  44. ai_review/libs/constants/__init__.py +0 -0
  45. ai_review/libs/constants/llm_provider.py +7 -0
  46. ai_review/libs/constants/vcs_provider.py +6 -0
  47. ai_review/libs/diff/__init__.py +0 -0
  48. ai_review/libs/diff/models.py +100 -0
  49. ai_review/libs/diff/parser.py +111 -0
  50. ai_review/libs/diff/tools.py +24 -0
  51. ai_review/libs/http/__init__.py +0 -0
  52. ai_review/libs/http/client.py +14 -0
  53. ai_review/libs/http/event_hooks/__init__.py +0 -0
  54. ai_review/libs/http/event_hooks/base.py +13 -0
  55. ai_review/libs/http/event_hooks/logger.py +17 -0
  56. ai_review/libs/http/handlers.py +34 -0
  57. ai_review/libs/http/transports/__init__.py +0 -0
  58. ai_review/libs/http/transports/retry.py +34 -0
  59. ai_review/libs/logger.py +19 -0
  60. ai_review/libs/resources.py +24 -0
  61. ai_review/prompts/__init__.py +0 -0
  62. ai_review/prompts/default_context.md +14 -0
  63. ai_review/prompts/default_inline.md +8 -0
  64. ai_review/prompts/default_summary.md +3 -0
  65. ai_review/prompts/default_system_context.md +27 -0
  66. ai_review/prompts/default_system_inline.md +25 -0
  67. ai_review/prompts/default_system_summary.md +7 -0
  68. ai_review/resources/__init__.py +0 -0
  69. ai_review/resources/pricing.yaml +55 -0
  70. ai_review/services/__init__.py +0 -0
  71. ai_review/services/artifacts/__init__.py +0 -0
  72. ai_review/services/artifacts/schema.py +11 -0
  73. ai_review/services/artifacts/service.py +47 -0
  74. ai_review/services/artifacts/tools.py +8 -0
  75. ai_review/services/cost/__init__.py +0 -0
  76. ai_review/services/cost/schema.py +44 -0
  77. ai_review/services/cost/service.py +58 -0
  78. ai_review/services/diff/__init__.py +0 -0
  79. ai_review/services/diff/renderers.py +149 -0
  80. ai_review/services/diff/schema.py +6 -0
  81. ai_review/services/diff/service.py +96 -0
  82. ai_review/services/diff/tools.py +59 -0
  83. ai_review/services/git/__init__.py +0 -0
  84. ai_review/services/git/service.py +35 -0
  85. ai_review/services/git/types.py +11 -0
  86. ai_review/services/llm/__init__.py +0 -0
  87. ai_review/services/llm/claude/__init__.py +0 -0
  88. ai_review/services/llm/claude/client.py +26 -0
  89. ai_review/services/llm/factory.py +18 -0
  90. ai_review/services/llm/gemini/__init__.py +0 -0
  91. ai_review/services/llm/gemini/client.py +31 -0
  92. ai_review/services/llm/openai/__init__.py +0 -0
  93. ai_review/services/llm/openai/client.py +28 -0
  94. ai_review/services/llm/types.py +15 -0
  95. ai_review/services/prompt/__init__.py +0 -0
  96. ai_review/services/prompt/adapter.py +25 -0
  97. ai_review/services/prompt/schema.py +71 -0
  98. ai_review/services/prompt/service.py +56 -0
  99. ai_review/services/review/__init__.py +0 -0
  100. ai_review/services/review/inline/__init__.py +0 -0
  101. ai_review/services/review/inline/schema.py +53 -0
  102. ai_review/services/review/inline/service.py +38 -0
  103. ai_review/services/review/policy/__init__.py +0 -0
  104. ai_review/services/review/policy/service.py +60 -0
  105. ai_review/services/review/service.py +207 -0
  106. ai_review/services/review/summary/__init__.py +0 -0
  107. ai_review/services/review/summary/schema.py +15 -0
  108. ai_review/services/review/summary/service.py +14 -0
  109. ai_review/services/vcs/__init__.py +0 -0
  110. ai_review/services/vcs/factory.py +12 -0
  111. ai_review/services/vcs/gitlab/__init__.py +0 -0
  112. ai_review/services/vcs/gitlab/client.py +152 -0
  113. ai_review/services/vcs/types.py +55 -0
  114. ai_review/tests/__init__.py +0 -0
  115. ai_review/tests/fixtures/__init__.py +0 -0
  116. ai_review/tests/fixtures/git.py +31 -0
  117. ai_review/tests/suites/__init__.py +0 -0
  118. ai_review/tests/suites/clients/__init__.py +0 -0
  119. ai_review/tests/suites/clients/claude/__init__.py +0 -0
  120. ai_review/tests/suites/clients/claude/test_client.py +31 -0
  121. ai_review/tests/suites/clients/claude/test_schema.py +59 -0
  122. ai_review/tests/suites/clients/gemini/__init__.py +0 -0
  123. ai_review/tests/suites/clients/gemini/test_client.py +30 -0
  124. ai_review/tests/suites/clients/gemini/test_schema.py +105 -0
  125. ai_review/tests/suites/clients/openai/__init__.py +0 -0
  126. ai_review/tests/suites/clients/openai/test_client.py +30 -0
  127. ai_review/tests/suites/clients/openai/test_schema.py +53 -0
  128. ai_review/tests/suites/libs/__init__.py +0 -0
  129. ai_review/tests/suites/libs/diff/__init__.py +0 -0
  130. ai_review/tests/suites/libs/diff/test_models.py +105 -0
  131. ai_review/tests/suites/libs/diff/test_parser.py +115 -0
  132. ai_review/tests/suites/libs/diff/test_tools.py +62 -0
  133. ai_review/tests/suites/services/__init__.py +0 -0
  134. ai_review/tests/suites/services/diff/__init__.py +0 -0
  135. ai_review/tests/suites/services/diff/test_renderers.py +168 -0
  136. ai_review/tests/suites/services/diff/test_service.py +84 -0
  137. ai_review/tests/suites/services/diff/test_tools.py +108 -0
  138. ai_review/tests/suites/services/prompt/__init__.py +0 -0
  139. ai_review/tests/suites/services/prompt/test_schema.py +38 -0
  140. ai_review/tests/suites/services/prompt/test_service.py +128 -0
  141. ai_review/tests/suites/services/review/__init__.py +0 -0
  142. ai_review/tests/suites/services/review/inline/__init__.py +0 -0
  143. ai_review/tests/suites/services/review/inline/test_schema.py +65 -0
  144. ai_review/tests/suites/services/review/inline/test_service.py +49 -0
  145. ai_review/tests/suites/services/review/policy/__init__.py +0 -0
  146. ai_review/tests/suites/services/review/policy/test_service.py +95 -0
  147. ai_review/tests/suites/services/review/summary/__init__.py +0 -0
  148. ai_review/tests/suites/services/review/summary/test_schema.py +22 -0
  149. ai_review/tests/suites/services/review/summary/test_service.py +16 -0
  150. xai_review-0.3.0.dist-info/METADATA +11 -0
  151. xai_review-0.3.0.dist-info/RECORD +154 -0
  152. xai_review-0.3.0.dist-info/WHEEL +5 -0
  153. xai_review-0.3.0.dist-info/entry_points.txt +2 -0
  154. xai_review-0.3.0.dist-info/top_level.txt +1 -0
ai_review/__init__.py ADDED
File without changes
File without changes
File without changes
from ai_review.services.review.service import ReviewService


async def run_context_review_command():
    """Run the context review pass and print the accumulated LLM cost."""
    service = ReviewService()
    await service.run_context_review()
    service.report_total_cost()
from ai_review.services.review.service import ReviewService


async def run_inline_review_command():
    """Run the inline review pass and print the accumulated LLM cost."""
    service = ReviewService()
    await service.run_inline_review()
    service.report_total_cost()
from ai_review.services.review.service import ReviewService


async def run_review_command():
    """Run the full pipeline: inline review, then summary review, then cost report."""
    service = ReviewService()
    await service.run_inline_review()
    await service.run_summary_review()
    service.report_total_cost()
from ai_review.services.review.service import ReviewService


async def run_summary_review_command():
    """Run the summary review pass and print the accumulated LLM cost."""
    service = ReviewService()
    await service.run_summary_review()
    service.report_total_cost()
ai_review/cli/main.py ADDED
import asyncio

import typer

from ai_review.cli.commands.run_context_review import run_context_review_command
from ai_review.cli.commands.run_inline_review import run_inline_review_command
from ai_review.cli.commands.run_review import run_review_command
from ai_review.cli.commands.run_summary_review import run_summary_review_command
from ai_review.config import settings

app = typer.Typer(help="AI Review CLI")

# Success banner shared by every review command.
_DONE = "AI review completed successfully!"


@app.command("run")
def run():
    """Run the full AI review pipeline"""
    typer.secho("Starting full AI review...", fg=typer.colors.CYAN, bold=True)
    asyncio.run(run_review_command())
    typer.secho(_DONE, fg=typer.colors.GREEN, bold=True)


@app.command("run-inline")
def run_inline():
    """Run only the inline review"""
    typer.secho("Starting inline AI review...", fg=typer.colors.CYAN)
    asyncio.run(run_inline_review_command())
    typer.secho(_DONE, fg=typer.colors.GREEN, bold=True)


@app.command("run-context")
def run_context():
    """Run only the context review"""
    typer.secho("Starting context AI review...", fg=typer.colors.CYAN)
    asyncio.run(run_context_review_command())
    typer.secho(_DONE, fg=typer.colors.GREEN, bold=True)


@app.command("run-summary")
def run_summary():
    """Run only the summary review"""
    typer.secho("Starting summary AI review...", fg=typer.colors.CYAN)
    asyncio.run(run_summary_review_command())
    typer.secho(_DONE, fg=typer.colors.GREEN, bold=True)


@app.command("show-config")
def show_config():
    """Show the current resolved configuration"""
    typer.secho("Loaded AI Review configuration:", fg=typer.colors.CYAN, bold=True)
    typer.echo(settings.model_dump_json(indent=2, exclude_none=True))


if __name__ == "__main__":
    app()
File without changes
File without changes
from httpx import AsyncClient, Response, AsyncHTTPTransport

from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeChatResponseSchema
from ai_review.config import settings
from ai_review.libs.http.client import HTTPClient
from ai_review.libs.http.event_hooks.logger import LoggerEventHook
from ai_review.libs.http.handlers import HTTPClientError, handle_http_error
from ai_review.libs.http.transports.retry import RetryTransport
from ai_review.libs.logger import get_logger


class ClaudeHTTPClientError(HTTPClientError):
    """Raised when the Anthropic API responds with an HTTP error."""


class ClaudeHTTPClient(HTTPClient):
    """Thin async wrapper over the Anthropic Messages API."""

    @handle_http_error(client="ClaudeHTTPClient", exception=ClaudeHTTPClientError)
    async def chat_api(self, request: ClaudeChatRequestSchema) -> Response:
        # Fix: exclude_none so an unset optional field (e.g. "system": null when
        # no system prompt is configured) is omitted instead of sent as an
        # explicit JSON null, which the Messages API rejects.
        return await self.post("/v1/messages", json=request.model_dump(exclude_none=True))

    async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema:
        """Send a chat request and return the validated response schema."""
        response = await self.chat_api(request)
        return ClaudeChatResponseSchema.model_validate_json(response.text)


def get_claude_http_client() -> ClaudeHTTPClient:
    """Build a ClaudeHTTPClient with retry transport and request/response logging."""
    logger = get_logger("CLAUDE_HTTP_CLIENT")
    logger_event_hook = LoggerEventHook(logger=logger)
    retry_transport = RetryTransport(transport=AsyncHTTPTransport())

    client = AsyncClient(
        timeout=settings.llm.http_client.timeout,
        headers={
            "x-api-key": settings.llm.http_client.api_key,
            "anthropic-version": settings.llm.http_client.api_version,
        },
        base_url=settings.llm.http_client.base_url,
        transport=retry_transport,
        event_hooks={
            "request": [logger_event_hook.request],
            "response": [logger_event_hook.response],
        },
    )
    return ClaudeHTTPClient(client=client)
from typing import Literal

from pydantic import BaseModel


class ClaudeMessageSchema(BaseModel):
    """One chat turn sent to the Anthropic Messages API."""

    # NOTE(review): the Messages API takes the system prompt as the top-level
    # "system" request field; "system" as a per-message role may be rejected
    # by the API — confirm this role is ever used.
    role: Literal["user", "assistant", "system"]
    content: str


class ClaudeChatRequestSchema(BaseModel):
    """Request body for POST /v1/messages."""

    model: str
    system: str | None = None
    messages: list[ClaudeMessageSchema]
    max_tokens: int
    temperature: float


class ClaudeContentSchema(BaseModel):
    """A single response content block (text blocks only)."""

    type: Literal["text"]
    text: str


class ClaudeUsageSchema(BaseModel):
    """Token usage as reported by the API."""

    input_tokens: int
    output_tokens: int

    @property
    def total_tokens(self) -> int:
        """Sum of input and output tokens."""
        return self.input_tokens + self.output_tokens


class ClaudeChatResponseSchema(BaseModel):
    """Response body of POST /v1/messages."""

    id: str
    role: str
    usage: ClaudeUsageSchema
    content: list[ClaudeContentSchema]

    @property
    def first_text(self) -> str:
        """Stripped text of the first content block, or "" when there is none."""
        if not self.content:
            return ""

        return self.content[0].text.strip()
File without changes
from httpx import Response, AsyncHTTPTransport, AsyncClient

from ai_review.clients.gemini.schema import GeminiChatRequestSchema, GeminiChatResponseSchema
from ai_review.config import settings
from ai_review.libs.http.client import HTTPClient
from ai_review.libs.http.event_hooks.logger import LoggerEventHook
from ai_review.libs.http.handlers import HTTPClientError, handle_http_error
from ai_review.libs.http.transports.retry import RetryTransport
from ai_review.libs.logger import get_logger


class GeminiHTTPClientError(HTTPClientError):
    """Raised when the Gemini API responds with an HTTP error."""


class GeminiHTTPClient(HTTPClient):
    """Thin async wrapper over the Gemini generateContent endpoint."""

    @handle_http_error(client="GeminiHTTPClient", exception=GeminiHTTPClientError)
    async def chat_api(self, request: GeminiChatRequestSchema) -> Response:
        meta = settings.llm.meta
        # Fix: by_alias so the request serializes with the camelCase keys the
        # schema's aliases declare ("generationConfig", "systemInstruction") —
        # a plain model_dump() emits the snake_case field names instead.
        # exclude_none drops unset optional fields rather than sending nulls.
        return await self.post(
            f"/v1beta/models/{meta.model}:generateContent",
            json=request.model_dump(by_alias=True, exclude_none=True),
        )

    async def chat(self, request: GeminiChatRequestSchema) -> GeminiChatResponseSchema:
        """Send a chat request and return the validated response schema."""
        response = await self.chat_api(request)
        return GeminiChatResponseSchema.model_validate_json(response.text)


def get_gemini_http_client() -> GeminiHTTPClient:
    """Build a GeminiHTTPClient with retry transport and request/response logging."""
    logger = get_logger("GEMINI_HTTP_CLIENT")
    logger_event_hook = LoggerEventHook(logger=logger)
    retry_transport = RetryTransport(transport=AsyncHTTPTransport())

    client = AsyncClient(
        timeout=settings.llm.http_client.timeout,
        headers={"x-goog-api-key": settings.llm.http_client.api_key},
        base_url=settings.llm.http_client.base_url,
        transport=retry_transport,
        event_hooks={
            "request": [logger_event_hook.request],
            "response": [logger_event_hook.response],
        },
    )

    return GeminiHTTPClient(client=client)
from pydantic import BaseModel, Field, ConfigDict


class GeminiPartSchema(BaseModel):
    """A single text part within a Gemini content entry."""

    text: str


class GeminiUsageSchema(BaseModel):
    """Token accounting parsed from the response's `usageMetadata` object."""

    model_config = ConfigDict(populate_by_name=True)

    prompt_token_count: int = Field(alias="promptTokenCount")
    total_tokens_count: int | None = Field(default=None, alias="totalTokenCount")
    candidates_token_count: int | None = Field(default=None, alias="candidatesTokenCount")
    # NOTE(review): alias "outputThoughtsTokenCount" — verify against the API;
    # responses may report thought tokens under a different key.
    output_thoughts_token_count: int | None = Field(default=None, alias="outputThoughtsTokenCount")

    @property
    def total_tokens(self) -> int:
        """API-reported total when present; otherwise the sum of the parts."""
        if self.total_tokens_count is not None:
            return self.total_tokens_count

        return (
            (self.prompt_token_count or 0)
            + (self.candidates_token_count or 0)
            + (self.output_thoughts_token_count or 0)
        )

    @property
    def prompt_tokens(self) -> int:
        """Tokens consumed by the prompt."""
        return self.prompt_token_count

    @property
    def completion_tokens(self) -> int | None:
        """Candidate tokens, falling back to thought tokens; None if neither is set."""
        return self.candidates_token_count or self.output_thoughts_token_count


class GeminiContentSchema(BaseModel):
    """A content entry: a role plus an optional list of parts."""

    role: str = "user"
    parts: list[GeminiPartSchema] | None = None


class GeminiCandidateSchema(BaseModel):
    """One generated candidate in the response."""

    content: GeminiContentSchema


class GeminiGenerationConfigSchema(BaseModel):
    """Generation parameters, serialized as `generationConfig`."""

    model_config = ConfigDict(populate_by_name=True)

    temperature: float
    max_output_tokens: int = Field(alias="maxOutputTokens")


class GeminiChatRequestSchema(BaseModel):
    """Request body for models/{model}:generateContent."""

    model_config = ConfigDict(populate_by_name=True)

    contents: list[GeminiContentSchema]
    generation_config: GeminiGenerationConfigSchema | None = Field(
        alias="generationConfig",
        default=None
    )
    system_instruction: GeminiContentSchema | None = Field(
        alias="systemInstruction",
        default=None
    )


class GeminiChatResponseSchema(BaseModel):
    """Response body of models/{model}:generateContent."""

    model_config = ConfigDict(populate_by_name=True)

    usage: GeminiUsageSchema = Field(alias="usageMetadata")
    candidates: list[GeminiCandidateSchema]

    @property
    def first_text(self) -> str:
        """Stripped text of the first part of the first candidate, or ""."""
        if not self.candidates:
            return ""

        parts = self.candidates[0].content.parts or []
        return (parts[0].text if parts else "").strip()
File without changes
from httpx import AsyncClient, AsyncHTTPTransport

from ai_review.clients.gitlab.mr.client import GitLabMergeRequestsHTTPClient
from ai_review.config import settings
from ai_review.libs.http.event_hooks.logger import LoggerEventHook
from ai_review.libs.http.transports.retry import RetryTransport
from ai_review.libs.logger import get_logger


class GitLabHTTPClient:
    """Facade grouping GitLab API sub-clients (currently merge requests only)."""

    def __init__(self, client: AsyncClient):
        self.mr = GitLabMergeRequestsHTTPClient(client)


def get_gitlab_http_client() -> GitLabHTTPClient:
    """Build a GitLabHTTPClient with retry transport and request/response logging."""
    logger = get_logger("GITLAB_MERGE_REQUESTS_HTTP_CLIENT")
    logger_event_hook = LoggerEventHook(logger=logger)
    retry_transport = RetryTransport(transport=AsyncHTTPTransport())

    client = AsyncClient(
        # Fix: the timeout was previously read from settings.llm.http_client,
        # coupling the VCS client's timeout to the LLM configuration; every
        # other setting here comes from settings.vcs.http_client.
        timeout=settings.vcs.http_client.timeout,
        headers={"Authorization": f"Bearer {settings.vcs.http_client.bearer_token}"},
        base_url=settings.vcs.http_client.base_url,
        transport=retry_transport,
        event_hooks={
            "request": [logger_event_hook.request],
            "response": [logger_event_hook.response],
        },
    )

    return GitLabHTTPClient(client=client)
File without changes
from httpx import Response

from ai_review.clients.gitlab.mr.schema.changes import GitLabGetMRChangesResponseSchema
from ai_review.clients.gitlab.mr.schema.comments import (
    GitLabGetMRCommentsResponseSchema,
    GitLabCreateMRCommentRequestSchema,
    GitLabCreateMRCommentResponseSchema,
)
from ai_review.clients.gitlab.mr.schema.discussions import (
    GitLabGetMRDiscussionsResponseSchema,
    GitLabCreateMRDiscussionRequestSchema,
    GitLabCreateMRDiscussionResponseSchema
)
from ai_review.libs.http.client import HTTPClient


class GitLabMergeRequestsHTTPClient(HTTPClient):
    """Async client for the GitLab merge-requests REST API (/api/v4).

    The ``*_api`` methods return the raw httpx Response; the plain-named
    wrappers validate the JSON payload into pydantic schemas.

    NOTE(review): unlike the LLM clients, these calls are not wrapped in
    ``handle_http_error`` — confirm whether that is intentional.
    """

    async def get_changes_api(self, project_id: str, merge_request_id: str) -> Response:
        """GET the MR changes (diffs) endpoint."""
        return await self.get(
            f"/api/v4/projects/{project_id}/merge_requests/{merge_request_id}/changes"
        )

    async def get_comments_api(self, project_id: str, merge_request_id: str) -> Response:
        """GET the MR notes (comments) endpoint."""
        return await self.get(
            f"/api/v4/projects/{project_id}/merge_requests/{merge_request_id}/notes"
        )

    async def get_discussions_api(self, project_id: str, merge_request_id: str) -> Response:
        """GET the MR discussions endpoint."""
        return await self.get(
            f"/api/v4/projects/{project_id}/merge_requests/{merge_request_id}/discussions"
        )

    async def create_comment_api(
        self,
        project_id: str,
        merge_request_id: str,
        request: GitLabCreateMRCommentRequestSchema,
    ) -> Response:
        """POST a new note (unpositioned comment) on the MR."""
        return await self.post(
            f"/api/v4/projects/{project_id}/merge_requests/{merge_request_id}/notes",
            json=request.model_dump(),
        )

    async def create_discussion_api(
        self,
        project_id: str,
        merge_request_id: str,
        request: GitLabCreateMRDiscussionRequestSchema,
    ) -> Response:
        """POST a new discussion (positioned comment) on the MR."""
        return await self.post(
            f"/api/v4/projects/{project_id}/merge_requests/{merge_request_id}/discussions",
            json=request.model_dump(),
        )

    async def get_changes(self, project_id: str, merge_request_id: str) -> GitLabGetMRChangesResponseSchema:
        """Fetch and validate the MR changes payload."""
        response = await self.get_changes_api(project_id, merge_request_id)
        return GitLabGetMRChangesResponseSchema.model_validate_json(response.text)

    async def get_comments(
        self,
        project_id: str,
        merge_request_id: str
    ) -> GitLabGetMRCommentsResponseSchema:
        """Fetch and validate the MR comments payload."""
        response = await self.get_comments_api(project_id, merge_request_id)
        return GitLabGetMRCommentsResponseSchema.model_validate_json(response.text)

    async def get_discussions(
        self,
        project_id: str,
        merge_request_id: str
    ) -> GitLabGetMRDiscussionsResponseSchema:
        """Fetch and validate the MR discussions payload."""
        response = await self.get_discussions_api(project_id, merge_request_id)
        return GitLabGetMRDiscussionsResponseSchema.model_validate_json(response.text)

    async def create_comment(
        self,
        comment: str,
        project_id: str,
        merge_request_id: str,
    ) -> GitLabCreateMRCommentResponseSchema:
        """Create an unpositioned comment from plain text and validate the result."""
        request = GitLabCreateMRCommentRequestSchema(body=comment)
        response = await self.create_comment_api(
            request=request,
            project_id=project_id,
            merge_request_id=merge_request_id
        )
        return GitLabCreateMRCommentResponseSchema.model_validate_json(response.text)

    async def create_discussion(
        self,
        project_id: str,
        merge_request_id: str,
        request: GitLabCreateMRDiscussionRequestSchema
    ) -> GitLabCreateMRDiscussionResponseSchema:
        """Create a positioned discussion and validate the result."""
        response = await self.create_discussion_api(
            request=request,
            project_id=project_id,
            merge_request_id=merge_request_id
        )
        return GitLabCreateMRDiscussionResponseSchema.model_validate_json(response.text)
File without changes
from pydantic import BaseModel, Field


class GitLabUserSchema(BaseModel):
    """A GitLab user as embedded in merge-request payloads."""

    id: int
    name: str
    username: str


class GitLabDiffRefsSchema(BaseModel):
    """SHA triple identifying the diff; needed to anchor positioned discussions."""

    base_sha: str
    head_sha: str
    start_sha: str


class GitLabMRChangeSchema(BaseModel):
    """One changed file: its unified diff plus old and new paths."""

    diff: str
    old_path: str
    new_path: str


class GitLabGetMRChangesResponseSchema(BaseModel):
    """Response of GET /projects/:id/merge_requests/:iid/changes."""

    id: int
    iid: int
    title: str
    author: GitLabUserSchema
    # Fix: use default_factory like assignees/reviewers below instead of a
    # mutable literal default — consistent style, same runtime behavior.
    labels: list[str] = Field(default_factory=list)
    changes: list[GitLabMRChangeSchema]
    assignees: list[GitLabUserSchema] = Field(default_factory=list)
    reviewers: list[GitLabUserSchema] = Field(default_factory=list)
    diff_refs: GitLabDiffRefsSchema
    project_id: int
    description: str
    source_branch: str
    target_branch: str
from pydantic import BaseModel, RootModel


class GitLabMRCommentSchema(BaseModel):
    """A single note (comment) on a merge request."""

    id: int
    body: str


class GitLabGetMRCommentsResponseSchema(RootModel[list[GitLabMRCommentSchema]]):
    """Response of GET .../notes — a bare JSON array of comments."""

    root: list[GitLabMRCommentSchema]


class GitLabCreateMRCommentRequestSchema(BaseModel):
    """Request body for POST .../notes."""

    body: str


class GitLabCreateMRCommentResponseSchema(BaseModel):
    """Response of POST .../notes."""

    id: int
    body: str
from pydantic import BaseModel, RootModel


class GitLabNoteSchema(BaseModel):
    """A single note inside a discussion thread."""

    id: int
    body: str


class GitLabDiscussionSchema(BaseModel):
    """A discussion thread: an id plus its notes."""

    id: str
    notes: list[GitLabNoteSchema]


class GitLabDiscussionPositionSchema(BaseModel):
    """Position payload anchoring a discussion to a line of the new file.

    The three SHAs must come from the MR's diff_refs; only new-file
    positions (new_path/new_line) are modeled here.
    """

    position_type: str = "text"
    base_sha: str
    head_sha: str
    start_sha: str
    new_path: str
    new_line: int


class GitLabGetMRDiscussionsResponseSchema(RootModel[list[GitLabDiscussionSchema]]):
    """Response of GET .../discussions — a bare JSON array of threads."""

    root: list[GitLabDiscussionSchema]


class GitLabCreateMRDiscussionRequestSchema(BaseModel):
    """Request body for POST .../discussions."""

    body: str
    position: GitLabDiscussionPositionSchema


class GitLabCreateMRDiscussionResponseSchema(BaseModel):
    """Response of POST .../discussions."""

    id: str
    body: str | None = None
File without changes
from httpx import Response, AsyncHTTPTransport, AsyncClient

from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
from ai_review.config import settings
from ai_review.libs.http.client import HTTPClient
from ai_review.libs.http.event_hooks.logger import LoggerEventHook
from ai_review.libs.http.handlers import HTTPClientError, handle_http_error
from ai_review.libs.http.transports.retry import RetryTransport
from ai_review.libs.logger import get_logger


class OpenAIHTTPClientError(HTTPClientError):
    """Raised when the OpenAI API responds with an HTTP error."""


class OpenAIHTTPClient(HTTPClient):
    """Thin async wrapper over the OpenAI chat-completions endpoint."""

    @handle_http_error(client="OpenAIHTTPClient", exception=OpenAIHTTPClientError)
    async def chat_api(self, request: OpenAIChatRequestSchema) -> Response:
        """POST the serialized request and return the raw httpx response."""
        return await self.post("/chat/completions", json=request.model_dump())

    async def chat(self, request: OpenAIChatRequestSchema) -> OpenAIChatResponseSchema:
        """Send a chat request and return the validated response schema."""
        raw = await self.chat_api(request)
        return OpenAIChatResponseSchema.model_validate_json(raw.text)


def get_openai_http_client() -> OpenAIHTTPClient:
    """Assemble an OpenAIHTTPClient with retry transport and HTTP logging."""
    log = get_logger("OPENAI_HTTP_CLIENT")
    hook = LoggerEventHook(logger=log)
    transport = RetryTransport(transport=AsyncHTTPTransport())

    http = AsyncClient(
        base_url=settings.llm.http_client.base_url,
        timeout=settings.llm.http_client.timeout,
        headers={"Authorization": f"Bearer {settings.llm.http_client.bearer_token}"},
        transport=transport,
        event_hooks={
            "request": [hook.request],
            "response": [hook.response],
        },
    )

    return OpenAIHTTPClient(client=http)
from typing import Literal

from pydantic import BaseModel


class OpenAIUsageSchema(BaseModel):
    """Token usage block of a chat-completions response."""

    total_tokens: int
    prompt_tokens: int
    completion_tokens: int


class OpenAIMessageSchema(BaseModel):
    """One chat message (request or response side)."""

    role: Literal["system", "user", "assistant"]
    content: str


class OpenAIChoiceSchema(BaseModel):
    """A single completion choice wrapping its message."""

    message: OpenAIMessageSchema


class OpenAIChatRequestSchema(BaseModel):
    """Request body for POST /chat/completions."""

    model: str
    messages: list[OpenAIMessageSchema]
    max_tokens: int
    temperature: float


class OpenAIChatResponseSchema(BaseModel):
    """Response body of POST /chat/completions."""

    usage: OpenAIUsageSchema
    choices: list[OpenAIChoiceSchema]

    @property
    def first_text(self) -> str:
        """Stripped content of the first choice's message, or "" when empty."""
        if not self.choices:
            return ""

        return (self.choices[0].message.content or "").strip()
+ return (self.choices[0].message.content or "").strip()