xai-review 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xai-review might be problematic. Click here for more details.
- ai_review/__init__.py +0 -0
- ai_review/cli/__init__.py +0 -0
- ai_review/cli/commands/__init__.py +0 -0
- ai_review/cli/commands/run_context_review.py +7 -0
- ai_review/cli/commands/run_inline_review.py +7 -0
- ai_review/cli/commands/run_review.py +8 -0
- ai_review/cli/commands/run_summary_review.py +7 -0
- ai_review/cli/main.py +54 -0
- ai_review/clients/__init__.py +0 -0
- ai_review/clients/claude/__init__.py +0 -0
- ai_review/clients/claude/client.py +44 -0
- ai_review/clients/claude/schema.py +44 -0
- ai_review/clients/gemini/__init__.py +0 -0
- ai_review/clients/gemini/client.py +45 -0
- ai_review/clients/gemini/schema.py +78 -0
- ai_review/clients/gitlab/__init__.py +0 -0
- ai_review/clients/gitlab/client.py +31 -0
- ai_review/clients/gitlab/mr/__init__.py +0 -0
- ai_review/clients/gitlab/mr/client.py +101 -0
- ai_review/clients/gitlab/mr/schema/__init__.py +0 -0
- ai_review/clients/gitlab/mr/schema/changes.py +35 -0
- ai_review/clients/gitlab/mr/schema/comments.py +19 -0
- ai_review/clients/gitlab/mr/schema/discussions.py +34 -0
- ai_review/clients/openai/__init__.py +0 -0
- ai_review/clients/openai/client.py +42 -0
- ai_review/clients/openai/schema.py +37 -0
- ai_review/config.py +62 -0
- ai_review/libs/__init__.py +0 -0
- ai_review/libs/asynchronous/__init__.py +0 -0
- ai_review/libs/asynchronous/gather.py +14 -0
- ai_review/libs/config/__init__.py +0 -0
- ai_review/libs/config/artifacts.py +12 -0
- ai_review/libs/config/base.py +24 -0
- ai_review/libs/config/claude.py +13 -0
- ai_review/libs/config/gemini.py +13 -0
- ai_review/libs/config/gitlab.py +12 -0
- ai_review/libs/config/http.py +19 -0
- ai_review/libs/config/llm.py +61 -0
- ai_review/libs/config/logger.py +17 -0
- ai_review/libs/config/openai.py +13 -0
- ai_review/libs/config/prompt.py +121 -0
- ai_review/libs/config/review.py +30 -0
- ai_review/libs/config/vcs.py +19 -0
- ai_review/libs/constants/__init__.py +0 -0
- ai_review/libs/constants/llm_provider.py +7 -0
- ai_review/libs/constants/vcs_provider.py +6 -0
- ai_review/libs/diff/__init__.py +0 -0
- ai_review/libs/diff/models.py +100 -0
- ai_review/libs/diff/parser.py +111 -0
- ai_review/libs/diff/tools.py +24 -0
- ai_review/libs/http/__init__.py +0 -0
- ai_review/libs/http/client.py +14 -0
- ai_review/libs/http/event_hooks/__init__.py +0 -0
- ai_review/libs/http/event_hooks/base.py +13 -0
- ai_review/libs/http/event_hooks/logger.py +17 -0
- ai_review/libs/http/handlers.py +34 -0
- ai_review/libs/http/transports/__init__.py +0 -0
- ai_review/libs/http/transports/retry.py +34 -0
- ai_review/libs/logger.py +19 -0
- ai_review/libs/resources.py +24 -0
- ai_review/prompts/__init__.py +0 -0
- ai_review/prompts/default_context.md +14 -0
- ai_review/prompts/default_inline.md +8 -0
- ai_review/prompts/default_summary.md +3 -0
- ai_review/prompts/default_system_context.md +27 -0
- ai_review/prompts/default_system_inline.md +25 -0
- ai_review/prompts/default_system_summary.md +7 -0
- ai_review/resources/__init__.py +0 -0
- ai_review/resources/pricing.yaml +55 -0
- ai_review/services/__init__.py +0 -0
- ai_review/services/artifacts/__init__.py +0 -0
- ai_review/services/artifacts/schema.py +11 -0
- ai_review/services/artifacts/service.py +47 -0
- ai_review/services/artifacts/tools.py +8 -0
- ai_review/services/cost/__init__.py +0 -0
- ai_review/services/cost/schema.py +44 -0
- ai_review/services/cost/service.py +58 -0
- ai_review/services/diff/__init__.py +0 -0
- ai_review/services/diff/renderers.py +149 -0
- ai_review/services/diff/schema.py +6 -0
- ai_review/services/diff/service.py +96 -0
- ai_review/services/diff/tools.py +59 -0
- ai_review/services/git/__init__.py +0 -0
- ai_review/services/git/service.py +35 -0
- ai_review/services/git/types.py +11 -0
- ai_review/services/llm/__init__.py +0 -0
- ai_review/services/llm/claude/__init__.py +0 -0
- ai_review/services/llm/claude/client.py +26 -0
- ai_review/services/llm/factory.py +18 -0
- ai_review/services/llm/gemini/__init__.py +0 -0
- ai_review/services/llm/gemini/client.py +31 -0
- ai_review/services/llm/openai/__init__.py +0 -0
- ai_review/services/llm/openai/client.py +28 -0
- ai_review/services/llm/types.py +15 -0
- ai_review/services/prompt/__init__.py +0 -0
- ai_review/services/prompt/adapter.py +25 -0
- ai_review/services/prompt/schema.py +71 -0
- ai_review/services/prompt/service.py +56 -0
- ai_review/services/review/__init__.py +0 -0
- ai_review/services/review/inline/__init__.py +0 -0
- ai_review/services/review/inline/schema.py +53 -0
- ai_review/services/review/inline/service.py +38 -0
- ai_review/services/review/policy/__init__.py +0 -0
- ai_review/services/review/policy/service.py +60 -0
- ai_review/services/review/service.py +207 -0
- ai_review/services/review/summary/__init__.py +0 -0
- ai_review/services/review/summary/schema.py +15 -0
- ai_review/services/review/summary/service.py +14 -0
- ai_review/services/vcs/__init__.py +0 -0
- ai_review/services/vcs/factory.py +12 -0
- ai_review/services/vcs/gitlab/__init__.py +0 -0
- ai_review/services/vcs/gitlab/client.py +152 -0
- ai_review/services/vcs/types.py +55 -0
- ai_review/tests/__init__.py +0 -0
- ai_review/tests/fixtures/__init__.py +0 -0
- ai_review/tests/fixtures/git.py +31 -0
- ai_review/tests/suites/__init__.py +0 -0
- ai_review/tests/suites/clients/__init__.py +0 -0
- ai_review/tests/suites/clients/claude/__init__.py +0 -0
- ai_review/tests/suites/clients/claude/test_client.py +31 -0
- ai_review/tests/suites/clients/claude/test_schema.py +59 -0
- ai_review/tests/suites/clients/gemini/__init__.py +0 -0
- ai_review/tests/suites/clients/gemini/test_client.py +30 -0
- ai_review/tests/suites/clients/gemini/test_schema.py +105 -0
- ai_review/tests/suites/clients/openai/__init__.py +0 -0
- ai_review/tests/suites/clients/openai/test_client.py +30 -0
- ai_review/tests/suites/clients/openai/test_schema.py +53 -0
- ai_review/tests/suites/libs/__init__.py +0 -0
- ai_review/tests/suites/libs/diff/__init__.py +0 -0
- ai_review/tests/suites/libs/diff/test_models.py +105 -0
- ai_review/tests/suites/libs/diff/test_parser.py +115 -0
- ai_review/tests/suites/libs/diff/test_tools.py +62 -0
- ai_review/tests/suites/services/__init__.py +0 -0
- ai_review/tests/suites/services/diff/__init__.py +0 -0
- ai_review/tests/suites/services/diff/test_renderers.py +168 -0
- ai_review/tests/suites/services/diff/test_service.py +84 -0
- ai_review/tests/suites/services/diff/test_tools.py +108 -0
- ai_review/tests/suites/services/prompt/__init__.py +0 -0
- ai_review/tests/suites/services/prompt/test_schema.py +38 -0
- ai_review/tests/suites/services/prompt/test_service.py +128 -0
- ai_review/tests/suites/services/review/__init__.py +0 -0
- ai_review/tests/suites/services/review/inline/__init__.py +0 -0
- ai_review/tests/suites/services/review/inline/test_schema.py +65 -0
- ai_review/tests/suites/services/review/inline/test_service.py +49 -0
- ai_review/tests/suites/services/review/policy/__init__.py +0 -0
- ai_review/tests/suites/services/review/policy/test_service.py +95 -0
- ai_review/tests/suites/services/review/summary/__init__.py +0 -0
- ai_review/tests/suites/services/review/summary/test_schema.py +22 -0
- ai_review/tests/suites/services/review/summary/test_service.py +16 -0
- xai_review-0.3.0.dist-info/METADATA +11 -0
- xai_review-0.3.0.dist-info/RECORD +154 -0
- xai_review-0.3.0.dist-info/WHEEL +5 -0
- xai_review-0.3.0.dist-info/entry_points.txt +2 -0
- xai_review-0.3.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from ai_review.libs.diff.models import DiffLineType
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def is_source_line(line: str) -> bool:
    """Return True when *line* carries diff content worth classifying.

    Empty lines and file-header markers ("---"/"+++") are not source lines.
    """
    # `not line` covers the original's separate, redundant `line == r""` check;
    # startswith accepts a tuple, replacing the chained `or`.
    if not line or line.startswith(("---", "+++")):
        return False
    return True
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def get_line_type(line: str) -> DiffLineType:
    """Classify a diff line by its first character: '+', '-', or ' '."""
    if not line:
        raise ValueError("Empty line cannot be classified as DiffLineType")

    prefix = line[0]
    if prefix == "+":
        return DiffLineType.ADDED
    if prefix == "-":
        return DiffLineType.REMOVED
    if prefix == " ":
        return DiffLineType.UNCHANGED
    raise ValueError(f"Unknown diff line prefix: {line!r}")
|
|
File without changes
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from typing import Any
|
|
2
|
+
|
|
3
|
+
from httpx import AsyncClient, Response, QueryParams
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class HTTPClient:
    """Thin asynchronous facade over an httpx.AsyncClient."""

    def __init__(self, client: AsyncClient) -> None:
        self.client = client

    async def get(self, url: str, query: QueryParams | None = None) -> Response:
        """Issue a GET request, forwarding *query* as URL parameters."""
        response = await self.client.get(url=url, params=query)
        return response

    async def post(self, url: str, json: Any | None = None) -> Response:
        """Issue a POST request with an optional JSON body."""
        response = await self.client.post(url=url, json=json)
        return response
|
|
File without changes
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
|
|
3
|
+
from httpx import Request, Response
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class BaseEventHook(ABC):
    """Interface for httpx event hooks observing requests and responses."""

    @abstractmethod
    async def request(self, request: Request) -> None:
        """Invoked just before a request is sent."""

    @abstractmethod
    async def response(self, response: Response) -> None:
        """Invoked just after a response is received."""
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from logging import Logger
|
|
2
|
+
|
|
3
|
+
from httpx import Request, Response
|
|
4
|
+
|
|
5
|
+
from ai_review.libs.http.event_hooks.base import BaseEventHook
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class LoggerEventHook(BaseEventHook):
    """Event hook that logs the lifecycle of each HTTP exchange."""

    def __init__(self, logger: Logger):
        self.logger = logger

    async def request(self, request: Request):
        # Announce the outgoing request before any response exists.
        self.logger.info(f"{request.method} {request.url} - Waiting for response")

    async def response(self, response: Response):
        # Log the completed exchange together with its status code.
        origin = response.request
        self.logger.info(f"{origin.method} {origin.url} - Status {response.status_code}")
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
from functools import wraps
|
|
2
|
+
from typing import Callable, Coroutine, Any
|
|
3
|
+
|
|
4
|
+
from httpx import Response, HTTPStatusError
|
|
5
|
+
|
|
6
|
+
APIFunc = Callable[..., Coroutine[Any, Any, Response]]
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class HTTPClientError(Exception):
    """Raised when an HTTP API call comes back with an error status code."""

    def __init__(self, client: str, details: str, status_code: int):
        message = f"[{client}] {status_code}: {details}"
        self.details = f'[{client}]: {details}'
        self.status_code = status_code
        super().__init__(message)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def handle_http_error(client: str, exception: type[HTTPClientError]):
    """Decorator factory converting httpx.HTTPStatusError into *exception*."""

    def wrapper(func: APIFunc):
        @wraps(func)
        async def inner(*args, **kwargs):
            response = await func(*args, **kwargs)
            try:
                return response.raise_for_status()
            except HTTPStatusError as error:
                # Fall back to a generic message when the body is empty.
                details = error.response.text or f'{client} returned error'
                raise exception(
                    client=client,
                    details=details,
                    status_code=error.response.status_code,
                ) from error

        return inner

    return wrapper
|
|
File without changes
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
from http import HTTPStatus
|
|
3
|
+
|
|
4
|
+
from httpx import Request, Response, AsyncBaseTransport
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class RetryTransport(AsyncBaseTransport):
    """httpx transport wrapper that retries requests returning transient 5xx codes.

    Each attempt is separated by a fixed ``retry_delay``; a response whose
    status is not in ``retry_status_codes`` is returned immediately.
    """

    def __init__(
        self,
        transport: AsyncBaseTransport,
        max_retries: int = 5,
        retry_delay: float = 0.5,
        retry_status_codes: tuple[HTTPStatus, ...] = (
            HTTPStatus.BAD_GATEWAY,
            HTTPStatus.GATEWAY_TIMEOUT,
            HTTPStatus.SERVICE_UNAVAILABLE,
            HTTPStatus.INTERNAL_SERVER_ERROR,
        )
    ):
        # BUG FIX: with max_retries < 1 the request loop would never run and
        # handle_async_request would return None despite its Response annotation.
        if max_retries < 1:
            raise ValueError("max_retries must be at least 1")

        self.transport = transport
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        self.retry_status_codes = retry_status_codes

    async def handle_async_request(self, request: Request) -> Response:
        response: Response | None = None
        for attempt in range(self.max_retries):
            if attempt:
                # BUG FIX: sleep only *between* attempts. The original slept
                # after the final attempt too, needlessly delaying the
                # already-failed response by retry_delay.
                await asyncio.sleep(self.retry_delay)

            response = await self.transport.handle_async_request(request)
            if response.status_code not in self.retry_status_codes:
                return response

        # All attempts exhausted: surface the last (retryable-status) response.
        return response
|
ai_review/libs/logger.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from typing import TYPE_CHECKING

from loguru import logger

from ai_review.config import settings

# Import the Logger type only for annotations; at runtime loguru exposes the
# bound logger object, not the type, so this avoids an import error.
if TYPE_CHECKING:
    from loguru import Logger

# Drop loguru's default stderr handler and install one driven by settings.
logger.remove()
logger.add(
    # NOTE(review): prints each record to stdout without an extra newline
    # (loguru records already end with one) — presumably chosen over a bare
    # sys.stdout sink on purpose; confirm.
    sink=lambda msg: print(msg, end=""),
    format=settings.logger.format,
    level=settings.logger.level,
)


def get_logger(name: str) -> "Logger":
    """Return a logger bound with ``logger_name`` so records can be tagged per module."""
    return logger.bind(logger_name=name)
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import importlib.metadata
|
|
2
|
+
import importlib.resources
|
|
3
|
+
import os
|
|
4
|
+
import pathlib
|
|
5
|
+
import shutil
|
|
6
|
+
import tempfile
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def load_resource(package: str, filename: str, fallback: str | None = None) -> pathlib.Path:
|
|
10
|
+
try:
|
|
11
|
+
traversable = importlib.resources.files(package) / filename
|
|
12
|
+
if isinstance(traversable, pathlib.Path):
|
|
13
|
+
return traversable
|
|
14
|
+
|
|
15
|
+
tmp_dir = pathlib.Path(tempfile.gettempdir())
|
|
16
|
+
tmp_file = tmp_dir / filename
|
|
17
|
+
with traversable.open("rb") as src, tmp_file.open("wb") as dst:
|
|
18
|
+
shutil.copyfileobj(src, dst)
|
|
19
|
+
return tmp_file
|
|
20
|
+
except importlib.metadata.PackageNotFoundError:
|
|
21
|
+
if fallback:
|
|
22
|
+
return pathlib.Path(os.path.join(os.getcwd(), fallback))
|
|
23
|
+
|
|
24
|
+
raise
|
|
File without changes
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
Focus on code correctness, clarity, and maintainability across the entire set of changes.
|
|
2
|
+
Add inline comments for:
|
|
3
|
+
|
|
4
|
+
- potential bugs or logical errors,
|
|
5
|
+
- inconsistent or confusing naming across files,
|
|
6
|
+
- redundant or duplicate code,
|
|
7
|
+
- improvements for readability or best practices.
|
|
8
|
+
|
|
9
|
+
Guidelines:
|
|
10
|
+
|
|
11
|
+
- Analyze all provided files together, but output comments in the same format as inline review.
|
|
12
|
+
- If there are many issues, prioritize the most important ones (maximum 50 comments).
|
|
13
|
+
- Keep comments short, actionable, and relevant.
|
|
14
|
+
- Do not comment on unchanged code or stylistic preferences unless they clearly affect maintainability.
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
Return ONLY a valid JSON array of inline review comments.
|
|
2
|
+
|
|
3
|
+
Format:
|
|
4
|
+
|
|
5
|
+
```json
|
|
6
|
+
[
|
|
7
|
+
{
|
|
8
|
+
"file": "<relative_file_path>",
|
|
9
|
+
"line": <line_number>,
|
|
10
|
+
"message": "<short review message explaining the issue or suggestion>",
|
|
11
|
+
"suggestion": "<replacement code block, without markdown, or null if not applicable>"
|
|
12
|
+
}
|
|
13
|
+
]
|
|
14
|
+
```
|
|
15
|
+
|
|
16
|
+
Rules:
|
|
17
|
+
|
|
18
|
+
- Analyze all provided files together, but output comments in the same format as inline review.
|
|
19
|
+
- Prioritize the most important issues if there are many (maximum 50 comments).
|
|
20
|
+
- "file" must exactly match the file path in the diff.
|
|
21
|
+
- "line" must be an integer from the new version of the file.
|
|
22
|
+
- "message" must be a short, clear, and actionable explanation (1 sentence).
|
|
23
|
+
- "suggestion" must contain ONLY the code to replace the line(s), without markdown or comments.
|
|
24
|
+
- Use correct indentation from the file.
|
|
25
|
+
- If no concrete replacement is appropriate, set "suggestion" to null.
|
|
26
|
+
- Do not include anything outside the JSON array.
|
|
27
|
+
- If no issues are found, return [].
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
Return ONLY a valid JSON array of inline review comments.
|
|
2
|
+
|
|
3
|
+
Format:
|
|
4
|
+
|
|
5
|
+
```json
|
|
6
|
+
[
|
|
7
|
+
{
|
|
8
|
+
"file": "<relative_file_path>",
|
|
9
|
+
"line": <line_number>,
|
|
10
|
+
"message": "<short review message explaining the issue or suggestion>",
|
|
11
|
+
"suggestion": "<replacement code block, without markdown, or null if not applicable>"
|
|
12
|
+
}
|
|
13
|
+
]
|
|
14
|
+
```
|
|
15
|
+
|
|
16
|
+
Rules:
|
|
17
|
+
|
|
18
|
+
- "file" must exactly match the file path in the diff.
|
|
19
|
+
- "line" must be an integer from the new version of the file.
|
|
20
|
+
- "message" must be a short, clear, and actionable explanation (1 sentence).
|
|
21
|
+
- "suggestion" must contain ONLY the code to replace the line(s), without markdown or comments.
|
|
22
|
+
- Use correct indentation from the file.
|
|
23
|
+
- If no concrete replacement is appropriate, set "suggestion" to null.
|
|
24
|
+
- Do not include anything outside the JSON array.
|
|
25
|
+
- If no issues are found, return [].
|
|
File without changes
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
gpt-4o-mini:
|
|
2
|
+
input: 0.15e-6
|
|
3
|
+
output: 0.60e-6
|
|
4
|
+
|
|
5
|
+
gpt-4o:
|
|
6
|
+
input: 5.00e-6
|
|
7
|
+
output: 15.00e-6
|
|
8
|
+
|
|
9
|
+
gpt-4.1-mini:
|
|
10
|
+
input: 0.15e-6
|
|
11
|
+
output: 0.60e-6
|
|
12
|
+
|
|
13
|
+
gpt-4.1:
|
|
14
|
+
input: 5.00e-6
|
|
15
|
+
output: 15.00e-6
|
|
16
|
+
|
|
17
|
+
gpt-3.5-turbo:
|
|
18
|
+
input: 0.50e-6
|
|
19
|
+
output: 1.50e-6
|
|
20
|
+
|
|
21
|
+
gemini-2.5-flash-lite:
|
|
22
|
+
input: 0.10e-6
|
|
23
|
+
output: 0.40e-6
|
|
24
|
+
|
|
25
|
+
gemini-2.0-flash-lite:
|
|
26
|
+
input: 0.019e-6
|
|
27
|
+
output: 0.1e-6
|
|
28
|
+
|
|
29
|
+
gemini-2.0-pro:
|
|
30
|
+
input: 0.125e-6
|
|
31
|
+
output: 0.375e-6
|
|
32
|
+
|
|
33
|
+
gemini-2.5-pro:
|
|
34
|
+
input: 1.25e-6
|
|
35
|
+
output: 10.00e-6
|
|
36
|
+
|
|
37
|
+
gemini-2.5-pro-long-context:
|
|
38
|
+
input: 2.50e-6
|
|
39
|
+
output: 15.00e-6
|
|
40
|
+
|
|
41
|
+
claude-3.5-sonnet:
|
|
42
|
+
input: 3.00e-6
|
|
43
|
+
output: 15.00e-6
|
|
44
|
+
|
|
45
|
+
claude-3-opus:
|
|
46
|
+
input: 15.00e-6
|
|
47
|
+
output: 75.00e-6
|
|
48
|
+
|
|
49
|
+
claude-3-sonnet:
|
|
50
|
+
input: 3.00e-6
|
|
51
|
+
output: 15.00e-6
|
|
52
|
+
|
|
53
|
+
claude-3-haiku:
|
|
54
|
+
input: 0.25e-6
|
|
55
|
+
output: 1.25e-6
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel, Field
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class LLMArtifactSchema(BaseModel):
    """One LLM prompt/response interaction persisted to disk as JSON."""

    id: str
    prompt: str
    response: str | None = None
    # BUG FIX: the original used default_factory=datetime.utcnow().isoformat,
    # which calls utcnow() ONCE at import time and then reuses that frozen
    # datetime's bound isoformat method — every artifact got the same stale
    # timestamp. Deferring the call with a lambda gives each instance the
    # time it was actually created.
    timestamp: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
    prompt_system: str
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
|
|
3
|
+
import aiofiles
|
|
4
|
+
|
|
5
|
+
from ai_review.config import settings
|
|
6
|
+
from ai_review.libs.logger import get_logger
|
|
7
|
+
from ai_review.services.artifacts.schema import LLMArtifactSchema
|
|
8
|
+
from ai_review.services.artifacts.tools import make_artifact_id
|
|
9
|
+
|
|
10
|
+
logger = get_logger("ARTIFACTS_SERVICE")
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class ArtifactsService:
    """Persists LLM prompts/responses to disk as JSON artifacts."""

    @classmethod
    async def save_llm_interaction(cls, prompt: str, prompt_system: str, response: str | None = None) -> str | None:
        """Persist one LLM interaction and return its artifact id.

        Returns None when artifact saving is disabled in settings. The id is
        returned even if the write fails (best-effort persistence).
        """
        if not settings.artifacts.llm_enabled:
            logger.debug("Artifacts for LLM saving is disabled, skipping")
            return None

        artifact_id = make_artifact_id(prompt)
        logger.info(f"Creating LLM interaction with id={artifact_id}")

        file = settings.artifacts.llm_dir / f"{artifact_id}.json"
        record = LLMArtifactSchema(
            id=artifact_id,
            prompt=prompt,
            response=response,
            prompt_system=prompt_system
        )

        # save_artifact already catches and logs every exception (returning
        # None), so the original try/except around this call was dead code;
        # check the return value explicitly instead.
        saved = await cls.save_artifact(file, record.model_dump_json(indent=2), kind="llm_interaction")
        if saved is None:
            logger.warning(f"LLM interaction {artifact_id} was not persisted")

        return artifact_id

    @classmethod
    async def save_artifact(cls, file: Path, content: str, kind: str = "artifact") -> Path | None:
        """Write *content* to *file*; return the path, or None on any failure."""
        try:
            async with aiofiles.open(file, "w", encoding="utf-8") as aiofile:
                await aiofile.write(content)
            logger.debug(f"Saved {kind} ā {file}")
            return file
        except Exception as error:
            # Best-effort: artifact loss must never break the review flow.
            logger.exception(f"Failed to save {kind} {file.stem}: {error}")
            return None
|
|
File without changes
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
from pydantic import BaseModel
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class CostReportSchema(BaseModel):
    """Token usage and USD cost for one model invocation (or an aggregate)."""

    model: str
    prompt_tokens: int
    completion_tokens: int
    input_cost: float
    output_cost: float
    total_cost: float

    @property
    def prompt_percent(self) -> float:
        """Prompt's share of the total cost in percent; 0.0 guards a zero total."""
        return (self.input_cost / self.total_cost * 100) if self.total_cost else 0.0

    @property
    def completion_percent(self) -> float:
        """Completion's share of the total cost in percent; 0.0 guards a zero total."""
        return (self.output_cost / self.total_cost * 100) if self.total_cost else 0.0

    @property
    def pretty_total_line(self) -> str:
        """Total-cost row, column-aligned with the token rows below."""
        return f"- {'Total:':<20} {'':>7} {self.total_cost:12.6f} USD"

    @property
    def pretty_prompt_line(self) -> str:
        """Prompt-token row: count, USD cost, and percentage of total."""
        return (
            f"- {'Prompt tokens:':<20} {self.prompt_tokens:>7} ā "
            f"{self.input_cost:12.6f} USD ({self.prompt_percent:.1f}%)"
        )

    @property
    def pretty_completion_line(self) -> str:
        """Completion-token row: count, USD cost, and percentage of total."""
        return (
            f"- {'Completion tokens:':<20} {self.completion_tokens:>7} ā "
            f"{self.output_cost:12.6f} USD ({self.completion_percent:.1f}%)"
        )

    def pretty(self) -> str:
        """Multi-line human-readable cost summary for posting in a review."""
        return (
            f"\nš° Estimated Cost for `{self.model}`\n"
            f"{self.pretty_prompt_line}\n"
            f"{self.pretty_completion_line}\n"
            f"{self.pretty_total_line}\n"
        )
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
from ai_review.config import settings
|
|
2
|
+
from ai_review.libs.logger import get_logger
|
|
3
|
+
from ai_review.services.cost.schema import CostReportSchema
|
|
4
|
+
from ai_review.services.llm.types import ChatResultSchema
|
|
5
|
+
|
|
6
|
+
logger = get_logger("COST_SERVICE")
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class CostService:
    """Accumulates per-call LLM cost reports and aggregates them into one."""

    def __init__(self):
        # Pricing table: model name -> per-token input/output USD rates.
        self.pricing = settings.llm.load_pricing()
        self.reports: list[CostReportSchema] = []

    def calculate(self, result: ChatResultSchema) -> CostReportSchema | None:
        """Build, record, and return the cost report for one chat result.

        Returns None when token counts are missing or the configured model
        has no pricing entry.
        """
        if (result.prompt_tokens is None) or (result.completion_tokens is None):
            return None

        model = settings.llm.meta.model
        pricing = self.pricing.get(model)
        if not pricing:
            logger.warning(f"No pricing found for {model=}, skipping cost calculation")
            return None

        input_cost = result.prompt_tokens * pricing.input
        output_cost = result.completion_tokens * pricing.output
        total_cost = input_cost + output_cost

        report = CostReportSchema(
            # Reuse the local; the original re-read settings.llm.meta.model here.
            model=model,
            total_cost=total_cost,
            input_cost=input_cost,
            output_cost=output_cost,
            prompt_tokens=result.prompt_tokens,
            completion_tokens=result.completion_tokens,
        )

        self.reports.append(report)
        return report

    def aggregate(self) -> CostReportSchema | None:
        """Sum every recorded report into a single report; None when empty.

        NOTE(review): assumes all reports share one model — the aggregate
        carries the first report's model name; confirm if models can mix.
        """
        if not self.reports:
            return None

        return CostReportSchema(
            model=self.reports[0].model,
            prompt_tokens=sum(report.prompt_tokens for report in self.reports),
            completion_tokens=sum(report.completion_tokens for report in self.reports),
            input_cost=sum(report.input_cost for report in self.reports),
            output_cost=sum(report.output_cost for report in self.reports),
            total_cost=sum(report.total_cost for report in self.reports),
        )
|
|
File without changes
|