xai-review 0.24.0.tar.gz → 0.25.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {xai_review-0.24.0 → xai_review-0.25.0}/PKG-INFO +8 -4
- {xai_review-0.24.0 → xai_review-0.25.0}/README.md +7 -3
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/claude/client.py +2 -1
- xai_review-0.25.0/ai_review/clients/claude/types.py +8 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gemini/client.py +2 -1
- xai_review-0.25.0/ai_review/clients/gemini/types.py +8 -0
- xai_review-0.25.0/ai_review/clients/ollama/client.py +41 -0
- xai_review-0.25.0/ai_review/clients/ollama/schema.py +47 -0
- xai_review-0.25.0/ai_review/clients/ollama/types.py +8 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/openai/client.py +2 -1
- xai_review-0.25.0/ai_review/clients/openai/types.py +8 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/http.py +4 -1
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/llm/base.py +8 -1
- xai_review-0.25.0/ai_review/libs/config/llm/claude.py +10 -0
- xai_review-0.25.0/ai_review/libs/config/llm/gemini.py +10 -0
- xai_review-0.25.0/ai_review/libs/config/llm/meta.py +7 -0
- xai_review-0.25.0/ai_review/libs/config/llm/ollama.py +14 -0
- xai_review-0.25.0/ai_review/libs/config/llm/openai.py +10 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/vcs/github.py +2 -2
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/vcs/gitlab.py +2 -2
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/constants/llm_provider.py +1 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/llm/factory.py +3 -0
- xai_review-0.25.0/ai_review/services/llm/ollama/client.py +34 -0
- xai_review-0.25.0/ai_review/tests/fixtures/clients/claude.py +67 -0
- xai_review-0.25.0/ai_review/tests/fixtures/clients/gemini.py +73 -0
- xai_review-0.25.0/ai_review/tests/fixtures/clients/ollama.py +65 -0
- xai_review-0.25.0/ai_review/tests/fixtures/clients/openai.py +69 -0
- xai_review-0.25.0/ai_review/tests/suites/clients/ollama/test_client.py +12 -0
- xai_review-0.25.0/ai_review/tests/suites/clients/ollama/test_schema.py +65 -0
- xai_review-0.25.0/ai_review/tests/suites/services/llm/claude/test_client.py +22 -0
- xai_review-0.25.0/ai_review/tests/suites/services/llm/gemini/test_client.py +22 -0
- xai_review-0.25.0/ai_review/tests/suites/services/llm/ollama/test_client.py +22 -0
- xai_review-0.25.0/ai_review/tests/suites/services/llm/openai/test_client.py +22 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/llm/test_factory.py +8 -1
- xai_review-0.25.0/ai_review/tests/suites/services/review/__init__.py +0 -0
- xai_review-0.25.0/ai_review/tests/suites/services/review/inline/__init__.py +0 -0
- xai_review-0.25.0/ai_review/tests/suites/services/review/policy/__init__.py +0 -0
- xai_review-0.25.0/ai_review/tests/suites/services/review/summary/__init__.py +0 -0
- xai_review-0.25.0/ai_review/tests/suites/services/vcs/__init__.py +0 -0
- xai_review-0.25.0/ai_review/tests/suites/services/vcs/github/__init__.py +0 -0
- xai_review-0.25.0/ai_review/tests/suites/services/vcs/gitlab/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/pyproject.toml +1 -1
- {xai_review-0.24.0 → xai_review-0.25.0}/xai_review.egg-info/PKG-INFO +8 -4
- {xai_review-0.24.0 → xai_review-0.25.0}/xai_review.egg-info/SOURCES.txt +23 -0
- xai_review-0.24.0/ai_review/libs/config/llm/claude.py +0 -13
- xai_review-0.24.0/ai_review/libs/config/llm/gemini.py +0 -13
- xai_review-0.24.0/ai_review/libs/config/llm/openai.py +0 -13
- xai_review-0.24.0/ai_review/tests/fixtures/clients/claude.py +0 -22
- xai_review-0.24.0/ai_review/tests/fixtures/clients/gemini.py +0 -21
- xai_review-0.24.0/ai_review/tests/fixtures/clients/openai.py +0 -21
- {xai_review-0.24.0 → xai_review-0.25.0}/LICENSE +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/cli/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/cli/commands/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/cli/commands/run_context_review.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/cli/commands/run_inline_review.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/cli/commands/run_review.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/cli/commands/run_summary_review.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/cli/main.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/claude/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/claude/schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gemini/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gemini/schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/client.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/pr/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/pr/client.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/pr/schema/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/pr/schema/comments.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/pr/schema/files.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/pr/schema/pull_request.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/pr/schema/reviews.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/github/pr/types.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gitlab/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gitlab/client.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gitlab/mr/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gitlab/mr/client.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gitlab/mr/schema/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gitlab/mr/schema/changes.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gitlab/mr/schema/discussions.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gitlab/mr/schema/notes.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/gitlab/mr/types.py +0 -0
- {xai_review-0.24.0/ai_review/clients/openai → xai_review-0.25.0/ai_review/clients/ollama}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/libs → xai_review-0.25.0/ai_review/clients/openai}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/clients/openai/schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/config.py +0 -0
- {xai_review-0.24.0/ai_review/libs/asynchronous → xai_review-0.25.0/ai_review/libs}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/libs/config → xai_review-0.25.0/ai_review/libs/asynchronous}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/asynchronous/gather.py +0 -0
- {xai_review-0.24.0/ai_review/libs/config/llm → xai_review-0.25.0/ai_review/libs/config}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/artifacts.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/base.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/core.py +0 -0
- {xai_review-0.24.0/ai_review/libs/config/vcs → xai_review-0.25.0/ai_review/libs/config/llm}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/logger.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/prompt.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/review.py +0 -0
- {xai_review-0.24.0/ai_review/libs/constants → xai_review-0.25.0/ai_review/libs/config/vcs}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/config/vcs/base.py +0 -0
- {xai_review-0.24.0/ai_review/libs/diff → xai_review-0.25.0/ai_review/libs/constants}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/constants/vcs_provider.py +0 -0
- {xai_review-0.24.0/ai_review/libs/http → xai_review-0.25.0/ai_review/libs/diff}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/diff/models.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/diff/parser.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/diff/tools.py +0 -0
- {xai_review-0.24.0/ai_review/libs/http/event_hooks → xai_review-0.25.0/ai_review/libs/http}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/http/client.py +0 -0
- {xai_review-0.24.0/ai_review/libs/http/transports → xai_review-0.25.0/ai_review/libs/http/event_hooks}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/http/event_hooks/base.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/http/event_hooks/logger.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/http/handlers.py +0 -0
- {xai_review-0.24.0/ai_review/libs/template → xai_review-0.25.0/ai_review/libs/http/transports}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/http/transports/retry.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/json.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/logger.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/resources.py +0 -0
- {xai_review-0.24.0/ai_review/prompts → xai_review-0.25.0/ai_review/libs/template}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/libs/template/render.py +0 -0
- {xai_review-0.24.0/ai_review/resources → xai_review-0.25.0/ai_review/prompts}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/prompts/default_context.md +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/prompts/default_inline.md +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/prompts/default_summary.md +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/prompts/default_system_context.md +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/prompts/default_system_inline.md +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/prompts/default_system_summary.md +0 -0
- {xai_review-0.24.0/ai_review/services → xai_review-0.25.0/ai_review/resources}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/resources/pricing.yaml +0 -0
- {xai_review-0.24.0/ai_review/services/artifacts → xai_review-0.25.0/ai_review/services}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/services/cost → xai_review-0.25.0/ai_review/services/artifacts}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/artifacts/schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/artifacts/service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/artifacts/tools.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/artifacts/types.py +0 -0
- {xai_review-0.24.0/ai_review/services/diff → xai_review-0.25.0/ai_review/services/cost}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/cost/schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/cost/service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/cost/types.py +0 -0
- {xai_review-0.24.0/ai_review/services/git → xai_review-0.25.0/ai_review/services/diff}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/diff/renderers.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/diff/schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/diff/service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/diff/tools.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/diff/types.py +0 -0
- {xai_review-0.24.0/ai_review/services/llm → xai_review-0.25.0/ai_review/services/git}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/git/service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/git/types.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/hook/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/hook/constants.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/hook/service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/hook/types.py +0 -0
- {xai_review-0.24.0/ai_review/services/llm/claude → xai_review-0.25.0/ai_review/services/llm}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/services/llm/gemini → xai_review-0.25.0/ai_review/services/llm/claude}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/llm/claude/client.py +0 -0
- {xai_review-0.24.0/ai_review/services/llm/openai → xai_review-0.25.0/ai_review/services/llm/gemini}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/llm/gemini/client.py +0 -0
- {xai_review-0.24.0/ai_review/services/prompt → xai_review-0.25.0/ai_review/services/llm/ollama}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/services/review → xai_review-0.25.0/ai_review/services/llm/openai}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/llm/openai/client.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/llm/types.py +0 -0
- {xai_review-0.24.0/ai_review/services/review/gateway → xai_review-0.25.0/ai_review/services/prompt}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/prompt/adapter.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/prompt/schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/prompt/service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/prompt/tools.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/prompt/types.py +0 -0
- {xai_review-0.24.0/ai_review/services/review/inline → xai_review-0.25.0/ai_review/services/review}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/services/review/policy → xai_review-0.25.0/ai_review/services/review/gateway}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/gateway/comment.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/gateway/llm.py +0 -0
- {xai_review-0.24.0/ai_review/services/review/summary → xai_review-0.25.0/ai_review/services/review/inline}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/inline/schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/inline/service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/inline/types.py +0 -0
- {xai_review-0.24.0/ai_review/services/vcs → xai_review-0.25.0/ai_review/services/review/policy}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/policy/service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/service.py +0 -0
- {xai_review-0.24.0/ai_review/services/vcs/github → xai_review-0.25.0/ai_review/services/review/summary}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/summary/schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/summary/service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/review/summary/types.py +0 -0
- {xai_review-0.24.0/ai_review/services/vcs/gitlab → xai_review-0.25.0/ai_review/services/vcs}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/vcs/factory.py +0 -0
- {xai_review-0.24.0/ai_review/tests → xai_review-0.25.0/ai_review/services/vcs/github}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/vcs/github/client.py +0 -0
- {xai_review-0.24.0/ai_review/tests/fixtures → xai_review-0.25.0/ai_review/services/vcs/gitlab}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/vcs/gitlab/client.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/services/vcs/types.py +0 -0
- {xai_review-0.24.0/ai_review/tests/fixtures/clients → xai_review-0.25.0/ai_review/tests}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/fixtures/services → xai_review-0.25.0/ai_review/tests/fixtures}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/fixtures/services/review → xai_review-0.25.0/ai_review/tests/fixtures/clients}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/clients/github.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/clients/gitlab.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites → xai_review-0.25.0/ai_review/tests/fixtures/services}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/services/artifacts.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/services/cost.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/services/diff.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/services/git.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/services/llm.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/services/prompt.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/clients → xai_review-0.25.0/ai_review/tests/fixtures/services/review}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/services/review/inline.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/services/review/summary.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/fixtures/services/vcs.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/clients/claude → xai_review-0.25.0/ai_review/tests/suites}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/clients/gemini → xai_review-0.25.0/ai_review/tests/suites/clients}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/clients/github → xai_review-0.25.0/ai_review/tests/suites/clients/claude}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/clients/claude/test_client.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/clients/claude/test_schema.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/clients/gitlab → xai_review-0.25.0/ai_review/tests/suites/clients/gemini}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/clients/gemini/test_client.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/clients/gemini/test_schema.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/clients/openai → xai_review-0.25.0/ai_review/tests/suites/clients/github}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/clients/github/test_client.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/libs → xai_review-0.25.0/ai_review/tests/suites/clients/gitlab}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/clients/gitlab/test_client.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/libs/asynchronous → xai_review-0.25.0/ai_review/tests/suites/clients/ollama}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/libs/config → xai_review-0.25.0/ai_review/tests/suites/clients/openai}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/clients/openai/test_client.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/clients/openai/test_schema.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/libs/diff → xai_review-0.25.0/ai_review/tests/suites/libs}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/libs/template → xai_review-0.25.0/ai_review/tests/suites/libs/asynchronous}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/libs/asynchronous/test_gather.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services → xai_review-0.25.0/ai_review/tests/suites/libs/config}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/libs/config/test_prompt.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/cost → xai_review-0.25.0/ai_review/tests/suites/libs/diff}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/libs/diff/test_models.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/libs/diff/test_parser.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/libs/diff/test_tools.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/diff → xai_review-0.25.0/ai_review/tests/suites/libs/template}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/libs/template/test_render.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/libs/test_json.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/hook → xai_review-0.25.0/ai_review/tests/suites/services}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/llm → xai_review-0.25.0/ai_review/tests/suites/services/cost}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/cost/test_schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/cost/test_service.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/prompt → xai_review-0.25.0/ai_review/tests/suites/services/diff}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/diff/test_renderers.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/diff/test_service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/diff/test_tools.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/review → xai_review-0.25.0/ai_review/tests/suites/services/hook}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/hook/test_service.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/review/inline → xai_review-0.25.0/ai_review/tests/suites/services/llm}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/review/policy → xai_review-0.25.0/ai_review/tests/suites/services/llm/claude}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/review/summary → xai_review-0.25.0/ai_review/tests/suites/services/llm/gemini}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/vcs → xai_review-0.25.0/ai_review/tests/suites/services/llm/ollama}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/vcs/github → xai_review-0.25.0/ai_review/tests/suites/services/llm/openai}/__init__.py +0 -0
- {xai_review-0.24.0/ai_review/tests/suites/services/vcs/gitlab → xai_review-0.25.0/ai_review/tests/suites/services/prompt}/__init__.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/prompt/test_adapter.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/prompt/test_schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/prompt/test_service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/prompt/test_tools.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/review/inline/test_schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/review/inline/test_service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/review/policy/test_service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/review/summary/test_schema.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/review/summary/test_service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/review/test_service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/vcs/github/test_service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/vcs/gitlab/test_service.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/ai_review/tests/suites/services/vcs/test_factory.py +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/setup.cfg +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/xai_review.egg-info/dependency_links.txt +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/xai_review.egg-info/entry_points.txt +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/xai_review.egg-info/requires.txt +0 -0
- {xai_review-0.24.0 → xai_review-0.25.0}/xai_review.egg-info/top_level.txt +0 -0
--- xai_review-0.24.0/PKG-INFO
+++ xai_review-0.25.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: xai-review
-Version: 0.24.0
+Version: 0.25.0
 Summary: AI-powered code review tool
 Author-email: Nikita Filonov <nikita.filonov@example.com>
 Maintainer-email: Nikita Filonov <nikita.filonov@example.com>
@@ -66,7 +66,7 @@ improve code quality, enforce consistency, and speed up the review process.
 
 ✨ Key features:
 
-- **Multiple LLM providers** — choose between **OpenAI**, **Claude**,
+- **Multiple LLM providers** — choose between **OpenAI**, **Claude**, **Gemini**, or **Ollama**, and switch anytime.
 - **VCS integration** — works out of the box with GitLab, GitHub (more providers coming).
 - **Customizable prompts** — adapt inline, context, and summary reviews to match your team’s coding guidelines.
 - **Flexible configuration** — supports `YAML`, `JSON`, and `ENV`, with seamless overrides in CI/CD pipelines.
@@ -168,7 +168,7 @@ for complete, ready-to-use examples.
 
 Key things you can customize:
 
-- **LLM provider** — OpenAI, Gemini, or
+- **LLM provider** — OpenAI, Gemini, Claude, or Ollama
 - **Model settings** — model name, temperature, max tokens
 - **VCS integration** — works out of the box with **GitLab** and **GitHub**.
 - **Review policy** — which files to include/exclude, review modes
@@ -209,7 +209,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - uses: Nikita-Filonov/ai-review@v0.24.0
+      - uses: Nikita-Filonov/ai-review@v0.25.0
         with:
           review-command: ${{ inputs.review-command }}
         env:
@@ -288,6 +288,10 @@ provider** explicitly configured in your `.ai-review.yaml`.
 All data is sent **directly** from your CI/CD environment to the selected LLM API endpoint (e.g. OpenAI, Gemini,
 Claude). No intermediary servers or storage layers are involved.
 
+If you use **Ollama**, requests are sent to your **local or self-hosted Ollama runtime**
+(by default `http://localhost:11434`). This allows you to run reviews completely **offline**, keeping all data strictly
+inside your infrastructure.
+
 > ⚠️ Please ensure you use proper API tokens and avoid exposing corporate or personal secrets.
 > If you accidentally leak private code or credentials due to incorrect configuration (e.g., using a personal key
 > instead of an enterprise one), it is **your responsibility** — the tool does not retain or share any data by itself.
--- xai_review-0.24.0/README.md
+++ xai_review-0.25.0/README.md
@@ -32,7 +32,7 @@ improve code quality, enforce consistency, and speed up the review process.
 
 ✨ Key features:
 
-- **Multiple LLM providers** — choose between **OpenAI**, **Claude**,
+- **Multiple LLM providers** — choose between **OpenAI**, **Claude**, **Gemini**, or **Ollama**, and switch anytime.
 - **VCS integration** — works out of the box with GitLab, GitHub (more providers coming).
 - **Customizable prompts** — adapt inline, context, and summary reviews to match your team’s coding guidelines.
 - **Flexible configuration** — supports `YAML`, `JSON`, and `ENV`, with seamless overrides in CI/CD pipelines.
@@ -134,7 +134,7 @@ for complete, ready-to-use examples.
 
 Key things you can customize:
 
-- **LLM provider** — OpenAI, Gemini, or
+- **LLM provider** — OpenAI, Gemini, Claude, or Ollama
 - **Model settings** — model name, temperature, max tokens
 - **VCS integration** — works out of the box with **GitLab** and **GitHub**.
 - **Review policy** — which files to include/exclude, review modes
@@ -175,7 +175,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - uses: Nikita-Filonov/ai-review@v0.24.0
+      - uses: Nikita-Filonov/ai-review@v0.25.0
        with:
          review-command: ${{ inputs.review-command }}
        env:
@@ -254,6 +254,10 @@ provider** explicitly configured in your `.ai-review.yaml`.
 All data is sent **directly** from your CI/CD environment to the selected LLM API endpoint (e.g. OpenAI, Gemini,
 Claude). No intermediary servers or storage layers are involved.
 
+If you use **Ollama**, requests are sent to your **local or self-hosted Ollama runtime**
+(by default `http://localhost:11434`). This allows you to run reviews completely **offline**, keeping all data strictly
+inside your infrastructure.
+
 > ⚠️ Please ensure you use proper API tokens and avoid exposing corporate or personal secrets.
 > If you accidentally leak private code or credentials due to incorrect configuration (e.g., using a personal key
 > instead of an enterprise one), it is **your responsibility** — the tool does not retain or share any data by itself.
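Both README hunks above point Ollama users at a local runtime on `http://localhost:11434`. A quick way to confirm that runtime is reachable before wiring it into CI is to hit Ollama's model-listing endpoint. This is a minimal sketch using `httpx` (already a dependency of this package), not part of the release itself:

```python
import httpx

# Reachability check for a local Ollama runtime (default URL quoted in the README).
# GET /api/tags is Ollama's endpoint for listing locally pulled models.
response = httpx.get("http://localhost:11434/api/tags", timeout=5)
response.raise_for_status()
models = [model["name"] for model in response.json().get("models", [])]
print(f"Ollama is reachable; local models: {models}")
```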
--- xai_review-0.24.0/ai_review/clients/claude/client.py
+++ xai_review-0.25.0/ai_review/clients/claude/client.py
@@ -1,6 +1,7 @@
 from httpx import AsyncClient, Response, AsyncHTTPTransport
 
 from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeChatResponseSchema
+from ai_review.clients.claude.types import ClaudeHTTPClientProtocol
 from ai_review.config import settings
 from ai_review.libs.http.client import HTTPClient
 from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -13,7 +14,7 @@ class ClaudeHTTPClientError(HTTPClientError):
     pass
 
 
-class ClaudeHTTPClient(HTTPClient):
+class ClaudeHTTPClient(HTTPClient, ClaudeHTTPClientProtocol):
     @handle_http_error(client="ClaudeHTTPClient", exception=ClaudeHTTPClientError)
     async def chat_api(self, request: ClaudeChatRequestSchema) -> Response:
         return await self.post("/v1/messages", json=request.model_dump())
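The new `ai_review/clients/claude/types.py` (+8 lines) is listed above but its body is not shown in this extract. Judging by the test fixtures later in this diff, which subclass `ClaudeHTTPClientProtocol` and implement only `chat`, it is most likely a small `typing.Protocol`. A hypothetical sketch of what those 8 lines might contain (the Gemini, OpenAI, and Ollama `types.py` modules presumably mirror it):

```python
from typing import Protocol

from ai_review.clients.claude.schema import ClaudeChatRequestSchema, ClaudeChatResponseSchema


class ClaudeHTTPClientProtocol(Protocol):
    # The fixtures only ever call chat(), so the protocol plausibly declares just this.
    async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema: ...
```

Mixing the protocol into the concrete client lets the fixtures substitute a fake that satisfies the same interface without touching HTTP.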
--- xai_review-0.24.0/ai_review/clients/gemini/client.py
+++ xai_review-0.25.0/ai_review/clients/gemini/client.py
@@ -1,6 +1,7 @@
 from httpx import Response, AsyncHTTPTransport, AsyncClient
 
 from ai_review.clients.gemini.schema import GeminiChatRequestSchema, GeminiChatResponseSchema
+from ai_review.clients.gemini.types import GeminiHTTPClientProtocol
 from ai_review.config import settings
 from ai_review.libs.http.client import HTTPClient
 from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -13,7 +14,7 @@ class GeminiHTTPClientError(HTTPClientError):
     pass
 
 
-class GeminiHTTPClient(HTTPClient):
+class GeminiHTTPClient(HTTPClient, GeminiHTTPClientProtocol):
     @handle_http_error(client="GeminiHTTPClient", exception=GeminiHTTPClientError)
     async def chat_api(self, request: GeminiChatRequestSchema) -> Response:
         meta = settings.llm.meta
--- /dev/null
+++ xai_review-0.25.0/ai_review/clients/ollama/client.py
@@ -0,0 +1,41 @@
+from httpx import AsyncClient, Response, AsyncHTTPTransport
+
+from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaChatResponseSchema
+from ai_review.clients.ollama.types import OllamaHTTPClientProtocol
+from ai_review.config import settings
+from ai_review.libs.http.client import HTTPClient
+from ai_review.libs.http.event_hooks.logger import LoggerEventHook
+from ai_review.libs.http.handlers import HTTPClientError, handle_http_error
+from ai_review.libs.http.transports.retry import RetryTransport
+from ai_review.libs.logger import get_logger
+
+
+class OllamaHTTPClientError(HTTPClientError):
+    pass
+
+
+class OllamaHTTPClient(HTTPClient, OllamaHTTPClientProtocol):
+    @handle_http_error(client="OllamaHTTPClient", exception=OllamaHTTPClientError)
+    async def chat_api(self, request: OllamaChatRequestSchema) -> Response:
+        return await self.post("/api/chat", json=request.model_dump())
+
+    async def chat(self, request: OllamaChatRequestSchema) -> OllamaChatResponseSchema:
+        response = await self.chat_api(request)
+        return OllamaChatResponseSchema.model_validate_json(response.text)
+
+
+def get_ollama_http_client() -> OllamaHTTPClient:
+    logger = get_logger("OLLAMA_HTTP_CLIENT")
+    logger_event_hook = LoggerEventHook(logger=logger)
+    retry_transport = RetryTransport(logger=logger, transport=AsyncHTTPTransport())
+
+    client = AsyncClient(
+        timeout=settings.llm.http_client.timeout,
+        base_url=settings.llm.http_client.api_url_value,
+        transport=retry_transport,
+        event_hooks={
+            "request": [logger_event_hook.request],
+            "response": [logger_event_hook.response],
+        },
+    )
+    return OllamaHTTPClient(client=client)
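A minimal usage sketch for the new HTTP client, assuming settings resolve to an Ollama provider and a runtime is listening on the configured `api_url` (illustrative, not part of the release):

```python
import asyncio

from ai_review.clients.ollama.client import get_ollama_http_client
from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaMessageSchema


async def main() -> None:
    client = get_ollama_http_client()
    request = OllamaChatRequestSchema(
        model="llama2",
        messages=[OllamaMessageSchema(role="user", content="Say hello")],
    )
    # chat() POSTs to /api/chat and validates the JSON body into the response schema;
    # stream defaults to False, so Ollama replies with a single JSON object.
    response = await client.chat(request)
    print(response.first_text)


asyncio.run(main())
```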
--- /dev/null
+++ xai_review-0.25.0/ai_review/clients/ollama/schema.py
@@ -0,0 +1,47 @@
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+
+class OllamaMessageSchema(BaseModel):
+    role: Literal["system", "user", "assistant"]
+    content: str
+
+
+class OllamaOptionsSchema(BaseModel):
+    stop: list[str] | None = None
+    seed: int | None = None
+    top_p: float | None = Field(default=None, ge=0.0, le=1.0)
+    temperature: float | None = Field(default=None, ge=0.0, le=2.0)
+    num_predict: int | None = Field(default=None, ge=1)
+    repeat_penalty: float | None = Field(default=None, ge=0.0)
+
+
+class OllamaChatRequestSchema(BaseModel):
+    model: str
+    stream: bool = False
+    options: OllamaOptionsSchema | None = None
+    messages: list[OllamaMessageSchema]
+
+
+class OllamaUsageSchema(BaseModel):
+    prompt_tokens: int | None = None
+    completion_tokens: int | None = None
+
+    @property
+    def total_tokens(self) -> int | None:
+        if (self.prompt_tokens is not None) and (self.completion_tokens is not None):
+            return self.prompt_tokens + self.completion_tokens
+
+        return None
+
+
+class OllamaChatResponseSchema(BaseModel):
+    done: bool = Field(default=True)
+    usage: OllamaUsageSchema | None = None
+    model: str
+    message: OllamaMessageSchema
+
+    @property
+    def first_text(self) -> str:
+        return (self.message.content or "").strip()
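These are plain Pydantic models, so the two convenience properties can be exercised standalone; a short sketch against the schema exactly as defined above:

```python
from ai_review.clients.ollama.schema import OllamaUsageSchema, OllamaChatResponseSchema

payload = """
{
  "done": true,
  "model": "llama2",
  "usage": {"prompt_tokens": 11, "completion_tokens": 4},
  "message": {"role": "assistant", "content": "  LGTM  "}
}
"""

response = OllamaChatResponseSchema.model_validate_json(payload)
assert response.first_text == "LGTM"      # whitespace stripped by first_text
assert response.usage.total_tokens == 15  # prompt_tokens + completion_tokens

# total_tokens degrades to None when either counter is missing:
assert OllamaUsageSchema(prompt_tokens=11).total_tokens is None
```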
--- xai_review-0.24.0/ai_review/clients/openai/client.py
+++ xai_review-0.25.0/ai_review/clients/openai/client.py
@@ -1,6 +1,7 @@
 from httpx import Response, AsyncHTTPTransport, AsyncClient
 
 from ai_review.clients.openai.schema import OpenAIChatRequestSchema, OpenAIChatResponseSchema
+from ai_review.clients.openai.types import OpenAIHTTPClientProtocol
 from ai_review.config import settings
 from ai_review.libs.http.client import HTTPClient
 from ai_review.libs.http.event_hooks.logger import LoggerEventHook
@@ -13,7 +14,7 @@ class OpenAIHTTPClientError(HTTPClientError):
     pass
 
 
-class OpenAIHTTPClient(HTTPClient):
+class OpenAIHTTPClient(HTTPClient, OpenAIHTTPClientProtocol):
     @handle_http_error(client='OpenAIHTTPClient', exception=OpenAIHTTPClientError)
     async def chat_api(self, request: OpenAIChatRequestSchema) -> Response:
         return await self.post("/chat/completions", json=request.model_dump())
--- xai_review-0.24.0/ai_review/libs/config/http.py
+++ xai_review-0.25.0/ai_review/libs/config/http.py
@@ -4,12 +4,15 @@ from pydantic import BaseModel, HttpUrl, SecretStr
 class HTTPClientConfig(BaseModel):
     timeout: float = 120
     api_url: HttpUrl
-    api_token: SecretStr
 
     @property
     def api_url_value(self) -> str:
         return str(self.api_url)
 
+
+class HTTPClientWithTokenConfig(HTTPClientConfig):
+    api_token: SecretStr
+
     @property
     def api_token_value(self) -> str:
         return self.api_token.get_secret_value()
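The point of this split is that token-less providers (Ollama) can now validate a config without an `api_token`, while every token-bearing config still requires one. A small illustrative sketch (Pydantic v2, as used throughout this package; the token value is a placeholder):

```python
from pydantic import HttpUrl, SecretStr, ValidationError

from ai_review.libs.config.http import HTTPClientConfig, HTTPClientWithTokenConfig

# Base config: no token required. This is what the new Ollama config builds on.
base = HTTPClientConfig(api_url=HttpUrl("http://localhost:11434"))
print(base.api_url_value)  # http://localhost:11434/

# Token-bearing config: api_token is mandatory and stays wrapped in SecretStr.
with_token = HTTPClientWithTokenConfig(
    api_url=HttpUrl("https://api.openai.com/v1"),
    api_token=SecretStr("sk-placeholder"),
)
print(with_token.api_token_value)  # unwraps the secret for request headers

try:
    HTTPClientWithTokenConfig(api_url=HttpUrl("https://api.openai.com/v1"))
except ValidationError as error:
    print(f"api_token is required: {error.error_count()} error")
```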
--- xai_review-0.24.0/ai_review/libs/config/llm/base.py
+++ xai_review-0.25.0/ai_review/libs/config/llm/base.py
@@ -6,6 +6,7 @@ from pydantic import BaseModel, Field, FilePath
 
 from ai_review.libs.config.llm.claude import ClaudeHTTPClientConfig, ClaudeMetaConfig
 from ai_review.libs.config.llm.gemini import GeminiHTTPClientConfig, GeminiMetaConfig
+from ai_review.libs.config.llm.ollama import OllamaHTTPClientConfig, OllamaMetaConfig
 from ai_review.libs.config.llm.openai import OpenAIHTTPClientConfig, OpenAIMetaConfig
 from ai_review.libs.constants.llm_provider import LLMProvider
 from ai_review.libs.resources import load_resource
@@ -55,7 +56,13 @@ class ClaudeLLMConfig(LLMConfigBase):
     http_client: ClaudeHTTPClientConfig
 
 
+class OllamaLLMConfig(LLMConfigBase):
+    meta: OllamaMetaConfig
+    provider: Literal[LLMProvider.OLLAMA]
+    http_client: OllamaHTTPClientConfig
+
+
 LLMConfig = Annotated[
-    OpenAILLMConfig | GeminiLLMConfig | ClaudeLLMConfig,
+    OpenAILLMConfig | GeminiLLMConfig | ClaudeLLMConfig | OllamaLLMConfig,
     Field(discriminator="provider")
 ]
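Because `LLMConfig` is a discriminated union on `provider`, Pydantic selects the concrete config class from that single field. An illustrative sketch using `TypeAdapter`; the string `"ollama"` is an assumption about the value of the new `LLMProvider.OLLAMA` member (the diff adds it to `llm_provider.py` but does not show the value):

```python
from pydantic import TypeAdapter

from ai_review.libs.config.llm.base import LLMConfig, OllamaLLMConfig

adapter = TypeAdapter(LLMConfig)

config = adapter.validate_python({
    "provider": "ollama",  # assumed enum value for LLMProvider.OLLAMA
    "meta": {},            # OllamaMetaConfig defaults apply, e.g. model="llama2"
    "http_client": {"api_url": "http://localhost:11434"},
})

assert isinstance(config, OllamaLLMConfig)
assert config.meta.model == "llama2"
```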
--- /dev/null
+++ xai_review-0.25.0/ai_review/libs/config/llm/claude.py
@@ -0,0 +1,10 @@
+from ai_review.libs.config.http import HTTPClientWithTokenConfig
+from ai_review.libs.config.llm.meta import LLMMetaConfig
+
+
+class ClaudeMetaConfig(LLMMetaConfig):
+    model: str = "claude-3-sonnet"
+
+
+class ClaudeHTTPClientConfig(HTTPClientWithTokenConfig):
+    api_version: str = "2023-06-01"
--- /dev/null
+++ xai_review-0.25.0/ai_review/libs/config/llm/gemini.py
@@ -0,0 +1,10 @@
+from ai_review.libs.config.http import HTTPClientWithTokenConfig
+from ai_review.libs.config.llm.meta import LLMMetaConfig
+
+
+class GeminiMetaConfig(LLMMetaConfig):
+    model: str = "gemini-2.0-pro"
+
+
+class GeminiHTTPClientConfig(HTTPClientWithTokenConfig):
+    pass
--- /dev/null
+++ xai_review-0.25.0/ai_review/libs/config/llm/ollama.py
@@ -0,0 +1,14 @@
+from ai_review.libs.config.http import HTTPClientConfig
+from ai_review.libs.config.llm.meta import LLMMetaConfig
+
+
+class OllamaMetaConfig(LLMMetaConfig):
+    stop: list[str] | None = None
+    seed: int | None = None
+    model: str = "llama2"
+    top_p: float | None = None
+    repeat_penalty: float | None = None
+
+
+class OllamaHTTPClientConfig(HTTPClientConfig):
+    pass
--- /dev/null
+++ xai_review-0.25.0/ai_review/libs/config/llm/openai.py
@@ -0,0 +1,10 @@
+from ai_review.libs.config.http import HTTPClientWithTokenConfig
+from ai_review.libs.config.llm.meta import LLMMetaConfig
+
+
+class OpenAIMetaConfig(LLMMetaConfig):
+    model: str = "gpt-4o-mini"
+
+
+class OpenAIHTTPClientConfig(HTTPClientWithTokenConfig):
+    pass
--- xai_review-0.24.0/ai_review/libs/config/vcs/github.py
+++ xai_review-0.25.0/ai_review/libs/config/vcs/github.py
@@ -1,6 +1,6 @@
 from pydantic import BaseModel
 
-from ai_review.libs.config.http import HTTPClientConfig
+from ai_review.libs.config.http import HTTPClientWithTokenConfig
 
 
 class GitHubPipelineConfig(BaseModel):
@@ -9,5 +9,5 @@ class GitHubPipelineConfig(BaseModel):
     pull_number: str
 
 
-class GitHubHTTPClientConfig(HTTPClientConfig):
+class GitHubHTTPClientConfig(HTTPClientWithTokenConfig):
     pass
--- xai_review-0.24.0/ai_review/libs/config/vcs/gitlab.py
+++ xai_review-0.25.0/ai_review/libs/config/vcs/gitlab.py
@@ -1,6 +1,6 @@
 from pydantic import BaseModel
 
-from ai_review.libs.config.http import HTTPClientConfig
+from ai_review.libs.config.http import HTTPClientWithTokenConfig
 
 
 class GitLabPipelineConfig(BaseModel):
@@ -8,5 +8,5 @@ class GitLabPipelineConfig(BaseModel):
     merge_request_id: str
 
 
-class GitLabHTTPClientConfig(HTTPClientConfig):
+class GitLabHTTPClientConfig(HTTPClientWithTokenConfig):
     pass
--- xai_review-0.24.0/ai_review/services/llm/factory.py
+++ xai_review-0.25.0/ai_review/services/llm/factory.py
@@ -2,6 +2,7 @@ from ai_review.config import settings
 from ai_review.libs.constants.llm_provider import LLMProvider
 from ai_review.services.llm.claude.client import ClaudeLLMClient
 from ai_review.services.llm.gemini.client import GeminiLLMClient
+from ai_review.services.llm.ollama.client import OllamaLLMClient
 from ai_review.services.llm.openai.client import OpenAILLMClient
 from ai_review.services.llm.types import LLMClientProtocol
 
@@ -14,5 +15,7 @@ def get_llm_client() -> LLMClientProtocol:
             return GeminiLLMClient()
         case LLMProvider.CLAUDE:
             return ClaudeLLMClient()
+        case LLMProvider.OLLAMA:
+            return OllamaLLMClient()
         case _:
             raise ValueError(f"Unsupported LLM provider: {settings.llm.provider}")
--- /dev/null
+++ xai_review-0.25.0/ai_review/services/llm/ollama/client.py
@@ -0,0 +1,34 @@
+from ai_review.clients.ollama.client import get_ollama_http_client
+from ai_review.clients.ollama.schema import OllamaChatRequestSchema, OllamaMessageSchema, OllamaOptionsSchema
+from ai_review.config import settings
+from ai_review.services.llm.types import LLMClientProtocol, ChatResultSchema
+
+
+class OllamaLLMClient(LLMClientProtocol):
+    def __init__(self):
+        self.http_client = get_ollama_http_client()
+
+    async def chat(self, prompt: str, prompt_system: str) -> ChatResultSchema:
+        meta = settings.llm.meta
+        request = OllamaChatRequestSchema(
+            model=meta.model,
+            options=OllamaOptionsSchema(
+                stop=meta.stop,
+                seed=meta.seed,
+                top_p=meta.top_p,
+                temperature=meta.temperature,
+                num_predict=meta.max_tokens,
+                repeat_penalty=meta.repeat_penalty,
+            ),
+            messages=[
+                OllamaMessageSchema(role="system", content=prompt_system),
+                OllamaMessageSchema(role="user", content=prompt),
+            ],
+        )
+        response = await self.http_client.chat(request)
+        return ChatResultSchema(
+            text=response.first_text,
+            total_tokens=response.usage.total_tokens if response.usage else None,
+            prompt_tokens=response.usage.prompt_tokens if response.usage else None,
+            completion_tokens=response.usage.completion_tokens if response.usage else None,
+        )
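End to end, the factory change above plus this client mean a review run needs only a provider switch in configuration. A sketch of the call path, assuming settings are loaded with the Ollama provider:

```python
import asyncio

from ai_review.services.llm.factory import get_llm_client


async def main() -> None:
    # With provider set to Ollama in .ai-review.yaml, this returns OllamaLLMClient.
    client = get_llm_client()
    result = await client.chat(
        prompt="Review this diff: ...",
        prompt_system="You are a strict code reviewer.",
    )
    print(result.text, result.total_tokens)


asyncio.run(main())
```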
--- /dev/null
+++ xai_review-0.25.0/ai_review/tests/fixtures/clients/claude.py
@@ -0,0 +1,67 @@
+from typing import Any
+
+import pytest
+from pydantic import HttpUrl, SecretStr
+
+from ai_review.clients.claude.schema import (
+    ClaudeUsageSchema,
+    ClaudeContentSchema,
+    ClaudeChatRequestSchema,
+    ClaudeChatResponseSchema,
+)
+from ai_review.clients.claude.types import ClaudeHTTPClientProtocol
+from ai_review.config import settings
+from ai_review.libs.config.llm.base import ClaudeLLMConfig
+from ai_review.libs.config.llm.claude import ClaudeMetaConfig, ClaudeHTTPClientConfig
+from ai_review.libs.constants.llm_provider import LLMProvider
+from ai_review.services.llm.claude.client import ClaudeLLMClient
+
+
+class FakeClaudeHTTPClient(ClaudeHTTPClientProtocol):
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.calls: list[tuple[str, dict]] = []
+        self.responses = responses or {}
+
+    async def chat(self, request: ClaudeChatRequestSchema) -> ClaudeChatResponseSchema:
+        self.calls.append(("chat", {"request": request}))
+        return self.responses.get(
+            "chat",
+            ClaudeChatResponseSchema(
+                id="fake-id",
+                role="assistant",
+                usage=ClaudeUsageSchema(input_tokens=5, output_tokens=7),
+                content=[ClaudeContentSchema(type="text", text="FAKE_CLAUDE_RESPONSE")],
+            ),
+        )
+
+
+@pytest.fixture
+def fake_claude_http_client():
+    return FakeClaudeHTTPClient()
+
+
+@pytest.fixture
+def claude_llm_client(
+    monkeypatch: pytest.MonkeyPatch,
+    fake_claude_http_client: FakeClaudeHTTPClient
+) -> ClaudeLLMClient:
+    monkeypatch.setattr(
+        "ai_review.services.llm.claude.client.get_claude_http_client",
+        lambda: fake_claude_http_client,
+    )
+    return ClaudeLLMClient()
+
+
+@pytest.fixture
+def claude_http_client_config(monkeypatch: pytest.MonkeyPatch):
+    fake_config = ClaudeLLMConfig(
+        meta=ClaudeMetaConfig(),
+        provider=LLMProvider.CLAUDE,
+        http_client=ClaudeHTTPClientConfig(
+            timeout=10,
+            api_url=HttpUrl("https://api.anthropic.com"),
+            api_token=SecretStr("fake-token"),
+            api_version="2023-06-01",
+        )
+    )
+    monkeypatch.setattr(settings, "llm", fake_config)
--- /dev/null
+++ xai_review-0.25.0/ai_review/tests/fixtures/clients/gemini.py
@@ -0,0 +1,73 @@
+from typing import Any
+
+import pytest
+from pydantic import HttpUrl, SecretStr
+
+from ai_review.clients.gemini.schema import (
+    GeminiPartSchema,
+    GeminiUsageSchema,
+    GeminiContentSchema,
+    GeminiCandidateSchema,
+    GeminiChatRequestSchema,
+    GeminiChatResponseSchema,
+)
+from ai_review.clients.gemini.types import GeminiHTTPClientProtocol
+from ai_review.config import settings
+from ai_review.libs.config.llm.base import GeminiLLMConfig
+from ai_review.libs.config.llm.gemini import GeminiMetaConfig, GeminiHTTPClientConfig
+from ai_review.libs.constants.llm_provider import LLMProvider
+from ai_review.services.llm.gemini.client import GeminiLLMClient
+
+
+class FakeGeminiHTTPClient(GeminiHTTPClientProtocol):
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.calls: list[tuple[str, dict]] = []
+        self.responses = responses or {}
+
+    async def chat(self, request: GeminiChatRequestSchema) -> GeminiChatResponseSchema:
+        self.calls.append(("chat", {"request": request}))
+        return self.responses.get(
+            "chat",
+            GeminiChatResponseSchema(
+                usage=GeminiUsageSchema(prompt_token_count=2, total_tokens_count=10),
+                candidates=[
+                    GeminiCandidateSchema(
+                        content=GeminiContentSchema(
+                            role="model",
+                            parts=[GeminiPartSchema(text="FAKE_GEMINI_RESPONSE")]
+                        )
+                    )
+                ],
+            ),
+        )
+
+
+@pytest.fixture
+def fake_gemini_http_client() -> FakeGeminiHTTPClient:
+    return FakeGeminiHTTPClient()
+
+
+@pytest.fixture
+def gemini_llm_client(
+    monkeypatch: pytest.MonkeyPatch,
+    fake_gemini_http_client: FakeGeminiHTTPClient
+) -> GeminiLLMClient:
+    monkeypatch.setattr(
+        "ai_review.services.llm.gemini.client.get_gemini_http_client",
+        lambda: fake_gemini_http_client,
+    )
+    return GeminiLLMClient()
+
+
+@pytest.fixture
+def gemini_http_client_config(monkeypatch: pytest.MonkeyPatch):
+    fake_config = GeminiLLMConfig(
+        meta=GeminiMetaConfig(),
+        provider=LLMProvider.GEMINI,
+        http_client=GeminiHTTPClientConfig(
+            timeout=10,
+            api_url=HttpUrl("https://generativelanguage.googleapis.com"),
+            api_token=SecretStr("fake-token"),
+        )
+    )
+    monkeypatch.setattr(settings, "llm", fake_config)
--- /dev/null
+++ xai_review-0.25.0/ai_review/tests/fixtures/clients/ollama.py
@@ -0,0 +1,65 @@
+from typing import Any
+
+import pytest
+from pydantic import HttpUrl
+
+from ai_review.clients.ollama.schema import (
+    OllamaUsageSchema,
+    OllamaMessageSchema,
+    OllamaChatRequestSchema,
+    OllamaChatResponseSchema,
+)
+from ai_review.clients.ollama.types import OllamaHTTPClientProtocol
+from ai_review.config import settings
+from ai_review.libs.config.llm.base import OllamaLLMConfig
+from ai_review.libs.config.llm.ollama import OllamaMetaConfig, OllamaHTTPClientConfig
+from ai_review.libs.constants.llm_provider import LLMProvider
+from ai_review.services.llm.ollama.client import OllamaLLMClient
+
+
+class FakeOllamaHTTPClient(OllamaHTTPClientProtocol):
+    def __init__(self, responses: dict[str, Any] | None = None) -> None:
+        self.calls: list[tuple[str, dict]] = []
+        self.responses = responses or {}
+
+    async def chat(self, request: OllamaChatRequestSchema) -> OllamaChatResponseSchema:
+        self.calls.append(("chat", {"request": request}))
+        return self.responses.get(
+            "chat",
+            OllamaChatResponseSchema(
+                done=True,
+                model="llama2",
+                usage=OllamaUsageSchema(prompt_tokens=3, completion_tokens=5),
+                message=OllamaMessageSchema(role="assistant", content="FAKE_OLLAMA_RESPONSE"),
+            ),
+        )
+
+
+@pytest.fixture
+def fake_ollama_http_client():
+    return FakeOllamaHTTPClient()
+
+
+@pytest.fixture
+def ollama_llm_client(
+    monkeypatch: pytest.MonkeyPatch,
+    fake_ollama_http_client: FakeOllamaHTTPClient
+) -> OllamaLLMClient:
+    monkeypatch.setattr(
+        "ai_review.services.llm.ollama.client.get_ollama_http_client",
+        lambda: fake_ollama_http_client,
+    )
+    return OllamaLLMClient()
+
+
+@pytest.fixture
+def ollama_http_client_config(monkeypatch: pytest.MonkeyPatch):
+    fake_config = OllamaLLMConfig(
+        meta=OllamaMetaConfig(),
+        provider=LLMProvider.OLLAMA,
+        http_client=OllamaHTTPClientConfig(
+            timeout=10,
+            api_url=HttpUrl("http://localhost:11434")
+        )
+    )
+    monkeypatch.setattr(settings, "llm", fake_config)
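The matching suite `ai_review/tests/suites/services/llm/ollama/test_client.py` (+22 lines) is listed in this release but its body is not included in the extract. Based on the fixtures above, a test in it plausibly looks like the following hypothetical reconstruction (assuming the project runs async tests via something like `pytest-asyncio`):

```python
import pytest

from ai_review.services.llm.ollama.client import OllamaLLMClient
from ai_review.tests.fixtures.clients.ollama import FakeOllamaHTTPClient


@pytest.mark.asyncio
async def test_chat_returns_text_and_usage(
    ollama_llm_client: OllamaLLMClient,
    fake_ollama_http_client: FakeOllamaHTTPClient,
    ollama_http_client_config: None,
):
    result = await ollama_llm_client.chat(prompt="hi", prompt_system="sys")

    assert result.text == "FAKE_OLLAMA_RESPONSE"
    assert result.prompt_tokens == 3
    assert result.completion_tokens == 5
    assert result.total_tokens == 8

    # The fake records every call, so the request shape can be asserted too.
    method, kwargs = fake_ollama_http_client.calls[0]
    assert method == "chat"
    assert kwargs["request"].messages[0].role == "system"
```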