mito-ai 0.1.50__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +114 -0
- mito_ai/_version.py +4 -0
- mito_ai/anthropic_client.py +334 -0
- mito_ai/app_deploy/__init__.py +6 -0
- mito_ai/app_deploy/app_deploy_utils.py +44 -0
- mito_ai/app_deploy/handlers.py +345 -0
- mito_ai/app_deploy/models.py +98 -0
- mito_ai/app_manager/__init__.py +4 -0
- mito_ai/app_manager/handlers.py +167 -0
- mito_ai/app_manager/models.py +71 -0
- mito_ai/app_manager/utils.py +24 -0
- mito_ai/auth/README.md +18 -0
- mito_ai/auth/__init__.py +6 -0
- mito_ai/auth/handlers.py +96 -0
- mito_ai/auth/urls.py +13 -0
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/completion_handlers/__init__.py +3 -0
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +59 -0
- mito_ai/completions/completion_handlers/agent_execution_handler.py +66 -0
- mito_ai/completions/completion_handlers/chat_completion_handler.py +141 -0
- mito_ai/completions/completion_handlers/code_explain_handler.py +113 -0
- mito_ai/completions/completion_handlers/completion_handler.py +42 -0
- mito_ai/completions/completion_handlers/inline_completer_handler.py +48 -0
- mito_ai/completions/completion_handlers/smart_debug_handler.py +160 -0
- mito_ai/completions/completion_handlers/utils.py +147 -0
- mito_ai/completions/handlers.py +415 -0
- mito_ai/completions/message_history.py +401 -0
- mito_ai/completions/models.py +404 -0
- mito_ai/completions/prompt_builders/__init__.py +3 -0
- mito_ai/completions/prompt_builders/agent_execution_prompt.py +57 -0
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +160 -0
- mito_ai/completions/prompt_builders/agent_system_message.py +472 -0
- mito_ai/completions/prompt_builders/chat_name_prompt.py +15 -0
- mito_ai/completions/prompt_builders/chat_prompt.py +116 -0
- mito_ai/completions/prompt_builders/chat_system_message.py +92 -0
- mito_ai/completions/prompt_builders/explain_code_prompt.py +32 -0
- mito_ai/completions/prompt_builders/inline_completer_prompt.py +197 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +170 -0
- mito_ai/completions/prompt_builders/smart_debug_prompt.py +199 -0
- mito_ai/completions/prompt_builders/utils.py +84 -0
- mito_ai/completions/providers.py +284 -0
- mito_ai/constants.py +63 -0
- mito_ai/db/__init__.py +3 -0
- mito_ai/db/crawlers/__init__.py +6 -0
- mito_ai/db/crawlers/base_crawler.py +61 -0
- mito_ai/db/crawlers/constants.py +43 -0
- mito_ai/db/crawlers/snowflake.py +71 -0
- mito_ai/db/handlers.py +168 -0
- mito_ai/db/models.py +31 -0
- mito_ai/db/urls.py +34 -0
- mito_ai/db/utils.py +185 -0
- mito_ai/docker/mssql/compose.yml +37 -0
- mito_ai/docker/mssql/init/setup.sql +21 -0
- mito_ai/docker/mysql/compose.yml +18 -0
- mito_ai/docker/mysql/init/setup.sql +13 -0
- mito_ai/docker/oracle/compose.yml +17 -0
- mito_ai/docker/oracle/init/setup.sql +20 -0
- mito_ai/docker/postgres/compose.yml +17 -0
- mito_ai/docker/postgres/init/setup.sql +13 -0
- mito_ai/enterprise/__init__.py +3 -0
- mito_ai/enterprise/utils.py +15 -0
- mito_ai/file_uploads/__init__.py +3 -0
- mito_ai/file_uploads/handlers.py +248 -0
- mito_ai/file_uploads/urls.py +21 -0
- mito_ai/gemini_client.py +232 -0
- mito_ai/log/handlers.py +38 -0
- mito_ai/log/urls.py +21 -0
- mito_ai/logger.py +37 -0
- mito_ai/openai_client.py +382 -0
- mito_ai/path_utils.py +70 -0
- mito_ai/rules/handlers.py +44 -0
- mito_ai/rules/urls.py +22 -0
- mito_ai/rules/utils.py +56 -0
- mito_ai/settings/handlers.py +41 -0
- mito_ai/settings/urls.py +20 -0
- mito_ai/settings/utils.py +42 -0
- mito_ai/streamlit_conversion/agent_utils.py +37 -0
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
- mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
- mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
- mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
- mito_ai/streamlit_preview/__init__.py +6 -0
- mito_ai/streamlit_preview/handlers.py +111 -0
- mito_ai/streamlit_preview/manager.py +152 -0
- mito_ai/streamlit_preview/urls.py +22 -0
- mito_ai/streamlit_preview/utils.py +29 -0
- mito_ai/tests/__init__.py +3 -0
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
- mito_ai/tests/conftest.py +53 -0
- mito_ai/tests/create_agent_system_message_prompt_test.py +22 -0
- mito_ai/tests/data/prompt_lg.py +69 -0
- mito_ai/tests/data/prompt_sm.py +6 -0
- mito_ai/tests/data/prompt_xl.py +13 -0
- mito_ai/tests/data/stock_data.sqlite3 +0 -0
- mito_ai/tests/db/conftest.py +39 -0
- mito_ai/tests/db/connections_test.py +102 -0
- mito_ai/tests/db/mssql_test.py +29 -0
- mito_ai/tests/db/mysql_test.py +29 -0
- mito_ai/tests/db/oracle_test.py +29 -0
- mito_ai/tests/db/postgres_test.py +29 -0
- mito_ai/tests/db/schema_test.py +93 -0
- mito_ai/tests/db/sqlite_test.py +31 -0
- mito_ai/tests/db/test_db_constants.py +61 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
- mito_ai/tests/file_uploads/__init__.py +2 -0
- mito_ai/tests/file_uploads/test_handlers.py +282 -0
- mito_ai/tests/message_history/test_generate_short_chat_name.py +120 -0
- mito_ai/tests/message_history/test_message_history_utils.py +469 -0
- mito_ai/tests/open_ai_utils_test.py +152 -0
- mito_ai/tests/performance_test.py +329 -0
- mito_ai/tests/providers/test_anthropic_client.py +447 -0
- mito_ai/tests/providers/test_azure.py +631 -0
- mito_ai/tests/providers/test_capabilities.py +120 -0
- mito_ai/tests/providers/test_gemini_client.py +195 -0
- mito_ai/tests/providers/test_mito_server_utils.py +448 -0
- mito_ai/tests/providers/test_model_resolution.py +130 -0
- mito_ai/tests/providers/test_openai_client.py +57 -0
- mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
- mito_ai/tests/providers/test_provider_limits.py +42 -0
- mito_ai/tests/providers/test_providers.py +382 -0
- mito_ai/tests/providers/test_retry_logic.py +389 -0
- mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
- mito_ai/tests/providers/utils.py +85 -0
- mito_ai/tests/rules/conftest.py +26 -0
- mito_ai/tests/rules/rules_test.py +117 -0
- mito_ai/tests/server_limits_test.py +406 -0
- mito_ai/tests/settings/conftest.py +26 -0
- mito_ai/tests/settings/settings_test.py +70 -0
- mito_ai/tests/settings/test_settings_constants.py +9 -0
- mito_ai/tests/streamlit_conversion/__init__.py +3 -0
- mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
- mito_ai/tests/test_constants.py +47 -0
- mito_ai/tests/test_telemetry.py +12 -0
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/__init__.py +3 -0
- mito_ai/tests/utils/test_anthropic_utils.py +162 -0
- mito_ai/tests/utils/test_gemini_utils.py +98 -0
- mito_ai/tests/version_check_test.py +169 -0
- mito_ai/user/handlers.py +45 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/__init__.py +3 -0
- mito_ai/utils/anthropic_utils.py +168 -0
- mito_ai/utils/create.py +94 -0
- mito_ai/utils/db.py +74 -0
- mito_ai/utils/error_classes.py +42 -0
- mito_ai/utils/gemini_utils.py +133 -0
- mito_ai/utils/message_history_utils.py +87 -0
- mito_ai/utils/mito_server_utils.py +242 -0
- mito_ai/utils/open_ai_utils.py +200 -0
- mito_ai/utils/provider_utils.py +49 -0
- mito_ai/utils/schema.py +86 -0
- mito_ai/utils/server_limits.py +152 -0
- mito_ai/utils/telemetry_utils.py +480 -0
- mito_ai/utils/utils.py +89 -0
- mito_ai/utils/version_utils.py +94 -0
- mito_ai/utils/websocket_base.py +88 -0
- mito_ai/version_check.py +60 -0
- mito_ai-0.1.50.data/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +7 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/build_log.json +728 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/package.json +243 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +238 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +37 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +21602 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js +619 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style.js +4 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +712 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2792 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +4859 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +1 -0
- mito_ai-0.1.50.dist-info/METADATA +221 -0
- mito_ai-0.1.50.dist-info/RECORD +205 -0
- mito_ai-0.1.50.dist-info/WHEEL +4 -0
- mito_ai-0.1.50.dist-info/entry_points.txt +2 -0
- mito_ai-0.1.50.dist-info/licenses/LICENSE +3 -0
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
from unittest.mock import MagicMock, patch
|
|
6
|
+
from mito_ai.completions.providers import OpenAIProvider
|
|
7
|
+
from mito_ai.tests.providers.utils import mock_azure_openai_client, mock_openai_client, patch_server_limits
|
|
8
|
+
from traitlets.config import Config
|
|
9
|
+
|
|
10
|
+
# Placeholder OpenAI-style key used only to satisfy key-presence checks in tests.
FAKE_API_KEY = "sk-1234567890"
|
|
11
|
+
|
|
12
|
+
@pytest.fixture
def provider_config() -> Config:
    """Build a traitlets Config with empty sections for the provider and client.

    Starting from empty sub-configs lets each test override exactly the
    settings it cares about while OpenAIProvider reads the rest as defaults.
    """
    cfg = Config()
    cfg.OpenAIProvider = Config()
    cfg.OpenAIClient = Config()
    return cfg
|
|
19
|
+
|
|
20
|
+
@pytest.mark.parametrize("test_case", [
    {
        "name": "mito_server_fallback_no_keys",
        "setup": {
            "OPENAI_API_KEY": None,
            "CLAUDE_API_KEY": None,
            "GEMINI_API_KEY": None,
            "is_azure_configured": False,
        },
        "expected_provider": "Mito server",
        "expected_key_type": "mito_server_key"
    },
    {
        "name": "claude_when_only_claude_key",
        "setup": {
            "OPENAI_API_KEY": None,
            "CLAUDE_API_KEY": "claude-test-key",
            "GEMINI_API_KEY": None,
            "is_azure_configured": False,
        },
        "expected_provider": "Claude",
        "expected_key_type": "claude"
    },
    {
        "name": "gemini_when_only_gemini_key",
        "setup": {
            "OPENAI_API_KEY": None,
            "CLAUDE_API_KEY": None,
            "GEMINI_API_KEY": "gemini-test-key",
            "is_azure_configured": False,
        },
        "expected_provider": "Gemini",
        "expected_key_type": "gemini"
    },
    {
        "name": "openai_when_openai_key",
        "setup": {
            "OPENAI_API_KEY": 'openai-test-key',
            "CLAUDE_API_KEY": None,
            "GEMINI_API_KEY": None,
            "is_azure_configured": False,
        },
        "expected_provider": "OpenAI (user key)",
        "expected_key_type": "user_key"
    },
    {
        "name": "claude_priority_over_gemini",
        "setup": {
            "OPENAI_API_KEY": None,
            "CLAUDE_API_KEY": "claude-test-key",
            "GEMINI_API_KEY": "gemini-test-key",
            "is_azure_configured": False,
        },
        "expected_provider": "Claude",
        "expected_key_type": "claude"
    },
])
def test_provider_capabilities_real_logic(
    test_case: dict,
    monkeypatch: pytest.MonkeyPatch,
    provider_config: Config
) -> None:
    """Exercise the real provider-selection logic behind OpenAIProvider.capabilities.

    Each scenario patches the API-key constants (and the Azure check) before the
    provider is constructed, then asserts which backend the provider reports.
    """
    scenario = test_case["setup"]

    # All patching must happen BEFORE the provider is instantiated, since the
    # provider reads these constants during construction.
    for name, value in scenario.items():
        if name == "is_azure_configured":
            # Patch the Azure check; the Azure path also needs a model constant.
            monkeypatch.setattr(
                "mito_ai.enterprise.utils.is_azure_openai_configured",
                (lambda: True) if value else (lambda: False),
            )
            if value:
                monkeypatch.setattr("mito_ai.constants.AZURE_OPENAI_MODEL", "gpt-4o")
        else:
            monkeypatch.setattr(f"mito_ai.constants.{name}", value)

    # Clear the config-supplied key so the provider falls back to the
    # (patched) module constants.
    provider_config.OpenAIProvider.api_key = None

    # Stub out every OpenAI client constructor so no HTTP traffic occurs,
    # while the provider's own selection logic still runs for real.
    with patch("openai.OpenAI") as openai_ctor, \
         patch("openai.AsyncOpenAI"), \
         patch("openai.AsyncAzureOpenAI"):
        # Make API-key validation succeed for the OpenAI path.
        validated_client = MagicMock()
        validated_client.models.list.return_value = [MagicMock(id="gpt-4o-mini")]
        openai_ctor.return_value = validated_client

        # Server limits are patched so the Mito-server fallback path works.
        with patch_server_limits():
            llm = OpenAIProvider(config=provider_config)

            capabilities = llm.capabilities
            assert capabilities.provider == test_case["expected_provider"], f"Test case: {test_case['name']}"
            assert llm.key_type == test_case["expected_key_type"], f"Test case: {test_case['name']}"
|
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
import ast
|
|
6
|
+
import inspect
|
|
7
|
+
import requests
|
|
8
|
+
from mito_ai.gemini_client import GeminiClient, get_gemini_system_prompt_and_messages
|
|
9
|
+
from mito_ai.utils.gemini_utils import get_gemini_completion_function_params, FAST_GEMINI_MODEL
|
|
10
|
+
from google.genai.types import Part, GenerateContentResponse, Candidate, Content
|
|
11
|
+
from mito_ai.completions.models import ResponseFormatInfo, AgentResponse
|
|
12
|
+
from unittest.mock import MagicMock, patch
|
|
13
|
+
from typing import List, Dict, Any
|
|
14
|
+
from mito_ai.completions.models import MessageType
|
|
15
|
+
|
|
16
|
+
# Dummy base64 image (1x1 PNG)
|
|
17
|
+
# Smallest valid PNG (1x1 pixel) encoded as a base64 data URL; used as image
# input in the message-conversion tests below.
DUMMY_IMAGE_DATA_URL = (
    "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/wcAAgMBAp9l9AAAAABJRU5ErkJggg=="
)
|
|
20
|
+
|
|
21
|
+
def test_mixed_text_and_image():
    """A user turn mixing text and an image becomes one content entry with two parts."""
    chat: List[Dict[str, Any]] = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": [
            {"type": "text", "text": "Here is an image:"},
            {"type": "image_url", "image_url": {"url": DUMMY_IMAGE_DATA_URL}}
        ]}
    ]
    system_prompt, converted = get_gemini_system_prompt_and_messages(chat)
    assert system_prompt == "You are a helpful assistant."
    assert len(converted) == 1
    entry = converted[0]
    assert entry["role"] == "user"
    parts = entry["parts"]
    # Text part first, then the image converted into a google.genai Part object.
    assert len(parts) == 2
    assert parts[0]["text"] == "Here is an image:"
    assert isinstance(parts[1], Part)
|
|
38
|
+
|
|
39
|
+
def test_no_system_instructions_only_content():
    """Without a system message the prompt is empty and roles map user->user, assistant->model."""
    chat: List[Dict[str, Any]] = [
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi, how can I help you?"}
    ]
    system_prompt, converted = get_gemini_system_prompt_and_messages(chat)
    assert system_prompt == ""
    assert len(converted) == 2
    user_entry, model_entry = converted
    assert user_entry["role"] == "user"
    assert user_entry["parts"][0]["text"] == "Hello!"
    # Gemini uses the role name "model" for assistant turns.
    assert model_entry["role"] == "model"
    assert model_entry["parts"][0]["text"] == "Hi, how can I help you?"
|
|
51
|
+
|
|
52
|
+
def test_system_instructions_and_content():
    """The system message is lifted into the prompt; only the user turn remains."""
    chat: List[Dict[str, Any]] = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the weather today?"}
    ]
    system_prompt, converted = get_gemini_system_prompt_and_messages(chat)
    assert system_prompt == "You are a helpful assistant."
    assert len(converted) == 1
    assert converted[0]["role"] == "user"
    assert converted[0]["parts"][0]["text"] == "What is the weather today?"
|
|
62
|
+
|
|
63
|
+
@pytest.mark.asyncio
async def test_json_response_handling():
    """When a response format is requested, the raw JSON text of the reply is returned."""
    # Canned Gemini reply whose single part carries a JSON payload.
    canned_reply = GenerateContentResponse(
        candidates=[
            Candidate(
                content=Content(
                    parts=[Part(text='{"key": "value"}')]
                )
            )
        ]
    )

    # Swap the real SDK client for a mock that returns the canned reply.
    gemini = GeminiClient(api_key="test_key")
    gemini.client = MagicMock()
    gemini.client.models.generate_content.return_value = canned_reply

    fmt = ResponseFormatInfo(name="agent_response", format=AgentResponse)
    completion = await gemini.request_completions(
        messages=[{"role": "user", "content": "Test message"}],
        model="test-model",
        response_format_info=fmt
    )
    assert completion == '{"key": "value"}'
|
|
89
|
+
|
|
90
|
+
@pytest.mark.asyncio
async def test_json_response_handling_with_invalid_json():
    """Malformed JSON in a formatted response must not crash the client.

    This matters because:
    1. The client has to survive malformed JSON responses without raising.
    2. The raw response text is handed back even though JSON parsing would fail.
    3. It covers the error-handling path of the response-processing logic.
    """
    # Missing quotes around `value` makes this deliberately invalid JSON.
    canned_reply = GenerateContentResponse(
        candidates=[
            Candidate(
                content=Content(
                    parts=[Part(text='{"key": value}')]
                )
            )
        ]
    )

    # Swap the real SDK client for a mock that returns the canned reply.
    gemini = GeminiClient(api_key="test_key")
    gemini.client = MagicMock()
    gemini.client.models.generate_content.return_value = canned_reply

    fmt = ResponseFormatInfo(name="agent_response", format=AgentResponse)
    completion = await gemini.request_completions(
        messages=[{"role": "user", "content": "Test message"}],
        model="test-model",
        response_format_info=fmt
    )
    # The raw string comes back untouched even though it is not valid JSON.
    assert completion == '{"key": value}'
|
|
125
|
+
|
|
126
|
+
@pytest.mark.asyncio
async def test_json_response_handling_with_multiple_parts():
    """All parts of a multi-part candidate are concatenated into one string."""
    canned_reply = GenerateContentResponse(
        candidates=[
            Candidate(
                content=Content(
                    parts=[
                        Part(text='Here is the JSON: '),
                        Part(text='{"key": "value"}'),
                        Part(text=' End of response')
                    ]
                )
            )
        ]
    )

    # Swap the real SDK client for a mock that returns the canned reply.
    gemini = GeminiClient(api_key="test_key")
    gemini.client = MagicMock()
    gemini.client.models.generate_content.return_value = canned_reply

    fmt = ResponseFormatInfo(name="agent_response", format=AgentResponse)
    completion = await gemini.request_completions(
        messages=[{"role": "user", "content": "Test message"}],
        model="test-model",
        response_format_info=fmt
    )
    # Every part's text is joined together, in order.
    assert completion == 'Here is the JSON: {"key": "value"} End of response'
|
|
157
|
+
|
|
158
|
+
CUSTOM_MODEL = "smart-gemini-model"

# Heavyweight message types keep the caller-supplied model; lightweight ones
# (inline completion, chat naming) are routed to the fast Gemini model.
@pytest.mark.parametrize("message_type, expected_model", [
    (MessageType.CHAT, CUSTOM_MODEL),
    (MessageType.SMART_DEBUG, CUSTOM_MODEL),
    (MessageType.CODE_EXPLAIN, CUSTOM_MODEL),
    (MessageType.AGENT_EXECUTION, CUSTOM_MODEL),
    (MessageType.AGENT_AUTO_ERROR_FIXUP, CUSTOM_MODEL),
    (MessageType.INLINE_COMPLETION, FAST_GEMINI_MODEL),
    (MessageType.CHAT_NAME_GENERATION, FAST_GEMINI_MODEL),
])
@pytest.mark.asyncio
async def test_get_completion_model_selection_based_on_message_type(message_type, expected_model):
    """The model handed to generate_content depends on the request's MessageType."""
    with patch('google.genai.Client') as genai_cls:
        # Stub the SDK client so construction and the API call are both fake.
        stub_client = MagicMock()
        stub_models = MagicMock()
        stub_client.models = stub_models
        genai_cls.return_value = stub_client

        gemini = GeminiClient(api_key="test_key")

        stub_models.generate_content.return_value = 'test-response'

        await gemini.request_completions(
            messages=[{"role": "user", "content": "Test message"}],
            model=CUSTOM_MODEL,
            message_type=message_type,
            response_format_info=None
        )

        # Exactly one generation call, routed to the expected model.
        stub_models.generate_content.assert_called_once()
        _, call_kwargs = stub_models.generate_content.call_args
        assert call_kwargs['model'] == expected_model
|