mito-ai 0.1.33__py3-none-any.whl → 0.1.49__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146) hide show
  1. mito_ai/__init__.py +49 -9
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +142 -67
  4. mito_ai/{app_builder → app_deploy}/__init__.py +1 -1
  5. mito_ai/app_deploy/app_deploy_utils.py +44 -0
  6. mito_ai/app_deploy/handlers.py +345 -0
  7. mito_ai/{app_builder → app_deploy}/models.py +35 -22
  8. mito_ai/app_manager/__init__.py +4 -0
  9. mito_ai/app_manager/handlers.py +167 -0
  10. mito_ai/app_manager/models.py +71 -0
  11. mito_ai/app_manager/utils.py +24 -0
  12. mito_ai/auth/README.md +18 -0
  13. mito_ai/auth/__init__.py +6 -0
  14. mito_ai/auth/handlers.py +96 -0
  15. mito_ai/auth/urls.py +13 -0
  16. mito_ai/chat_history/handlers.py +63 -0
  17. mito_ai/chat_history/urls.py +32 -0
  18. mito_ai/completions/completion_handlers/agent_execution_handler.py +1 -1
  19. mito_ai/completions/completion_handlers/chat_completion_handler.py +4 -4
  20. mito_ai/completions/completion_handlers/utils.py +99 -37
  21. mito_ai/completions/handlers.py +57 -20
  22. mito_ai/completions/message_history.py +9 -1
  23. mito_ai/completions/models.py +31 -7
  24. mito_ai/completions/prompt_builders/agent_execution_prompt.py +21 -2
  25. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +8 -0
  26. mito_ai/completions/prompt_builders/agent_system_message.py +115 -42
  27. mito_ai/completions/prompt_builders/chat_name_prompt.py +6 -6
  28. mito_ai/completions/prompt_builders/chat_prompt.py +18 -11
  29. mito_ai/completions/prompt_builders/chat_system_message.py +4 -0
  30. mito_ai/completions/prompt_builders/prompt_constants.py +23 -4
  31. mito_ai/completions/prompt_builders/utils.py +72 -10
  32. mito_ai/completions/providers.py +81 -47
  33. mito_ai/constants.py +25 -24
  34. mito_ai/file_uploads/__init__.py +3 -0
  35. mito_ai/file_uploads/handlers.py +248 -0
  36. mito_ai/file_uploads/urls.py +21 -0
  37. mito_ai/gemini_client.py +44 -48
  38. mito_ai/log/handlers.py +10 -3
  39. mito_ai/log/urls.py +3 -3
  40. mito_ai/openai_client.py +30 -44
  41. mito_ai/path_utils.py +70 -0
  42. mito_ai/streamlit_conversion/agent_utils.py +37 -0
  43. mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
  44. mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
  45. mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
  46. mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
  47. mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
  48. mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
  49. mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
  50. mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
  51. mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
  52. mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
  53. mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
  54. mito_ai/streamlit_preview/__init__.py +6 -0
  55. mito_ai/streamlit_preview/handlers.py +111 -0
  56. mito_ai/streamlit_preview/manager.py +152 -0
  57. mito_ai/streamlit_preview/urls.py +22 -0
  58. mito_ai/streamlit_preview/utils.py +29 -0
  59. mito_ai/tests/chat_history/test_chat_history.py +211 -0
  60. mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
  61. mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
  62. mito_ai/tests/file_uploads/__init__.py +2 -0
  63. mito_ai/tests/file_uploads/test_handlers.py +282 -0
  64. mito_ai/tests/message_history/test_generate_short_chat_name.py +0 -4
  65. mito_ai/tests/message_history/test_message_history_utils.py +103 -23
  66. mito_ai/tests/open_ai_utils_test.py +18 -22
  67. mito_ai/tests/providers/test_anthropic_client.py +447 -0
  68. mito_ai/tests/providers/test_azure.py +2 -6
  69. mito_ai/tests/providers/test_capabilities.py +120 -0
  70. mito_ai/tests/{test_gemini_client.py → providers/test_gemini_client.py} +40 -36
  71. mito_ai/tests/providers/test_mito_server_utils.py +448 -0
  72. mito_ai/tests/providers/test_model_resolution.py +130 -0
  73. mito_ai/tests/providers/test_openai_client.py +57 -0
  74. mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
  75. mito_ai/tests/providers/test_provider_limits.py +42 -0
  76. mito_ai/tests/providers/test_providers.py +382 -0
  77. mito_ai/tests/providers/test_retry_logic.py +389 -0
  78. mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
  79. mito_ai/tests/providers/utils.py +85 -0
  80. mito_ai/tests/streamlit_conversion/__init__.py +3 -0
  81. mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
  82. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
  83. mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
  84. mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
  85. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
  86. mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
  87. mito_ai/tests/test_constants.py +31 -3
  88. mito_ai/tests/test_telemetry.py +12 -0
  89. mito_ai/tests/user/__init__.py +2 -0
  90. mito_ai/tests/user/test_user.py +120 -0
  91. mito_ai/tests/utils/test_anthropic_utils.py +6 -6
  92. mito_ai/user/handlers.py +45 -0
  93. mito_ai/user/urls.py +21 -0
  94. mito_ai/utils/anthropic_utils.py +55 -121
  95. mito_ai/utils/create.py +17 -1
  96. mito_ai/utils/error_classes.py +42 -0
  97. mito_ai/utils/gemini_utils.py +39 -94
  98. mito_ai/utils/message_history_utils.py +7 -4
  99. mito_ai/utils/mito_server_utils.py +242 -0
  100. mito_ai/utils/open_ai_utils.py +38 -155
  101. mito_ai/utils/provider_utils.py +49 -0
  102. mito_ai/utils/server_limits.py +1 -1
  103. mito_ai/utils/telemetry_utils.py +137 -5
  104. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -100
  105. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/package.json +4 -2
  106. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +3 -1
  107. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +2 -2
  108. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +15948 -8403
  109. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
  110. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
  111. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
  112. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js +58 -33
  113. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js.map +1 -0
  114. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +10 -2
  115. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
  116. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
  117. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
  118. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
  119. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
  120. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
  121. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
  122. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
  123. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
  124. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
  125. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
  126. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2 -240
  127. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
  128. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/METADATA +5 -2
  129. mito_ai-0.1.49.dist-info/RECORD +205 -0
  130. mito_ai/app_builder/handlers.py +0 -218
  131. mito_ai/tests/providers_test.py +0 -438
  132. mito_ai/tests/test_anthropic_client.py +0 -270
  133. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js.map +0 -1
  134. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js.map +0 -1
  135. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -1
  136. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -7842
  137. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -1
  138. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -1
  139. mito_ai-0.1.33.dist-info/RECORD +0 -134
  140. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  141. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  142. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  143. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  144. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/WHEEL +0 -0
  145. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/entry_points.txt +0 -0
  146. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,389 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ import pytest
5
+ import asyncio
6
+ from unittest.mock import AsyncMock, MagicMock, patch
7
+ from mito_ai.completions.providers import OpenAIProvider
8
+ from mito_ai.completions.models import MessageType, CompletionError
9
+ from mito_ai.utils.mito_server_utils import ProviderCompletionException
10
+ from mito_ai.tests.providers.utils import mock_openai_client, patch_server_limits
11
+ from traitlets.config import Config
12
+
13
+ FAKE_API_KEY = "sk-1234567890"
14
+
15
@pytest.fixture
def provider_config() -> Config:
    """Build a traitlets Config carrying empty OpenAIProvider/OpenAIClient sections."""
    cfg = Config()
    cfg.OpenAIProvider = Config()
    cfg.OpenAIClient = Config()
    return cfg
22
+
23
@pytest.fixture
def mock_messages():
    """A minimal one-turn chat history used as the request payload in tests."""
    message = {"role": "user", "content": "Test message"}
    return [message]
27
+
28
@pytest.fixture
def mock_sleep():
    """Replace asyncio.sleep with an AsyncMock so retry back-off adds no real delay."""
    with patch("asyncio.sleep", new_callable=AsyncMock) as patched:
        yield patched
33
+
34
class TestRetryLogic:
    """Test retry logic in OpenAIProvider.request_completions.

    All tests patch the underlying OpenAI client (see mock_openai_client) and
    asyncio.sleep (see mock_sleep fixture) so retries run instantly, then
    assert on call counts, provider.last_error state, and emitted logs.
    """

    @pytest.mark.parametrize("attempts_before_success,max_retries,expected_call_count", [
        (0, 3, 1),  # Success on first try
        (1, 3, 2),  # Success after 1 retry
        (2, 3, 3),  # Success after 2 retries
        (3, 3, 4),  # Success on final try
    ])
    @pytest.mark.asyncio
    async def test_success_after_retries(
        self,
        attempts_before_success: int,
        max_retries: int,
        expected_call_count: int,
        provider_config: Config,
        mock_messages,
        mock_sleep,
        monkeypatch: pytest.MonkeyPatch
    ):
        """Test that request_completions succeeds after a certain number of retries."""
        monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
        monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

        # Create side effect: fail N times, then succeed.
        # AsyncMock consumes side_effect entries one per call.
        side_effects = [Exception("Test error")] * attempts_before_success + ["Success!"]

        with (
            patch_server_limits(),
            mock_openai_client() as mock_client
        ):
            mock_client.return_value.request_completions = AsyncMock(side_effect=side_effects)

            provider = OpenAIProvider(config=provider_config)

            # Test successful completion
            result = await provider.request_completions(
                message_type=MessageType.CHAT,
                messages=mock_messages,
                model="gpt-4o-mini",
                max_retries=max_retries
            )

            assert result == "Success!"
            assert mock_client.return_value.request_completions.call_count == expected_call_count
            assert provider.last_error is None  # Should be reset on success

            # Verify sleep was called for retries (one back-off per failed attempt)
            assert mock_sleep.call_count == attempts_before_success

    @pytest.mark.parametrize("exception_type,max_retries,expected_call_count", [
        (ProviderCompletionException, 3, 4),  # Retry ProviderCompletionException
        (Exception, 3, 4),  # Retry generic Exception
        (RuntimeError, 3, 4),  # Retry RuntimeError
        (ValueError, 2, 3),  # Retry ValueError with different max_retries
    ])
    @pytest.mark.asyncio
    async def test_failure_after_all_retries(
        self,
        exception_type,
        max_retries: int,
        expected_call_count: int,
        provider_config: Config,
        mock_messages,
        mock_sleep,
        monkeypatch: pytest.MonkeyPatch
    ):
        """Test that request_completions fails after exhausting all retries."""
        monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
        monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

        # Create exception instance
        if exception_type == ProviderCompletionException:
            test_exception = ProviderCompletionException("Test provider error")
        else:
            test_exception = exception_type("Test error")

        with (
            patch_server_limits(),
            mock_openai_client() as mock_client
        ):
            # A constant side_effect raises the same exception on every call.
            mock_client.return_value.request_completions = AsyncMock(side_effect=test_exception)

            provider = OpenAIProvider(config=provider_config)

            # Test failure after all retries
            with pytest.raises(exception_type):
                await provider.request_completions(
                    message_type=MessageType.CHAT,
                    messages=mock_messages,
                    model="gpt-4o-mini",
                    max_retries=max_retries
                )

            assert mock_client.return_value.request_completions.call_count == expected_call_count
            assert provider.last_error is not None  # Should be set on final failure
            assert isinstance(provider.last_error, CompletionError)

            # Verify sleep was called for retries (max_retries times)
            assert mock_sleep.call_count == max_retries

    @pytest.mark.asyncio
    async def test_mixed_exception_types(
        self,
        provider_config: Config,
        mock_messages,
        mock_sleep,
        monkeypatch: pytest.MonkeyPatch
    ):
        """Test handling of different exception types across retry attempts."""
        monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
        monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

        # Different exceptions on different attempts; retry loop must treat
        # them all the same and keep going until the final string succeeds.
        side_effects = [
            Exception("Generic error"),
            ProviderCompletionException("Provider error"),
            RuntimeError("Runtime error"),
            "Success!"
        ]

        with (
            patch_server_limits(),
            mock_openai_client() as mock_client
        ):
            mock_client.return_value.request_completions = AsyncMock(side_effect=side_effects)

            provider = OpenAIProvider(config=provider_config)

            # Should succeed after 3 retries with mixed exceptions
            result = await provider.request_completions(
                message_type=MessageType.CHAT,
                messages=mock_messages,
                model="gpt-4o-mini",
                max_retries=3
            )

            assert result == "Success!"
            assert mock_client.return_value.request_completions.call_count == 4
            assert provider.last_error is None  # Should be reset on success
            assert mock_sleep.call_count == 3  # Called for each retry

    @pytest.mark.asyncio
    async def test_last_error_reset_on_success(
        self,
        provider_config: Config,
        mock_messages,
        mock_sleep,
        monkeypatch: pytest.MonkeyPatch
    ):
        """Test that last_error is reset when request succeeds after previous failure."""
        monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
        monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

        with (
            patch_server_limits(),
            mock_openai_client() as mock_client
        ):
            provider = OpenAIProvider(config=provider_config)

            # First request fails to set an error
            mock_client.return_value.request_completions = AsyncMock(side_effect=Exception("First error"))

            with pytest.raises(Exception):
                await provider.request_completions(
                    message_type=MessageType.CHAT,
                    messages=mock_messages,
                    model="gpt-4o-mini",
                    max_retries=0  # No retries to fail quickly
                )

            # Verify error was set
            assert provider.last_error is not None

            # Second request succeeds (fresh AsyncMock replaces the failing one)
            mock_client.return_value.request_completions = AsyncMock(return_value="Success!")

            result = await provider.request_completions(
                message_type=MessageType.CHAT,
                messages=mock_messages,
                model="gpt-4o-mini",
                max_retries=3
            )

            assert result == "Success!"
            assert provider.last_error is None  # Should be reset

    @pytest.mark.asyncio
    async def test_no_retries_when_max_retries_zero(
        self,
        provider_config: Config,
        mock_messages,
        mock_sleep,
        monkeypatch: pytest.MonkeyPatch
    ):
        """Test that no retries happen when max_retries=0."""
        monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
        monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

        with (
            patch_server_limits(),
            mock_openai_client() as mock_client
        ):
            mock_client.return_value.request_completions = AsyncMock(side_effect=Exception("Test error"))

            provider = OpenAIProvider(config=provider_config)

            # Should fail immediately with no retries
            with pytest.raises(Exception):
                await provider.request_completions(
                    message_type=MessageType.CHAT,
                    messages=mock_messages,
                    model="gpt-4o-mini",
                    max_retries=0
                )

            assert mock_client.return_value.request_completions.call_count == 1
            assert mock_sleep.call_count == 0  # No retries, no sleep calls
            assert provider.last_error is not None

    @pytest.mark.asyncio
    async def test_provider_completion_exception_details(
        self,
        provider_config: Config,
        mock_messages,
        mock_sleep,
        monkeypatch: pytest.MonkeyPatch
    ):
        """Test that ProviderCompletionException details are preserved in last_error."""
        monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
        monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

        error_message = "Server returned an error: Rate limit exceeded"
        provider_exception = ProviderCompletionException(error_message)

        with (
            patch_server_limits(),
            mock_openai_client() as mock_client
        ):
            mock_client.return_value.request_completions = AsyncMock(side_effect=provider_exception)

            provider = OpenAIProvider(config=provider_config)

            # Should fail after retries
            with pytest.raises(ProviderCompletionException):
                await provider.request_completions(
                    message_type=MessageType.CHAT,
                    messages=mock_messages,
                    model="gpt-4o-mini",
                    max_retries=2
                )

            assert provider.last_error is not None
            assert isinstance(provider.last_error, CompletionError)
            # The original exception's message must survive into the surfaced error.
            assert error_message in provider.last_error.title

    @pytest.mark.parametrize("scenario,side_effects,max_retries,expected_retry_logs,expected_error_logs,expected_success_logs", [
        # Scenario 1: Success on first try
        ("immediate_success", ["Success!"], 3, 0, 0, 1),

        # Scenario 2: Fail once, then succeed
        ("retry_then_success", [Exception("Test error"), "Success!"], 3, 1, 0, 1),

        # Scenario 3: Fail twice, then succeed
        ("retry_twice_then_success", [Exception("Test error"), Exception("Test error"), "Success!"], 3, 2, 0, 1),

        # Scenario 4: Fail and never succeed (1 retry)
        ("fail_after_one_retry", [Exception("Test error"), Exception("Test error")], 1, 1, 1, 0),

        # Scenario 5: Fail and never succeed (2 retries)
        ("fail_after_two_retries", [Exception("Test error"), Exception("Test error"), Exception("Test error")], 2, 2, 1, 0),

        # Scenario 6: Fail and never succeed (3 retries)
        ("fail_after_three_retries", [Exception("Test error"), Exception("Test error"), Exception("Test error"), Exception("Test error")], 3, 3, 1, 0),
    ])
    @pytest.mark.asyncio
    async def test_logging_functions_comprehensive(
        self,
        scenario: str,
        side_effects: list,
        max_retries: int,
        expected_retry_logs: int,
        expected_error_logs: int,
        expected_success_logs: int,
        provider_config: Config,
        mock_messages,
        mock_sleep,
        monkeypatch: pytest.MonkeyPatch,
        capsys
    ):
        """Test comprehensive logging behavior for all retry scenarios."""
        monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
        monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

        # Clear other API keys to ensure OpenAI path is used
        monkeypatch.delenv("CLAUDE_API_KEY", raising=False)
        monkeypatch.delenv("GEMINI_API_KEY", raising=False)
        monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", None)
        monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", None)

        # Enable print logs to capture telemetry output
        monkeypatch.setattr("mito_ai.utils.telemetry_utils.PRINT_LOGS", True)

        with patch_server_limits():
            # Create a mock OpenAI client that will be used by the provider
            mock_client = MagicMock()
            mock_client.request_completions = AsyncMock(side_effect=side_effects)
            mock_client.key_type = "user"

            # Create the provider and set the mock client directly (bypasses
            # the factory path that mock_openai_client patches elsewhere)
            provider = OpenAIProvider(config=provider_config)
            provider._openai_client = mock_client

            # Determine if we expect success or failure: a string entry in
            # side_effects means the mock eventually returns it.
            will_succeed = any(isinstance(effect, str) for effect in side_effects)

            if will_succeed:
                # Test successful completion
                result = await provider.request_completions(
                    message_type=MessageType.CHAT,
                    messages=mock_messages,
                    model="gpt-4o-mini",
                    max_retries=max_retries
                )

                # Verify we got the expected success result
                assert result == "Success!"
                assert provider.last_error is None
            else:
                # Test failure after all retries
                with pytest.raises(Exception):
                    await provider.request_completions(
                        message_type=MessageType.CHAT,
                        messages=mock_messages,
                        model="gpt-4o-mini",
                        max_retries=max_retries
                    )

                # Verify error state was set
                assert provider.last_error is not None
                assert isinstance(provider.last_error, CompletionError)

            # Capture the printed logs
            captured = capsys.readouterr()
            log_output = captured.out

            # Count the different types of logs by their telemetry event names
            retry_log_count = log_output.count("mito_ai_retry")
            error_log_count = log_output.count("mito_ai_error")
            success_log_count = log_output.count("mito_ai_chat_success")

            # Verify logging function calls
            assert retry_log_count == expected_retry_logs, f"Expected {expected_retry_logs} retry logs for scenario '{scenario}', got {retry_log_count}"
            assert error_log_count == expected_error_logs, f"Expected {expected_error_logs} error logs for scenario '{scenario}', got {error_log_count}"
            assert success_log_count == expected_success_logs, f"Expected {expected_success_logs} success logs for scenario '{scenario}', got {success_log_count}"
389
+
@@ -0,0 +1,140 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ import asyncio
5
+ import pytest
6
+ from unittest.mock import AsyncMock, MagicMock, patch, call
7
+
8
+ from mito_ai.utils.mito_server_utils import stream_response_from_mito_server
9
+ from mito_ai.completions.models import MessageType
10
+
11
+
12
+ # Mock classes for CompletionStreamChunk, CompletionItem, etc.
13
class CompletionItem:
    """Lightweight stand-in for the real CompletionItem model.

    Only used by this test module for structural equality checks against
    the chunks passed to reply_fn.
    """

    def __init__(self, content: str, isIncomplete: bool, token: str):
        self.content = content
        self.isIncomplete = isIncomplete
        self.token = token

    def __eq__(self, other: object) -> bool:
        # Return NotImplemented for foreign types instead of raising
        # AttributeError (the previous implementation dereferenced
        # other.content unconditionally, which broke any comparison with a
        # non-CompletionItem and made mock call mismatches crash instead of
        # reporting a clean assertion failure).
        if not isinstance(other, CompletionItem):
            return NotImplemented
        return (
            self.content == other.content
            and self.isIncomplete == other.isIncomplete
            and self.token == other.token
        )

    def __repr__(self) -> str:
        # Readable diff output when assert_has_calls fails.
        return (
            f"CompletionItem(content={self.content!r}, "
            f"isIncomplete={self.isIncomplete!r}, token={self.token!r})"
        )
25
+
26
+
27
+ class CompletionStreamChunk:
28
+ def __init__(self, parent_id: str, chunk: CompletionItem, done: bool):
29
+ self.parent_id = parent_id
30
+ self.chunk = chunk
31
+ self.done = done
32
+
33
+ def __eq__(self, other):
34
+ return (
35
+ self.parent_id == other.parent_id
36
+ and self.chunk == other.chunk
37
+ and self.done == other.done
38
+ )
39
+
40
+
41
@pytest.mark.asyncio
async def test_stream_response_happy_path(monkeypatch):
    """Stream two chunks through stream_response_from_mito_server and verify
    that (a) the generator yields the decoded chunk text, and (b) reply_fn
    receives one incomplete CompletionStreamChunk per raw chunk plus a final
    empty done=True chunk."""
    # Arrange
    url = "https://fake.mito.server/stream"
    headers = {"Authorization": "Bearer token"}
    data = {"prompt": "hello world"}
    timeout = 10
    max_retries = 2
    message_type = MessageType.CHAT
    message_id = "msg-123"

    # Fake chunks the server will "stream"
    raw_chunks = [b"chunk1", b"chunk2"]

    # Mock reply_fn (collects outbound websocket replies)
    reply_fn = MagicMock()

    # Mock quota check/update so no real quota state is touched
    monkeypatch.setattr(
        "mito_ai.utils.mito_server_utils.check_mito_server_quota", lambda *_: None
    )
    monkeypatch.setattr(
        "mito_ai.utils.mito_server_utils.update_mito_server_quota", lambda *_: None
    )

    # Mock HTTPClient and fetch. The fake captures the streaming_callback that
    # stream_response_from_mito_server passes in, then drives it with the
    # raw chunks from a coroutine that mimics Tornado's fetch future.
    chunk_callback = MagicMock()

    class FakeHTTPClient:
        def fetch(self, *args, **kwargs):
            # Capture the real callback so fetch_simulation can feed it.
            nonlocal chunk_callback
            chunk_callback = kwargs["streaming_callback"]

            async def fetch_simulation():
                # Simulate streaming data arriving over time
                for chunk in raw_chunks:
                    await asyncio.sleep(0.01)
                    chunk_callback(chunk)
                return MagicMock()

            return fetch_simulation()

        def close(self):
            pass

    def mock_create_http_client(timeout_val, retry_val):
        # Mirrors _create_http_client's (client, timeout) return shape —
        # NOTE(review): assumed from this call site; confirm against
        # mito_server_utils._create_http_client.
        return FakeHTTPClient(), timeout_val

    monkeypatch.setattr(
        "mito_ai.utils.mito_server_utils._create_http_client", mock_create_http_client
    )

    # Act
    gen = stream_response_from_mito_server(
        url=url,
        headers=headers,
        data=data,
        timeout=timeout,
        max_retries=max_retries,
        message_type=message_type,
        reply_fn=reply_fn,
        message_id=message_id,
    )

    results = []
    async for chunk in gen:
        results.append(chunk)

    # Assert: generator yields decoded text for each raw byte chunk
    assert results == [b"chunk1".decode(), b"chunk2".decode()]

    # Check reply_fn calls: one incomplete chunk per payload, then a final
    # empty done=True terminator.
    expected_calls = [
        call(
            CompletionStreamChunk(
                parent_id=message_id,
                chunk=CompletionItem(
                    content="chunk1", isIncomplete=True, token=message_id
                ),
                done=False,
            )
        ),
        call(
            CompletionStreamChunk(
                parent_id=message_id,
                chunk=CompletionItem(
                    content="chunk2", isIncomplete=True, token=message_id
                ),
                done=False,
            )
        ),
        call(
            CompletionStreamChunk(
                parent_id=message_id,
                chunk=CompletionItem(content="", isIncomplete=False, token=message_id),
                done=True,
            )
        ),
    ]
    reply_fn.assert_has_calls(expected_calls)
@@ -0,0 +1,85 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ from datetime import datetime
5
+ from typing import Any
6
+ from unittest.mock import AsyncMock, MagicMock, patch
7
+ from mito_ai.completions.models import AICapabilities
8
+
9
# Date string used as the default "first completion" date in quota stubs.
TODAY = datetime.now().strftime("%Y-%m-%d")

def patch_server_limits(is_pro: bool = False, completion_count: int = 1, first_date: str = TODAY) -> Any:
    """Return a patcher that stubs every quota/limit helper in
    mito_ai.utils.server_limits with canned MagicMock return values."""
    stubs = {
        "get_chat_completion_count": MagicMock(return_value=completion_count),
        "get_first_completion_date": MagicMock(return_value=first_date),
        "is_pro": MagicMock(return_value=is_pro),
        "check_mito_server_quota": MagicMock(return_value=None),
        "update_mito_server_quota": MagicMock(return_value=None),
    }
    return patch.multiple("mito_ai.utils.server_limits", **stubs)
20
+
21
+
22
def patch_openai_model_list() -> Any:
    """Return a patcher replacing openai.OpenAI with a client whose
    models.list() reports a single gpt-4o-mini model."""
    fake_client = MagicMock()
    fake_client.models.list.return_value = [MagicMock(id="gpt-4o-mini")]
    # Patch the constructor so any OpenAI() call yields the fake client.
    return patch("openai.OpenAI", return_value=fake_client)
28
+
29
+
30
def mock_openai_client() -> Any:
    """Return a patcher replacing OpenAIClient in the providers module with a
    stub configured as an OpenAI client using a user-supplied key."""
    stub = MagicMock()
    stub.capabilities = AICapabilities(
        configuration={"model": "gpt-4o-mini"},
        provider="OpenAI with user key",
        type="ai_capabilities",
    )
    stub.key_type = "user"
    stub.request_completions = AsyncMock(return_value="Test completion")
    stub.stream_completions = AsyncMock(return_value="Test completion")
    return patch("mito_ai.completions.providers.OpenAIClient", return_value=stub)
42
+
43
+
44
def mock_gemini_client() -> Any:
    """Return a patcher replacing GeminiClient in the providers module with a
    stub exposing Gemini capabilities and canned completions."""
    stub = MagicMock()
    stub.capabilities = AICapabilities(
        configuration={"model": "gemini-2-pro"},
        provider="Gemini",
        type="ai_capabilities",
    )
    stub.key_type = "gemini"
    stub.request_completions = AsyncMock(return_value="Test completion")
    stub.stream_completions = AsyncMock(return_value="Test completion")
    return patch("mito_ai.completions.providers.GeminiClient", return_value=stub)
56
+
57
+
58
def mock_azure_openai_client() -> Any:
    """Return a patcher replacing OpenAIClient in the providers module with a
    stub configured as an Azure OpenAI client.

    Note: patches the same OpenAIClient symbol as mock_openai_client — the
    Azure path is distinguished only by the stub's capabilities/key_type.
    """
    stub = MagicMock()
    stub.capabilities = AICapabilities(
        configuration={"model": "gpt-4o"},
        provider="Azure OpenAI",
        type="ai_capabilities",
    )
    stub.key_type = "azure"
    stub.request_completions = AsyncMock(return_value="Test completion")
    stub.stream_completions = AsyncMock(return_value="Test completion")
    return patch("mito_ai.completions.providers.OpenAIClient", return_value=stub)
70
+
71
+
72
+
73
def mock_claude_client() -> Any:
    """Return a patcher replacing AnthropicClient in the providers module with
    a stub exposing Claude capabilities and canned completions."""
    stub = MagicMock()
    stub.capabilities = AICapabilities(
        configuration={"model": "claude-3-opus-20240229"},
        provider="Claude",
        type="ai_capabilities",
    )
    stub.key_type = "claude"
    stub.request_completions = AsyncMock(return_value="Test completion")
    stub.stream_completions = AsyncMock(return_value="Test completion")
    # AnthropicClient additionally exposes stream_response, so stub it too.
    stub.stream_response = AsyncMock(return_value="Test completion")
    return patch("mito_ai.completions.providers.AnthropicClient", return_value=stub)
@@ -0,0 +1,3 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+