mito-ai 0.1.32__py3-none-any.whl → 0.1.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mito-ai might be problematic. Click here for more details.

Files changed (58) hide show
  1. mito_ai/_version.py +1 -1
  2. mito_ai/anthropic_client.py +52 -54
  3. mito_ai/app_builder/handlers.py +2 -4
  4. mito_ai/completions/models.py +15 -1
  5. mito_ai/completions/prompt_builders/agent_system_message.py +10 -2
  6. mito_ai/completions/providers.py +79 -39
  7. mito_ai/constants.py +11 -24
  8. mito_ai/gemini_client.py +44 -48
  9. mito_ai/openai_client.py +30 -44
  10. mito_ai/tests/message_history/test_generate_short_chat_name.py +0 -4
  11. mito_ai/tests/open_ai_utils_test.py +18 -22
  12. mito_ai/tests/{test_anthropic_client.py → providers/test_anthropic_client.py} +37 -32
  13. mito_ai/tests/providers/test_azure.py +2 -6
  14. mito_ai/tests/providers/test_capabilities.py +120 -0
  15. mito_ai/tests/{test_gemini_client.py → providers/test_gemini_client.py} +40 -36
  16. mito_ai/tests/providers/test_mito_server_utils.py +448 -0
  17. mito_ai/tests/providers/test_model_resolution.py +130 -0
  18. mito_ai/tests/providers/test_openai_client.py +57 -0
  19. mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
  20. mito_ai/tests/providers/test_provider_limits.py +42 -0
  21. mito_ai/tests/providers/test_providers.py +382 -0
  22. mito_ai/tests/providers/test_retry_logic.py +389 -0
  23. mito_ai/tests/providers/utils.py +85 -0
  24. mito_ai/tests/test_constants.py +15 -2
  25. mito_ai/tests/test_telemetry.py +12 -0
  26. mito_ai/utils/anthropic_utils.py +21 -29
  27. mito_ai/utils/gemini_utils.py +18 -22
  28. mito_ai/utils/mito_server_utils.py +92 -0
  29. mito_ai/utils/open_ai_utils.py +22 -46
  30. mito_ai/utils/provider_utils.py +49 -0
  31. mito_ai/utils/telemetry_utils.py +11 -1
  32. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
  33. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  34. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  35. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.42b54cf8f038cc526980.js → mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js +785 -351
  36. mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js.map +1 -0
  37. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.a711c58b58423173bd24.js → mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.51d07439b02aaa830975.js +13 -16
  38. mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.51d07439b02aaa830975.js.map +1 -0
  39. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js → mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js +6 -2
  40. mito_ai-0.1.34.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js.map +1 -0
  41. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/METADATA +1 -1
  42. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/RECORD +52 -43
  43. mito_ai/tests/providers_test.py +0 -438
  44. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.42b54cf8f038cc526980.js.map +0 -1
  45. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.a711c58b58423173bd24.js.map +0 -1
  46. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -1
  47. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -7842
  48. mito_ai-0.1.32.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -1
  49. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  50. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  51. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  52. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js +0 -0
  53. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -0
  54. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  55. {mito_ai-0.1.32.data → mito_ai-0.1.34.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  56. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/WHEEL +0 -0
  57. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/entry_points.txt +0 -0
  58. {mito_ai-0.1.32.dist-info → mito_ai-0.1.34.dist-info}/licenses/LICENSE +0 -0
@@ -5,14 +5,17 @@ import asyncio
5
5
  import json
6
6
  import time
7
7
  from typing import Any, Dict, List, Optional, Callable, Union, AsyncGenerator, Tuple
8
- from tornado.httpclient import AsyncHTTPClient
8
+ from mito_ai.utils.mito_server_utils import get_response_from_mito_server
9
9
  from mito_ai.completions.models import AgentResponse, CompletionReply, CompletionStreamChunk, CompletionItem, MessageType
10
- from .utils import _create_http_client
11
10
  from mito_ai.constants import MITO_GEMINI_URL
11
+ from mito_ai.utils.provider_utils import does_message_require_fast_model
12
+ from mito_ai.utils.utils import _create_http_client
12
13
 
13
14
  timeout = 30
14
15
  max_retries = 1
15
16
 
17
+ FAST_GEMINI_MODEL = "gemini-2.0-flash-lite"
18
+
16
19
  def _prepare_gemini_request_data_and_headers(
17
20
  model: str,
18
21
  contents: List[Dict[str, Any]],
@@ -62,25 +65,15 @@ async def get_gemini_completion_from_mito_server(
62
65
  response_format_info: Optional[Any] = None
63
66
  ) -> str:
64
67
  data, headers = _prepare_gemini_request_data_and_headers(model, contents, message_type, config, response_format_info, stream=False)
65
- http_client, http_client_timeout = _create_http_client(timeout, max_retries)
66
- start_time = time.time()
67
- try:
68
- res = await http_client.fetch(
69
- MITO_GEMINI_URL,
70
- method="POST",
71
- headers=headers,
72
- body=json.dumps(data),
73
- request_timeout=http_client_timeout
74
- )
75
- print(f"Gemini request completed in {time.time() - start_time:.2f} seconds")
76
- except Exception as e:
77
- print(f"Gemini request failed after {time.time() - start_time:.2f} seconds with error: {str(e)}")
78
- raise
79
- finally:
80
- http_client.close()
81
-
82
- # The response is a string
83
- return res.body.decode("utf-8")
68
+ return await get_response_from_mito_server(
69
+ MITO_GEMINI_URL,
70
+ headers,
71
+ data,
72
+ timeout,
73
+ max_retries,
74
+ message_type,
75
+ provider_name="Gemini"
76
+ )
84
77
 
85
78
  async def stream_gemini_completion_from_mito_server(
86
79
  model: str,
@@ -163,15 +156,18 @@ async def stream_gemini_completion_from_mito_server(
163
156
  http_client.close()
164
157
 
165
158
  def get_gemini_completion_function_params(
159
+ message_type: MessageType,
166
160
  model: str,
167
161
  contents: list[dict[str, Any]],
168
- message_type: MessageType,
169
162
  response_format_info: Optional[Any] = None,
170
163
  ) -> Dict[str, Any]:
171
164
  """
172
165
  Build the provider_data dict for Gemini completions, mirroring the OpenAI/Anthropic approach.
173
166
  Only includes fields needed for the Gemini API.
174
167
  """
168
+ message_requires_fast_model = does_message_require_fast_model(message_type)
169
+ model = FAST_GEMINI_MODEL if message_requires_fast_model else model
170
+
175
171
  provider_data: Dict[str, Any] = {
176
172
  "model": model,
177
173
  "contents": contents,
@@ -0,0 +1,92 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ from mito_ai.completions.models import MessageType
5
+ from mito_ai.utils.server_limits import check_mito_server_quota, update_mito_server_quota
6
+ from tornado.httpclient import HTTPResponse
7
+ import time
8
+ import json
9
+ from typing import Any, Dict, Optional
10
+ from mito_ai.constants import MITO_GEMINI_URL
11
+ from mito_ai.utils.utils import _create_http_client
12
+
13
+
14
class ProviderCompletionException(Exception):
    """Mito-server error wrapper that maps cleanly onto a CompletionError."""

    def __init__(self, error_message: str, provider_name: str = "LLM Provider", error_type: str = "LLMProviderError"):
        # Build the display title once and reuse it for args[0].
        title = f"{provider_name} Error: {error_message}"

        # Keep the raw pieces so callers can inspect them individually.
        self.error_message = error_message
        self.provider_name = provider_name
        self.error_type = error_type

        # User-facing strings surfaced by the completion error UI.
        self.user_friendly_title = title
        self.user_friendly_hint = (
            f"There was a problem with {provider_name}. "
            f"Try switching to a different model and trying again."
        )

        # Exception.args[0] doubles as a fallback display string.
        super().__init__(title)

    def __str__(self) -> str:
        return f"{self.provider_name} Error: {self.error_message}"
31
+
32
+
33
async def get_response_from_mito_server(
    url: str,
    headers: dict,
    data: Dict[str, Any],
    timeout: int,
    max_retries: int,
    message_type: MessageType,
    provider_name: str = "Mito Server"
) -> str:
    """
    Get a completion from the Mito server.

    Args:
        url: Mito server endpoint to POST to.
        headers: HTTP headers to send with the request.
        data: JSON-serializable request payload.
        timeout: Per-request timeout in seconds.
        max_retries: Retry count passed to the HTTP client factory.
        message_type: Kind of completion being requested; used for quota tracking.
        provider_name: Human-readable provider name used in error messages.

    Returns:
        The completion string extracted from the server's JSON response.

    Raises:
        ProviderCompletionException: When the server returns an error or invalid response.
        Exception: For network/HTTP errors (let these bubble up to be handled by retry logic).
    """
    # First check the mito server quota. If the user has reached the limit, we raise an exception.
    check_mito_server_quota(message_type)

    http_client, http_client_timeout = _create_http_client(timeout, max_retries)
    start_time = time.time()

    try:
        res = await http_client.fetch(
            url,
            method="POST",
            headers=headers,
            body=json.dumps(data),
            request_timeout=http_client_timeout
        )
        print(f"Mito server request completed in {time.time() - start_time:.2f} seconds")

        # Parse and validate response
        try:
            content = json.loads(res.body.decode("utf-8"))

            if "completion" in content:
                return content["completion"]  # type: ignore
            elif "error" in content:
                # Server returned an error
                raise ProviderCompletionException(content['error'], provider_name=provider_name)
            else:
                # Invalid response format
                raise ProviderCompletionException(f"No completion found in response: {content}", provider_name=provider_name)
        except ProviderCompletionException:
            # Re-raise ProviderCompletionException as-is
            raise
        except Exception as e:
            # Chain the original exception so the root cause survives in tracebacks.
            raise ProviderCompletionException(f"Error parsing response: {str(e)}", provider_name=provider_name) from e

    finally:
        try:
            # We always update the quota, even if there is an error.
            update_mito_server_quota(message_type)
        except Exception:
            # Quota bookkeeping is best-effort; never mask the primary error.
            pass

        http_client.close()
91
+
92
+
@@ -10,6 +10,8 @@ import asyncio
10
10
  import json
11
11
  import time
12
12
  from typing import Any, Dict, List, Optional, Final, Union, AsyncGenerator, Tuple, Callable
13
+ from mito_ai.utils.mito_server_utils import get_response_from_mito_server
14
+ from mito_ai.utils.provider_utils import does_message_require_fast_model
13
15
  from tornado.httpclient import AsyncHTTPClient
14
16
  from openai.types.chat import ChatCompletionMessageParam
15
17
 
@@ -27,6 +29,8 @@ from mito_ai.constants import MITO_OPENAI_URL
27
29
  __user_email: Optional[str] = None
28
30
  __user_id: Optional[str] = None
29
31
 
32
+ FAST_OPENAI_MODEL = "gpt-4.1-nano"
33
+
30
34
  def _prepare_request_data_and_headers(
31
35
  last_message_content: Union[str, None],
32
36
  ai_completion_data: Dict[str, Any],
@@ -46,10 +50,7 @@ def _prepare_request_data_and_headers(
46
50
 
47
51
  Returns:
48
52
  A tuple containing the request data and headers
49
- """
50
- # Check that the user is allowed to use the Mito Server
51
- check_mito_server_quota(message_type)
52
-
53
+ """
53
54
  global __user_email, __user_id
54
55
 
55
56
  if __user_email is None:
@@ -89,47 +90,15 @@ async def get_ai_completion_from_mito_server(
89
90
  message_type
90
91
  )
91
92
 
92
- # Create HTTP client with appropriate timeout settings
93
- http_client, http_client_timeout = _create_http_client(timeout, max_retries)
94
-
95
- # There are several types of timeout errors that can happen here.
96
- # == 504 Timeout (tornado.httpclient.HTTPClientError: 504) ==
97
- # The server (AWS Lambda) took too long to process your request
98
- # == 599 Timeout (tornado.httpclient.HTTPClientError: 599) ==
99
- # The client (Tornado) gave up waiting for a response
100
-
101
- start_time = time.time()
102
- try:
103
- res = await http_client.fetch(
104
- # Important: DO NOT CHANGE MITO_AI_URL. If you want to use the dev endpoint,
105
- # go to the top of this file and change MITO_AI_URL to MITO_AI_DEV_URL. We
106
- # have a pytest that ensures that the MITO_AI_URL is always set to MITO_AI_PROD_URL
107
- # before merging into dev. So if you change which variable we are using here, the
108
- # test will not catch our mistakes.
109
- MITO_OPENAI_URL,
110
- method="POST",
111
- headers=headers,
112
- body=json.dumps(data),
113
- # For some reason, we need to add the request_timeout here as well
114
- request_timeout=http_client_timeout
115
- )
116
- print(f"Request completed in {time.time() - start_time:.2f} seconds")
117
- except Exception as e:
118
- print(f"Request failed after {time.time() - start_time:.2f} seconds with error: {str(e)}")
119
- raise
120
- finally:
121
- http_client.close()
122
-
123
- # The lambda function returns a dictionary with a completion entry in it,
124
- # so we just return that.
125
- content = json.loads(res.body)
126
-
127
- if "completion" in content:
128
- return content["completion"] # type: ignore
129
- elif "error" in content:
130
- raise Exception(f"{content['error']}")
131
- else:
132
- raise Exception(f"No completion found in response: {content}")
93
+ return await get_response_from_mito_server(
94
+ MITO_OPENAI_URL,
95
+ headers,
96
+ data,
97
+ timeout,
98
+ max_retries,
99
+ message_type,
100
+ provider_name="OpenAI"
101
+ )
133
102
 
134
103
  async def stream_ai_completion_from_mito_server(
135
104
  last_message_content: Union[str, None],
@@ -273,12 +242,19 @@ async def stream_ai_completion_from_mito_server(
273
242
 
274
243
 
275
244
  def get_open_ai_completion_function_params(
245
+ message_type: MessageType,
276
246
  model: str,
277
247
  messages: List[ChatCompletionMessageParam],
278
248
  stream: bool,
279
249
  response_format_info: Optional[ResponseFormatInfo] = None,
280
250
  ) -> Dict[str, Any]:
281
251
 
252
+ print("MESSAGE TYPE: ", message_type)
253
+ message_requires_fast_model = does_message_require_fast_model(message_type)
254
+ model = FAST_OPENAI_MODEL if message_requires_fast_model else model
255
+
256
+ print(f"model: {model}")
257
+
282
258
  completion_function_params = {
283
259
  "model": model,
284
260
  "stream": stream,
@@ -311,7 +287,7 @@ def get_open_ai_completion_function_params(
311
287
  }
312
288
 
313
289
  # o3-mini will error if we try setting the temperature
314
- if model == "gpt-4o-mini":
290
+ if not model.startswith("o3"):
315
291
  completion_function_params["temperature"] = 0.0
316
292
 
317
293
  return completion_function_params
@@ -0,0 +1,49 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ from typing import Union
5
+
6
+ from mito_ai.completions.models import MessageType
7
+
8
+
9
def get_model_provider(model: str) -> Union[str, None]:
    """
    Determine the model type based on the model name prefix.

    Returns one of 'claude', 'gemini', 'ollama', or 'openai', or None when
    the model name is empty or does not match any known prefix.
    """
    if not model:
        return None

    # Ordered prefix → provider lookup; matching is case-insensitive.
    prefix_to_provider = (
        ('claude', 'claude'),
        ('gemini', 'gemini'),
        ('ollama', 'ollama'),
        ('gpt', 'openai'),
    )

    normalized = model.lower()
    for prefix, provider in prefix_to_provider:
        if normalized.startswith(prefix):
            return provider

    return None
28
+
29
+
30
def does_message_require_fast_model(message_type: MessageType) -> bool:
    """
    Determines if a message requires the fast model.

    The fast model is used for messages that are not chat messages.
    For example, inline completions and chat name generation need to be fast
    so they don't slow down the user's experience.
    """
    # Full-quality model: interactive chat and agent work.
    slow_model_types = (
        MessageType.CHAT,
        MessageType.SMART_DEBUG,
        MessageType.CODE_EXPLAIN,
        MessageType.AGENT_EXECUTION,
        MessageType.AGENT_AUTO_ERROR_FIXUP,
    )
    # Fast model: latency-sensitive background requests.
    fast_model_types = (
        MessageType.INLINE_COMPLETION,
        MessageType.CHAT_NAME_GENERATION,
    )
    # These messages don't use any model, but we handle them for type safety.
    modelless_types = (
        MessageType.START_NEW_CHAT,
        MessageType.FETCH_HISTORY,
        MessageType.GET_THREADS,
        MessageType.DELETE_THREAD,
        MessageType.UPDATE_MODEL_CONFIG,
    )

    if message_type in slow_model_types:
        return False
    if message_type in fast_model_types or message_type in modelless_types:
        return True
    raise ValueError(f"Invalid message type: {message_type}")
48
+
49
+
@@ -24,6 +24,7 @@ PRINT_LOGS = False
24
24
  # Constants for logging the success or error of Mito AI
25
25
  MITO_AI_COMPLETION_SUCCESS = 'mito_ai_success'
26
26
  MITO_AI_COMPLETION_ERROR = 'mito_ai_error'
27
+ MITO_AI_COMPLETION_RETRY = 'mito_ai_retry'
27
28
 
28
29
  # Params
29
30
  # - logging the type of key
@@ -82,7 +83,6 @@ def telemetry_turned_on(key_type: Optional[str] = None) -> bool:
82
83
  Helper function that tells you if logging is turned on or
83
84
  turned off on the entire Mito instance
84
85
  """
85
-
86
86
  # If the user is on the Mito server, then they are sending
87
87
  # us their information already
88
88
  if key_type == 'mito_server_key':
@@ -338,6 +338,15 @@ def log_db_connection_success(connection_type: str, schema: Dict[str, Any]) -> N
338
338
  },
339
339
  )
340
340
 
341
+ def log_ai_completion_retry(key_type: Literal['mito_server_key', 'user_key'], message_type: MessageType, error: BaseException) -> None:
342
+ log(MITO_AI_COMPLETION_RETRY, params={KEY_TYPE_PARAM: key_type, "message_type": message_type}, key_type=key_type, error=error)
343
+
344
+ def log_ai_completion_error(key_type: Literal['mito_server_key', 'user_key'], message_type: MessageType, error: BaseException) -> None:
345
+ log(MITO_AI_COMPLETION_ERROR, params={KEY_TYPE_PARAM: key_type, "message_type": message_type}, key_type=key_type, error=error)
346
+
347
+ def log_mito_server_free_tier_limit_reached(key_type: Literal['mito_server_key', 'user_key'], message_type: MessageType) -> None:
348
+ log(MITO_SERVER_FREE_TIER_LIMIT_REACHED, params={KEY_TYPE_PARAM: key_type, "message_type": message_type}, key_type=key_type)
349
+
341
350
  def log_db_connection_error(connection_type: str, error_message: str) -> None:
342
351
  log(
343
352
  "mito_ai_db_connection_error",
@@ -346,3 +355,4 @@ def log_db_connection_error(connection_type: str, error_message: str) -> None:
346
355
  "error_message": error_message,
347
356
  }
348
357
  )
358
+
@@ -710,7 +710,7 @@
710
710
  "semver": {},
711
711
  "vscode-diff": {},
712
712
  "mito_ai": {
713
- "version": "0.1.32",
713
+ "version": "0.1.34",
714
714
  "singleton": true,
715
715
  "import": "/home/runner/work/mito/mito/mito-ai/lib/index.js"
716
716
  }
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mito_ai",
3
- "version": "0.1.32",
3
+ "version": "0.1.34",
4
4
  "description": "AI chat for JupyterLab",
5
5
  "keywords": [
6
6
  "jupyter",
@@ -138,7 +138,7 @@
138
138
  "outputDir": "mito_ai/labextension",
139
139
  "schemaDir": "schema",
140
140
  "_build": {
141
- "load": "static/remoteEntry.a711c58b58423173bd24.js",
141
+ "load": "static/remoteEntry.51d07439b02aaa830975.js",
142
142
  "extension": "./extension",
143
143
  "style": "./style"
144
144
  }
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mito_ai",
3
- "version": "0.1.32",
3
+ "version": "0.1.34",
4
4
  "description": "AI chat for JupyterLab",
5
5
  "keywords": [
6
6
  "jupyter",