mito-ai 0.1.52__py3-none-any.whl → 0.1.53__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. mito_ai/_version.py +1 -1
  2. mito_ai/anthropic_client.py +4 -3
  3. mito_ai/utils/anthropic_utils.py +28 -3
  4. mito_ai/utils/tokens.py +29 -0
  5. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
  6. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  7. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  8. mito_ai-0.1.52.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.5ec1e525d244fc8588cf.js → mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.4b7cd47a24bb24ef84ea.js +408 -64
  9. mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.4b7cd47a24bb24ef84ea.js.map +1 -0
  10. mito_ai-0.1.52.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.89927e1d3b5962d57ae3.js → mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4395ab9342efa39fc0a2.js +3 -3
  11. mito_ai-0.1.52.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.89927e1d3b5962d57ae3.js.map → mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4395ab9342efa39fc0a2.js.map +1 -1
  12. {mito_ai-0.1.52.dist-info → mito_ai-0.1.53.dist-info}/METADATA +1 -1
  13. {mito_ai-0.1.52.dist-info → mito_ai-0.1.53.dist-info}/RECORD +37 -36
  14. mito_ai-0.1.52.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.5ec1e525d244fc8588cf.js.map +0 -1
  15. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  16. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  17. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  18. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  19. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  20. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +0 -0
  21. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -0
  22. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  23. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  24. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  25. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  26. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  27. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  28. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  29. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  30. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  31. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  32. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  33. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  34. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  35. {mito_ai-0.1.52.data → mito_ai-0.1.53.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  36. {mito_ai-0.1.52.dist-info → mito_ai-0.1.53.dist-info}/WHEEL +0 -0
  37. {mito_ai-0.1.52.dist-info → mito_ai-0.1.53.dist-info}/entry_points.txt +0 -0
  38. {mito_ai-0.1.52.dist-info → mito_ai-0.1.53.dist-info}/licenses/LICENSE +0 -0
mito_ai/_version.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # This file is auto-generated by Hatchling. As such, do not:
2
2
  # - modify
3
3
  # - track in version control e.g. be sure to add to .gitignore
4
- __version__ = VERSION = '0.1.52'
4
+ __version__ = VERSION = '0.1.53'
@@ -9,12 +9,11 @@ from anthropic.types import Message, MessageParam, TextBlockParam
9
9
  from mito_ai.completions.models import ResponseFormatInfo, CompletionReply, CompletionStreamChunk, CompletionItem, MessageType
10
10
  from mito_ai.constants import MESSAGE_HISTORY_TRIM_THRESHOLD
11
11
  from openai.types.chat import ChatCompletionMessageParam
12
- from mito_ai.utils.anthropic_utils import get_anthropic_completion_from_mito_server, stream_anthropic_completion_from_mito_server, get_anthropic_completion_function_params
12
+ from mito_ai.utils.anthropic_utils import get_anthropic_completion_from_mito_server, select_correct_model, stream_anthropic_completion_from_mito_server, get_anthropic_completion_function_params
13
13
 
14
14
  # Max tokens is a required parameter for the Anthropic API.
15
15
  # We set it to a high number so that we can edit large code cells
16
- # 8192 is the maximum allowed number of output tokens for claude-3-5-haiku-20241022
17
- MAX_TOKENS = 8_000
16
+ MAX_TOKENS = 64_000
18
17
 
19
18
  def extract_and_parse_anthropic_json_response(response: Message) -> Union[object, Any]:
20
19
  """
@@ -278,6 +277,8 @@ class AnthropicClient:
278
277
  reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None]) -> str:
279
278
  try:
280
279
  anthropic_system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages_with_caching(messages)
280
+ model = select_correct_model(model, message_type, anthropic_system_prompt, anthropic_messages)
281
+
281
282
  accumulated_response = ""
282
283
 
283
284
  if self.api_key:
@@ -10,6 +10,7 @@ from mito_ai.completions.models import AgentResponse, MessageType, ResponseForma
10
10
  from mito_ai.utils.schema import UJ_STATIC_USER_ID, UJ_USER_EMAIL
11
11
  from mito_ai.utils.db import get_user_field
12
12
  from mito_ai.constants import MITO_ANTHROPIC_URL
13
+ from mito_ai.utils.tokens import get_rough_token_estimatation_anthropic
13
14
 
14
15
  __user_email: Optional[str] = None
15
16
  __user_id: Optional[str] = None
@@ -17,7 +18,29 @@ __user_id: Optional[str] = None
17
18
  ANTHROPIC_TIMEOUT = 60
18
19
  max_retries = 1
19
20
 
20
- FAST_ANTHROPIC_MODEL = "claude-3-5-haiku-latest"
21
+ FAST_ANTHROPIC_MODEL = "claude-haiku-4-5-20251001" # This should be in sync with ModelSelector.tsx
22
+ LARGE_CONTEXT_MODEL = "claude-sonnet-4-5-20250929" # This should be in sync with ModelSelector.tsx
23
+
24
+ def does_message_exceed_max_tokens(system: Union[str, List[TextBlockParam], anthropic.Omit], messages: List[MessageParam]) -> bool:
25
+ token_estimation = get_rough_token_estimatation_anthropic(system, messages)
26
+
27
+ if token_estimation is not None and token_estimation > 200_000:
28
+ return True
29
+ return False
30
+
31
+ def select_correct_model(default_model: str, message_type: MessageType, system: Union[str, List[TextBlockParam], anthropic.Omit], messages: List[MessageParam]) -> str:
32
+
33
+ message_exceeds_fast_model_context_limit = does_message_exceed_max_tokens(system, messages)
34
+ if message_exceeds_fast_model_context_limit:
35
+ # Anthropic lets us use beta mode to extend context window for sonnet class models
36
+ # but not haiku models
37
+ return LARGE_CONTEXT_MODEL
38
+
39
+ message_requires_fast_model = does_message_require_fast_model(message_type)
40
+ if message_requires_fast_model:
41
+ return FAST_ANTHROPIC_MODEL
42
+
43
+ return default_model
21
44
 
22
45
  def _prepare_anthropic_request_data_and_headers(
23
46
  model: Union[str, None],
@@ -36,6 +59,7 @@ def _prepare_anthropic_request_data_and_headers(
36
59
  __user_email = get_user_field(UJ_USER_EMAIL)
37
60
  if __user_id is None:
38
61
  __user_id = get_user_field(UJ_STATIC_USER_ID)
62
+
39
63
  # Build the inner data dict (excluding timeout, max_retries, email, user_id)
40
64
  inner_data: Dict[str, Any] = {
41
65
  "model": model,
@@ -44,6 +68,7 @@ def _prepare_anthropic_request_data_and_headers(
44
68
  "messages": messages,
45
69
  "betas": ["context-1m-2025-08-07"]
46
70
  }
71
+
47
72
  # Add system to inner_data only if it is not anthropic.Omit
48
73
  if not isinstance(system, anthropic.Omit):
49
74
  inner_data["system"] = system
@@ -139,8 +164,7 @@ def get_anthropic_completion_function_params(
139
164
  Only includes fields needed for the Anthropic API.
140
165
  """
141
166
 
142
- message_requires_fast_model = does_message_require_fast_model(message_type)
143
- model = FAST_ANTHROPIC_MODEL if message_requires_fast_model else model
167
+ model = select_correct_model(model, message_type, system, messages)
144
168
 
145
169
  provider_data = {
146
170
  "model": model,
@@ -166,3 +190,4 @@ def get_anthropic_completion_function_params(
166
190
  provider_data["stream"] = stream
167
191
  # Optionally handle response_format_info if Anthropic supports it in the future
168
192
  return provider_data
193
+
@@ -0,0 +1,29 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ from typing import List, Union, Optional
5
+ import anthropic
6
+ from anthropic.types import MessageParam, TextBlockParam, ToolUnionParam
7
+
8
+
9
+ def get_rough_token_estimatation_anthropic(system_message: Union[str, List[TextBlockParam], anthropic.Omit], messages: List[MessageParam]) -> Optional[float]:
10
+ """
11
+ Get a very rough estimation of the number of tokens in a conversation.
12
+ We bias towards overestimating to make sure we don't accidentally
13
+ think a conversation is safe to send to an AI without having applied an
14
+ optimization strategy.
15
+ """
16
+
17
+ try:
18
+ stringified_system_message = str(system_message)
19
+ stringified_messages = str(messages)
20
+ total_stringified_context = stringified_system_message + stringified_messages
21
+
22
+ # The general rule of thumb is: 1 token is about 4 characters.
23
+ # To be safe we use: 1 token is about 3 characters
24
+ # This helps make sure we always overestimate
25
+ return len(total_stringified_context) / 3
26
+
27
+ except:
28
+ return None
29
+
@@ -720,7 +720,7 @@
720
720
  "semver": {},
721
721
  "vscode-diff": {},
722
722
  "mito_ai": {
723
- "version": "0.1.52",
723
+ "version": "0.1.53",
724
724
  "singleton": true,
725
725
  "import": "/home/runner/work/mito/mito/mito-ai/lib/index.js"
726
726
  }
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mito_ai",
3
- "version": "0.1.52",
3
+ "version": "0.1.53",
4
4
  "description": "AI chat for JupyterLab",
5
5
  "keywords": [
6
6
  "jupyter",
@@ -140,7 +140,7 @@
140
140
  "outputDir": "mito_ai/labextension",
141
141
  "schemaDir": "schema",
142
142
  "_build": {
143
- "load": "static/remoteEntry.89927e1d3b5962d57ae3.js",
143
+ "load": "static/remoteEntry.4395ab9342efa39fc0a2.js",
144
144
  "extension": "./extension",
145
145
  "style": "./style"
146
146
  }
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mito_ai",
3
- "version": "0.1.52",
3
+ "version": "0.1.53",
4
4
  "description": "AI chat for JupyterLab",
5
5
  "keywords": [
6
6
  "jupyter",