mito-ai 0.1.57__py3-none-any.whl → 0.1.58__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. mito_ai/__init__.py +16 -22
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +24 -14
  4. mito_ai/chart_wizard/handlers.py +78 -17
  5. mito_ai/chart_wizard/urls.py +8 -5
  6. mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +6 -8
  7. mito_ai/completions/completion_handlers/agent_execution_handler.py +6 -8
  8. mito_ai/completions/completion_handlers/chat_completion_handler.py +13 -17
  9. mito_ai/completions/completion_handlers/code_explain_handler.py +13 -17
  10. mito_ai/completions/completion_handlers/completion_handler.py +3 -5
  11. mito_ai/completions/completion_handlers/inline_completer_handler.py +5 -6
  12. mito_ai/completions/completion_handlers/scratchpad_result_handler.py +6 -8
  13. mito_ai/completions/completion_handlers/smart_debug_handler.py +13 -17
  14. mito_ai/completions/completion_handlers/utils.py +3 -7
  15. mito_ai/completions/handlers.py +32 -22
  16. mito_ai/completions/message_history.py +8 -10
  17. mito_ai/completions/prompt_builders/chart_add_field_prompt.py +35 -0
  18. mito_ai/constants.py +8 -1
  19. mito_ai/enterprise/__init__.py +1 -1
  20. mito_ai/enterprise/litellm_client.py +137 -0
  21. mito_ai/log/handlers.py +1 -1
  22. mito_ai/openai_client.py +10 -90
  23. mito_ai/{completions/providers.py → provider_manager.py} +157 -53
  24. mito_ai/settings/enterprise_handler.py +26 -0
  25. mito_ai/settings/urls.py +2 -0
  26. mito_ai/streamlit_conversion/agent_utils.py +2 -30
  27. mito_ai/streamlit_conversion/streamlit_agent_handler.py +48 -46
  28. mito_ai/streamlit_preview/handlers.py +6 -3
  29. mito_ai/streamlit_preview/urls.py +5 -3
  30. mito_ai/tests/message_history/test_generate_short_chat_name.py +72 -28
  31. mito_ai/tests/providers/test_anthropic_client.py +174 -16
  32. mito_ai/tests/providers/test_azure.py +13 -13
  33. mito_ai/tests/providers/test_capabilities.py +14 -17
  34. mito_ai/tests/providers/test_gemini_client.py +14 -13
  35. mito_ai/tests/providers/test_model_resolution.py +145 -89
  36. mito_ai/tests/providers/test_openai_client.py +209 -13
  37. mito_ai/tests/providers/test_provider_limits.py +5 -5
  38. mito_ai/tests/providers/test_providers.py +229 -51
  39. mito_ai/tests/providers/test_retry_logic.py +13 -22
  40. mito_ai/tests/providers/utils.py +4 -4
  41. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +57 -85
  42. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +4 -1
  43. mito_ai/tests/test_enterprise_mode.py +162 -0
  44. mito_ai/tests/test_model_utils.py +271 -0
  45. mito_ai/utils/anthropic_utils.py +8 -6
  46. mito_ai/utils/gemini_utils.py +0 -3
  47. mito_ai/utils/litellm_utils.py +84 -0
  48. mito_ai/utils/model_utils.py +178 -0
  49. mito_ai/utils/open_ai_utils.py +0 -8
  50. mito_ai/utils/provider_utils.py +6 -28
  51. mito_ai/utils/telemetry_utils.py +14 -2
  52. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -102
  53. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  54. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  55. mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js +671 -75
  56. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js.map +1 -0
  57. mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js +17 -17
  58. mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js.map → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js.map +1 -1
  59. {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/METADATA +2 -1
  60. {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/RECORD +86 -79
  61. mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js.map +0 -1
  62. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  63. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  64. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  65. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  66. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  67. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
  68. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
  69. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  70. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  71. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  72. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  73. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  74. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  75. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  76. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  77. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  78. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  79. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  80. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  81. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  82. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  83. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +0 -0
  84. {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
  85. {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/WHEEL +0 -0
  86. {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/entry_points.txt +0 -0
  87. {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/licenses/LICENSE +0 -0
mito_ai/__init__.py CHANGED
@@ -4,11 +4,11 @@
 from typing import List, Dict
 from jupyter_server.utils import url_path_join
 from mito_ai.completions.handlers import CompletionHandler
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.app_deploy.handlers import AppDeployHandler
-from mito_ai.streamlit_preview.handlers import StreamlitPreviewHandler
 from mito_ai.log.urls import get_log_urls
+from mito_ai.utils.litellm_utils import is_litellm_configured
 from mito_ai.version_check import VersionCheckHandler
 from mito_ai.db.urls import get_db_urls
 from mito_ai.settings.urls import get_settings_urls
@@ -20,6 +20,8 @@ from mito_ai.file_uploads.urls import get_file_uploads_urls
 from mito_ai.user.urls import get_user_urls
 from mito_ai.chat_history.urls import get_chat_history_urls
 from mito_ai.chart_wizard.urls import get_chart_wizard_urls
+from mito_ai.utils.version_utils import is_enterprise
+from mito_ai import constants
 
 # Force Matplotlib to use the Jupyter inline backend.
 # Background: importing Streamlit sets os.environ["MPLBACKEND"] = "Agg" very early.
@@ -33,16 +35,6 @@ from mito_ai.chart_wizard.urls import get_chart_wizard_urls
 import os
 os.environ["MPLBACKEND"] = "module://matplotlib_inline.backend_inline"
 
-try:
-    from _version import __version__
-except ImportError:
-    # Fallback when using the package in dev mode without installing in editable mode with pip. It is highly recommended to install
-    # the package from a stable release or in editable mode: https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs
-    import warnings
-
-    warnings.warn("Importing 'mito_ai' outside a proper installation.")
-    __version__ = "dev"
-
 def _jupyter_labextension_paths() -> List[Dict[str, str]]:
     return [{"src": "labextension", "dest": "mito_ai"}]
 
@@ -65,7 +57,7 @@ def _load_jupyter_server_extension(server_app) -> None:  # type: ignore
     web_app = server_app.web_app
     base_url = web_app.settings["base_url"]
 
-    open_ai_provider = OpenAIProvider(config=server_app.config)
+    provider_manager = ProviderManager(config=server_app.config)
 
     # Create a single GlobalMessageHistory instance for the entire server
     # This ensures thread-safe access to the .mito/ai-chats directory
@@ -76,18 +68,13 @@ def _load_jupyter_server_extension(server_app) -> None:  # type: ignore
         (
             url_path_join(base_url, "mito-ai", "completions"),
             CompletionHandler,
-            {"llm": open_ai_provider, "message_history": global_message_history},
+            {"llm": provider_manager, "message_history": global_message_history},
         ),
         (
             url_path_join(base_url, "mito-ai", "app-deploy"),
             AppDeployHandler,
             {}
         ),
-        (
-            url_path_join(base_url, "mito-ai", "streamlit-preview"),
-            StreamlitPreviewHandler,
-            {}
-        ),
         (
             url_path_join(base_url, "mito-ai", "version-check"),
             VersionCheckHandler,
@@ -104,13 +91,20 @@ def _load_jupyter_server_extension(server_app) -> None:  # type: ignore
     handlers.extend(get_db_urls(base_url))  # type: ignore
     handlers.extend(get_settings_urls(base_url))  # type: ignore
     handlers.extend(get_rules_urls(base_url))  # type: ignore
-    handlers.extend(get_log_urls(base_url, open_ai_provider.key_type))  # type: ignore
+    handlers.extend(get_log_urls(base_url, provider_manager.key_type))  # type: ignore
     handlers.extend(get_auth_urls(base_url))  # type: ignore
-    handlers.extend(get_streamlit_preview_urls(base_url))  # type: ignore
+    handlers.extend(get_streamlit_preview_urls(base_url, provider_manager))  # type: ignore
     handlers.extend(get_file_uploads_urls(base_url))  # type: ignore
    handlers.extend(get_user_urls(base_url))  # type: ignore
     handlers.extend(get_chat_history_urls(base_url, global_message_history))  # type: ignore
-    handlers.extend(get_chart_wizard_urls(base_url, open_ai_provider))  # type: ignore
+    handlers.extend(get_chart_wizard_urls(base_url, provider_manager))  # type: ignore
 
     web_app.add_handlers(host_pattern, handlers)
+
+    # Log enterprise mode status and LiteLLM configuration
+    if is_enterprise():
+        server_app.log.info("Enterprise mode enabled")
+    if is_litellm_configured():
+        server_app.log.info(f"LiteLLM configured: endpoint={constants.LITELLM_BASE_URL}, models={constants.LITELLM_MODELS}")
+
     server_app.log.info("Loaded the mito_ai server extension")
mito_ai/_version.py CHANGED
@@ -1,4 +1,4 @@
 # This file is auto-generated by Hatchling. As such, do not:
 # - modify
 # - track in version control e.g. be sure to add to .gitignore
-__version__ = VERSION = '0.1.57'
+__version__ = VERSION = '0.1.58'
mito_ai/anthropic_client.py CHANGED
@@ -9,7 +9,7 @@ from anthropic.types import Message, MessageParam, TextBlockParam
 from mito_ai.completions.models import ResponseFormatInfo, CompletionReply, CompletionStreamChunk, CompletionItem, MessageType
 from mito_ai.completions.prompt_builders.prompt_section_registry import get_max_trim_after_messages
 from openai.types.chat import ChatCompletionMessageParam
-from mito_ai.utils.anthropic_utils import get_anthropic_completion_from_mito_server, select_correct_model, stream_anthropic_completion_from_mito_server, get_anthropic_completion_function_params
+from mito_ai.utils.anthropic_utils import get_anthropic_completion_from_mito_server, select_correct_model, stream_anthropic_completion_from_mito_server, get_anthropic_completion_function_params, LARGE_CONTEXT_MODEL, EXTENDED_CONTEXT_BETA
 
 # Max tokens is a required parameter for the Anthropic API.
 # We set it to a high number so that we can edit large code cells
@@ -220,7 +220,10 @@ class AnthropicClient:
         self.max_retries = max_retries
         self.client: Optional[anthropic.Anthropic]
         if api_key:
-            self.client = anthropic.Anthropic(api_key=api_key)
+            # Use a higher timeout to avoid the 10-minute streaming requirement for long requests
+            # The default SDK timeout is 600s (10 minutes), but we set it higher for agent mode
+            # TODO: We should update agent mode to use streaming like anthropic suggests
+            self.client = anthropic.Anthropic(api_key=api_key, timeout=1200.0)  # 20 minutes
         else:
             self.client = None
 
@@ -249,7 +252,8 @@ class AnthropicClient:
         if self.api_key:
             # Unpack provider_data for direct API call
             assert self.client is not None
-            response = self.client.messages.create(**provider_data)
+            # Beta API accepts MessageParam (compatible at runtime with BetaMessageParam)
+            response = self.client.beta.messages.create(**provider_data)  # type: ignore[arg-type]
 
             if provider_data.get("tool_choice") is not None:
                 result = extract_and_parse_anthropic_json_response(response)
@@ -284,21 +288,27 @@ class AnthropicClient:
 
         if self.api_key:
             assert self.client is not None
-            stream = self.client.messages.create(
-                model=model,
-                max_tokens=MAX_TOKENS,
-                temperature=0,
-                system=anthropic_system_prompt,
-                messages=anthropic_messages,
-                stream=True
-            )
+            # Beta API accepts MessageParam (compatible at runtime with BetaMessageParam)
+            # Enable extended context beta when using LARGE_CONTEXT_MODEL
+            create_params = {
+                "model": model,
+                "max_tokens": MAX_TOKENS,
+                "temperature": 0,
+                "system": anthropic_system_prompt,
+                "messages": anthropic_messages,  # type: ignore[arg-type]
+                "stream": True
+            }
+            if model == LARGE_CONTEXT_MODEL:
+                create_params["betas"] = [EXTENDED_CONTEXT_BETA]
+            stream = self.client.beta.messages.create(**create_params)  # type: ignore[call-overload]
 
             for chunk in stream:
-                if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
-                    content = chunk.delta.text
+                # Type checking for beta API streaming chunks (runtime type checking, types are compatible)
+                if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":  # type: ignore[union-attr]
+                    content = chunk.delta.text  # type: ignore[union-attr]
                     accumulated_response += content
 
-                is_finished = chunk.type == "message_stop"
+                is_finished = chunk.type == "message_stop"  # type: ignore[union-attr]
 
                 reply_fn(CompletionStreamChunk(
                     parent_id=message_id,
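The streaming path now goes through the SDK's beta surface so that a betas list can be attached for the extended-context model. A standalone sketch of the same pattern, assuming LARGE_CONTEXT_MODEL and EXTENDED_CONTEXT_BETA keep the meanings given to them in mito_ai.utils.anthropic_utils (their values are not shown in this diff) and substituting a placeholder for the package's MAX_TOKENS constant:

    import anthropic
    from mito_ai.utils.anthropic_utils import LARGE_CONTEXT_MODEL, EXTENDED_CONTEXT_BETA

    # 20-minute timeout, matching the constructor change above; API key read from ANTHROPIC_API_KEY
    client = anthropic.Anthropic(timeout=1200.0)

    create_params = {
        "model": LARGE_CONTEXT_MODEL,
        "max_tokens": 8192,  # placeholder; the package uses its own MAX_TOKENS constant
        "messages": [{"role": "user", "content": "Explain this traceback..."}],
        "stream": True,
        # Opt in to the extended-context beta only for the large-context model
        "betas": [EXTENDED_CONTEXT_BETA],
    }
    stream = client.beta.messages.create(**create_params)
    for chunk in stream:
        if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
            print(chunk.delta.text, end="", flush=True)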
mito_ai/chart_wizard/handlers.py CHANGED
@@ -6,13 +6,18 @@ import tornado
 from typing import List
 from jupyter_server.base.handlers import APIHandler
 from openai.types.chat import ChatCompletionMessageParam
-from mito_ai.completions.providers import OpenAIProvider
-from mito_ai.utils.anthropic_utils import FAST_ANTHROPIC_MODEL
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.models import MessageType
-from mito_ai.completions.prompt_builders.chart_conversion_prompt import create_chart_conversion_prompt
+from mito_ai.completions.prompt_builders.chart_conversion_prompt import (
+    create_chart_conversion_prompt,
+)
+from mito_ai.completions.prompt_builders.chart_add_field_prompt import (
+    create_chart_add_field_prompt,
+)
 
-class ChartWizardHandler(APIHandler):
-    def initialize(self, llm: OpenAIProvider) -> None:
+
+class ConvertChartHandler(APIHandler):
+    def initialize(self, llm: ProviderManager) -> None:
         """Initialize the handler with the LLM provider."""
         super().initialize()
         self._llm = llm
@@ -21,26 +26,82 @@ class ChartWizardHandler(APIHandler):
     async def post(self) -> None:
         """POST endpoint that receives code from the frontend and sends it to LLM."""
         try:
-            data = json.loads(self.request.body.decode('utf-8'))
-            code = data.get('code', '')
-
+            data = json.loads(self.request.body.decode("utf-8"))
+            code = data.get("code", "")
+
             # Create prompt using the prompt builder
             prompt = create_chart_conversion_prompt(code)
-
+
             # Call LLM
-            messages: List[ChatCompletionMessageParam] = [{"role": "user", "content": prompt}]
+            messages: List[ChatCompletionMessageParam] = [
+                {"role": "user", "content": prompt}
+            ]
             converted_code = await self._llm.request_completions(
                 messages=messages,
-                model=FAST_ANTHROPIC_MODEL,
                 message_type=MessageType.CHAT,
-                thread_id=None
+                thread_id=None,
+                use_fast_model=True,
             )
-
+
             # Return the converted code
-            self.write({
-                "message": "Code converted successfully",
-                "converted_code": converted_code
-            })
+            self.write(
+                {
+                    "message": "Code converted successfully",
+                    "converted_code": converted_code,
+                }
+            )
+            self.finish()
+        except json.JSONDecodeError:
+            self.set_status(400)
+            self.write({"error": "Invalid JSON in request body"})
+            self.finish()
+        except Exception as e:
+            self.set_status(500)
+            self.write({"error": str(e)})
+            self.finish()
+
+
+class AddFieldHandler(APIHandler):
+    def initialize(self, llm: ProviderManager) -> None:
+        """Initialize the handler with the LLM provider."""
+        super().initialize()
+        self._llm = llm
+
+    @tornado.web.authenticated
+    async def post(self) -> None:
+        """POST endpoint that adds a new field to the chart configuration."""
+        try:
+            data = json.loads(self.request.body.decode("utf-8"))
+            code = data.get("code", "")
+            user_description = data.get("user_description", "")
+            existing_variables = data.get("existing_variables", [])
+
+            if not user_description:
+                self.set_status(400)
+                self.write({"error": "user_description is required"})
+                self.finish()
+                return
+
+            # Create prompt using the prompt builder
+            prompt = create_chart_add_field_prompt(
+                code, user_description, existing_variables
+            )
+
+            # Call LLM
+            messages: List[ChatCompletionMessageParam] = [
+                {"role": "user", "content": prompt}
+            ]
+            updated_code = await self._llm.request_completions(
+                messages=messages,
+                message_type=MessageType.CHAT,
+                thread_id=None,
+                use_fast_model=True,
+            )
+
+            # Return the updated code
+            self.write(
+                {"message": "Field added successfully", "updated_code": updated_code}
+            )
             self.finish()
         except json.JSONDecodeError:
             self.set_status(400)
mito_ai/chart_wizard/urls.py CHANGED
@@ -3,11 +3,13 @@
 
 from typing import List, Tuple, Any
 from jupyter_server.utils import url_path_join
-from mito_ai.chart_wizard.handlers import ChartWizardHandler
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.chart_wizard.handlers import ConvertChartHandler, AddFieldHandler
+from mito_ai.provider_manager import ProviderManager
 
 
-def get_chart_wizard_urls(base_url: str, llm: OpenAIProvider) -> List[Tuple[str, Any, dict]]:
+def get_chart_wizard_urls(
+    base_url: str, llm: ProviderManager
+) -> List[Tuple[str, Any, dict]]:
     """Get all chart wizard related URL patterns.
 
     Args:
@@ -17,7 +19,8 @@ def get_chart_wizard_urls(base_url: str, llm: OpenAIProvider) -> List[Tuple[str,
     Returns:
         List of (url_pattern, handler_class, handler_kwargs) tuples
     """
-    BASE_URL = base_url + "/mito-ai"
+    BASE_URL = base_url + "/mito-ai/chart-wizard"
     return [
-        (url_path_join(BASE_URL, "chart-wizard"), ChartWizardHandler, {"llm": llm}),
+        (url_path_join(BASE_URL, "convert"), ConvertChartHandler, {"llm": llm}),
+        (url_path_join(BASE_URL, "add-field"), AddFieldHandler, {"llm": llm}),
     ]
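Together, the handler and URL changes replace the single /mito-ai/chart-wizard endpoint with two endpoints under that prefix. A hedged usage sketch: the host, port, and token handling are assumptions about a default Jupyter Server setup, while the JSON field names come directly from the handlers above:

    import requests

    BASE = "http://localhost:8888/mito-ai/chart-wizard"  # assumed default Jupyter Server address
    HEADERS = {"Authorization": "token <your-jupyter-token>"}  # both handlers are @tornado.web.authenticated

    # Convert existing chart code
    resp = requests.post(f"{BASE}/convert", headers=HEADERS, json={"code": "df.plot(x='a', y='b')"})
    print(resp.json()["converted_code"])

    # Add a field to a chart; user_description is required (the handler returns 400 without it)
    resp = requests.post(
        f"{BASE}/add-field",
        headers=HEADERS,
        json={
            "code": "df.plot(x='a', y='b')",
            "user_description": "color the points by region",
            "existing_variables": ["df", "region"],
        },
    )
    print(resp.json()["updated_code"])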
mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py CHANGED
@@ -4,7 +4,7 @@
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.models import AgentResponse, AgentSmartDebugMetadata, MessageType, ResponseFormatInfo
 from mito_ai.completions.prompt_builders.agent_smart_debug_prompt import create_agent_smart_debug_prompt
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
 from mito_ai.completions.completion_handlers.utils import append_agent_system_message
@@ -17,14 +17,13 @@ class AgentAutoErrorFixupHandler(CompletionHandler[AgentSmartDebugMetadata]):
     @staticmethod
     async def get_completion(
         metadata: AgentSmartDebugMetadata,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get an agent auto error fixup completion from the AI provider."""
 
         # Add the system message if it doesn't alredy exist
-        await append_agent_system_message(message_history, model, provider, metadata.threadId, metadata.isChromeBrowser)
+        await append_agent_system_message(message_history, provider, metadata.threadId, metadata.isChromeBrowser)
 
         # Create the prompt
         prompt = create_agent_smart_debug_prompt(metadata)
@@ -34,12 +33,11 @@ class AgentAutoErrorFixupHandler(CompletionHandler[AgentSmartDebugMetadata]):
         new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
 
-        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)
+        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, provider, metadata.threadId)
 
         # Get the completion
         completion = await provider.request_completions(
             messages=message_history.get_ai_optimized_history(metadata.threadId),
-            model=model,
             response_format_info=ResponseFormatInfo(
                 name='agent_response',
                 format=AgentResponse
@@ -51,7 +49,7 @@ class AgentAutoErrorFixupHandler(CompletionHandler[AgentSmartDebugMetadata]):
 
         ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
 
-        await message_history.append_message(ai_response_message, ai_response_message, model, provider, metadata.threadId)
+        await message_history.append_message(ai_response_message, ai_response_message, provider, metadata.threadId)
 
         return completion
mito_ai/completions/completion_handlers/agent_execution_handler.py CHANGED
@@ -5,7 +5,7 @@ from typing import List, Literal, Union
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.models import AgentExecutionMetadata, MessageType, ResponseFormatInfo, AgentResponse
 from mito_ai.completions.prompt_builders.agent_execution_prompt import create_agent_execution_prompt
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
 from mito_ai.completions.completion_handlers.utils import append_agent_system_message, create_ai_optimized_message
@@ -18,9 +18,8 @@ class AgentExecutionHandler(CompletionHandler[AgentExecutionMetadata]):
     @staticmethod
     async def get_completion(
         metadata: AgentExecutionMetadata,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get an agent execution completion from the AI provider."""
 
@@ -31,7 +30,7 @@ class AgentExecutionHandler(CompletionHandler[AgentExecutionMetadata]):
         )
 
         # Add the system message if it doesn't alredy exist
-        await append_agent_system_message(message_history, model, provider, metadata.threadId, metadata.isChromeBrowser)
+        await append_agent_system_message(message_history, provider, metadata.threadId, metadata.isChromeBrowser)
 
         # Create the prompt
         prompt = create_agent_execution_prompt(metadata)
@@ -41,12 +40,11 @@ class AgentExecutionHandler(CompletionHandler[AgentExecutionMetadata]):
         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.additionalContext)
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
 
-        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)
+        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, provider, metadata.threadId)
 
         # Get the completion
         completion = await provider.request_completions(
             messages=message_history.get_ai_optimized_history(metadata.threadId),
-            model=model,
             response_format_info=ResponseFormatInfo(
                 name='agent_response',
                 format=AgentResponse
@@ -58,7 +56,7 @@ class AgentExecutionHandler(CompletionHandler[AgentExecutionMetadata]):
 
         ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
 
-        await message_history.append_message(ai_response_message, ai_response_message, model, provider, metadata.threadId)
+        await message_history.append_message(ai_response_message, ai_response_message, provider, metadata.threadId)
 
         return completion
mito_ai/completions/completion_handlers/chat_completion_handler.py CHANGED
@@ -6,7 +6,7 @@ from typing import List, Union, AsyncGenerator, Callable
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.models import ChatMessageMetadata, MessageType, CompletionRequest, CompletionStreamChunk, CompletionReply
 from mito_ai.completions.prompt_builders.chat_prompt import create_chat_prompt
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
 from mito_ai.completions.completion_handlers.utils import append_chat_system_message, create_ai_optimized_message
@@ -19,9 +19,8 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
     @staticmethod
     async def get_completion(
         metadata: ChatMessageMetadata,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get a chat completion from the AI provider."""
 
@@ -32,7 +31,7 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
         )
 
         # Add the system message if it doesn't alredy exist
-        await append_chat_system_message(message_history, model, provider, metadata.threadId)
+        await append_chat_system_message(message_history, provider, metadata.threadId)
 
         # Create the prompt
         prompt = create_chat_prompt(
@@ -49,30 +48,28 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
         # Add the prompt to the message history
         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.additionalContext)
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
-        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)
+        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, provider, metadata.threadId)
 
         # Get the completion (non-streaming)
         completion = await provider.request_completions(
             messages=message_history.get_ai_optimized_history(metadata.threadId),
-            model=model,
             message_type=MessageType.CHAT,
             user_input=metadata.input,
             thread_id=metadata.threadId
         )
 
         ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
-        await message_history.append_message(ai_response_message, ai_response_message, model, provider, metadata.threadId)
+        await message_history.append_message(ai_response_message, ai_response_message, provider, metadata.threadId)
 
         return completion
 
     @staticmethod
     async def stream_completion(
         metadata: ChatMessageMetadata,
-        provider: OpenAIProvider,
+        provider: ProviderManager,
         message_history: GlobalMessageHistory,
         message_id: str,
-        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
-        model: str
+        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None]
     ) -> str:
         """Stream chat completions from the AI provider.
 
@@ -95,7 +92,7 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
         )
 
         # Add the system message if it doesn't already exist
-        await append_chat_system_message(message_history, model, provider, metadata.threadId)
+        await append_chat_system_message(message_history, provider, metadata.threadId)
 
         # Create the prompt
         prompt = create_chat_prompt(
@@ -112,17 +109,16 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
         # Add the prompt to the message history
         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.additionalContext)
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
-        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)
+        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, provider, metadata.threadId)
 
         # Stream the completions using the provider's stream method
         accumulated_response = await provider.stream_completions(
             message_type=MessageType.CHAT,
             messages=message_history.get_ai_optimized_history(metadata.threadId),
-            model=model,
             message_id=message_id,
+            thread_id=metadata.threadId,
             reply_fn=reply_fn,
-            user_input=metadata.input,
-            thread_id=metadata.threadId
+            user_input=metadata.input
         )
 
         # Save the accumulated response to message history
@@ -131,7 +127,7 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
             "content": accumulated_response,
         }
         await message_history.append_message(
-            ai_response_message, ai_response_message, model, provider, metadata.threadId
+            ai_response_message, ai_response_message, provider, metadata.threadId
         )
 
         return accumulated_response
mito_ai/completions/completion_handlers/code_explain_handler.py CHANGED
@@ -5,7 +5,7 @@ from typing import List, Union, AsyncGenerator, Callable
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.models import CodeExplainMetadata, MessageType, CompletionRequest, CompletionStreamChunk, CompletionReply
 from mito_ai.completions.prompt_builders.explain_code_prompt import create_explain_code_prompt
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
 from mito_ai.completions.completion_handlers.utils import append_chat_system_message
@@ -18,16 +18,15 @@ class CodeExplainHandler(CompletionHandler[CodeExplainMetadata]):
     @staticmethod
     async def get_completion(
         metadata: CodeExplainMetadata,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get a code explain completion from the AI provider."""
         active_cell_code = metadata.activeCellCode or ''
         thread_id = metadata.threadId
 
         # Add the system message if it doesn't already exist
-        await append_chat_system_message(message_history, model, provider, thread_id)
+        await append_chat_system_message(message_history, provider, thread_id)
 
         # Create the prompt
         prompt = create_explain_code_prompt(active_cell_code)
@@ -37,31 +36,29 @@ class CodeExplainHandler(CompletionHandler[CodeExplainMetadata]):
         new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
         await message_history.append_message(
-            new_ai_optimized_message, new_display_optimized_message, model, provider, thread_id
+            new_ai_optimized_message, new_display_optimized_message, provider, thread_id
         )
 
         # Get the completion
         completion = await provider.request_completions(
             messages=message_history.get_ai_optimized_history(thread_id),
-            model=model,
             message_type=MessageType.CODE_EXPLAIN,
             thread_id=thread_id
         )
 
         # Add the response to message history
         ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
-        await message_history.append_message(ai_response_message, ai_response_message, model, provider, thread_id)
+        await message_history.append_message(ai_response_message, ai_response_message, provider, thread_id)
 
         return completion
 
     @staticmethod
     async def stream_completion(
         metadata: CodeExplainMetadata,
-        provider: OpenAIProvider,
+        provider: ProviderManager,
         message_history: GlobalMessageHistory,
         message_id: str,
-        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
-        model: str
+        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None]
     ) -> str:
         """Stream code explain completions from the AI provider.
 
@@ -79,7 +76,7 @@ class CodeExplainHandler(CompletionHandler[CodeExplainMetadata]):
         thread_id = metadata.threadId
 
         # Add the system message if it doesn't already exist
-        await append_chat_system_message(message_history, model, provider, thread_id)
+        await append_chat_system_message(message_history, provider, thread_id)
 
         # Create the prompt
         prompt = create_explain_code_prompt(active_cell_code)
@@ -89,22 +86,21 @@ class CodeExplainHandler(CompletionHandler[CodeExplainMetadata]):
         new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
         await message_history.append_message(
-            new_ai_optimized_message, new_display_optimized_message, model, provider, thread_id
+            new_ai_optimized_message, new_display_optimized_message, provider, thread_id
        )
 
         # Stream the completions using the provider's stream method
         accumulated_response = await provider.stream_completions(
             message_type=MessageType.CODE_EXPLAIN,
             messages=message_history.get_ai_optimized_history(thread_id),
-            model=model,
             message_id=message_id,
-            reply_fn=reply_fn,
-            thread_id=thread_id
+            thread_id=thread_id,
+            reply_fn=reply_fn
         )
 
         # Add the response to message history
         ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": accumulated_response}
-        await message_history.append_message(ai_response_message, ai_response_message, model, provider, thread_id)
+        await message_history.append_message(ai_response_message, ai_response_message, provider, thread_id)
 
         return accumulated_response
mito_ai/completions/completion_handlers/completion_handler.py CHANGED
@@ -4,7 +4,7 @@
 from typing import Protocol, TypeVar
 from abc import abstractmethod, ABCMeta
 from mito_ai.completions.models import ChatMessageMetadata, ScratchpadResultMetadata, SmartDebugMetadata, CodeExplainMetadata, AgentExecutionMetadata, InlineCompleterMetadata, AgentSmartDebugMetadata
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 
 T = TypeVar('T',
@@ -29,9 +29,8 @@ class CompletionHandler(Protocol[T], metaclass=ABCMeta):
     @abstractmethod
     async def get_completion(
         metadata: T,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get a completion from the AI provider.
 
@@ -39,7 +38,6 @@ class CompletionHandler(Protocol[T], metaclass=ABCMeta):
             metadata: Metadata about the completion request
             provider: The AI provider to use
             message_history: The history of messages in the conversation
-            model: The model to use for the completion
 
         Returns:
             The completion string from the AI
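The same signature change runs through every completion handler in this release: the per-call model: str parameter is gone, and model selection moves inside ProviderManager (see the use_fast_model=True flag in the chart wizard handlers). A minimal sketch of a handler conforming to the new protocol; MyMetadata and build_prompt are hypothetical stand-ins, while the provider and message-history calls mirror the diffs above:

    from openai.types.chat import ChatCompletionMessageParam
    from mito_ai.provider_manager import ProviderManager
    from mito_ai.completions.message_history import GlobalMessageHistory
    from mito_ai.completions.models import MessageType

    class MyHandler:
        @staticmethod
        async def get_completion(
            metadata,  # hypothetical MyMetadata; must carry a threadId like the real metadata types
            provider: ProviderManager,
            message_history: GlobalMessageHistory
        ) -> str:
            prompt = build_prompt(metadata)  # hypothetical prompt builder
            message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
            await message_history.append_message(message, message, provider, metadata.threadId)

            # No model argument: the ProviderManager resolves the model itself
            completion = await provider.request_completions(
                messages=message_history.get_ai_optimized_history(metadata.threadId),
                message_type=MessageType.CHAT,
                thread_id=metadata.threadId,
            )

            ai_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
            await message_history.append_message(ai_message, ai_message, provider, metadata.threadId)
            return completion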