mito-ai 0.1.56__py3-none-any.whl → 0.1.58__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95) hide show
  1. mito_ai/__init__.py +17 -21
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +24 -14
  4. mito_ai/chart_wizard/__init__.py +3 -0
  5. mito_ai/chart_wizard/handlers.py +113 -0
  6. mito_ai/chart_wizard/urls.py +26 -0
  7. mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +6 -8
  8. mito_ai/completions/completion_handlers/agent_execution_handler.py +6 -8
  9. mito_ai/completions/completion_handlers/chat_completion_handler.py +13 -17
  10. mito_ai/completions/completion_handlers/code_explain_handler.py +13 -17
  11. mito_ai/completions/completion_handlers/completion_handler.py +14 -7
  12. mito_ai/completions/completion_handlers/inline_completer_handler.py +5 -6
  13. mito_ai/completions/completion_handlers/scratchpad_result_handler.py +64 -0
  14. mito_ai/completions/completion_handlers/smart_debug_handler.py +13 -17
  15. mito_ai/completions/completion_handlers/utils.py +3 -7
  16. mito_ai/completions/handlers.py +36 -21
  17. mito_ai/completions/message_history.py +8 -10
  18. mito_ai/completions/models.py +23 -2
  19. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +5 -3
  20. mito_ai/completions/prompt_builders/agent_system_message.py +97 -5
  21. mito_ai/completions/prompt_builders/chart_add_field_prompt.py +35 -0
  22. mito_ai/completions/prompt_builders/chart_conversion_prompt.py +27 -0
  23. mito_ai/completions/prompt_builders/chat_system_message.py +2 -0
  24. mito_ai/completions/prompt_builders/prompt_constants.py +28 -0
  25. mito_ai/completions/prompt_builders/scratchpad_result_prompt.py +17 -0
  26. mito_ai/constants.py +8 -1
  27. mito_ai/enterprise/__init__.py +1 -1
  28. mito_ai/enterprise/litellm_client.py +137 -0
  29. mito_ai/log/handlers.py +1 -1
  30. mito_ai/openai_client.py +10 -90
  31. mito_ai/{completions/providers.py → provider_manager.py} +157 -53
  32. mito_ai/settings/enterprise_handler.py +26 -0
  33. mito_ai/settings/urls.py +2 -0
  34. mito_ai/streamlit_conversion/agent_utils.py +2 -30
  35. mito_ai/streamlit_conversion/streamlit_agent_handler.py +48 -46
  36. mito_ai/streamlit_preview/handlers.py +6 -3
  37. mito_ai/streamlit_preview/urls.py +5 -3
  38. mito_ai/tests/message_history/test_generate_short_chat_name.py +72 -28
  39. mito_ai/tests/providers/test_anthropic_client.py +174 -16
  40. mito_ai/tests/providers/test_azure.py +13 -13
  41. mito_ai/tests/providers/test_capabilities.py +14 -17
  42. mito_ai/tests/providers/test_gemini_client.py +14 -13
  43. mito_ai/tests/providers/test_model_resolution.py +145 -89
  44. mito_ai/tests/providers/test_openai_client.py +209 -13
  45. mito_ai/tests/providers/test_provider_limits.py +5 -5
  46. mito_ai/tests/providers/test_providers.py +229 -51
  47. mito_ai/tests/providers/test_retry_logic.py +13 -22
  48. mito_ai/tests/providers/utils.py +4 -4
  49. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +57 -85
  50. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +4 -1
  51. mito_ai/tests/test_enterprise_mode.py +162 -0
  52. mito_ai/tests/test_model_utils.py +271 -0
  53. mito_ai/utils/anthropic_utils.py +8 -6
  54. mito_ai/utils/gemini_utils.py +0 -3
  55. mito_ai/utils/litellm_utils.py +84 -0
  56. mito_ai/utils/model_utils.py +178 -0
  57. mito_ai/utils/open_ai_utils.py +0 -8
  58. mito_ai/utils/provider_utils.py +6 -21
  59. mito_ai/utils/telemetry_utils.py +14 -2
  60. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -102
  61. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  62. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  63. mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.dfd7975de75d64db80d6.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js +2992 -282
  64. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js.map +1 -0
  65. mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.1e7b5cf362385f109883.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js +17 -17
  66. mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.1e7b5cf362385f109883.js.map → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js.map +1 -1
  67. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +7 -2
  68. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/METADATA +2 -1
  69. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/RECORD +94 -81
  70. mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.dfd7975de75d64db80d6.js.map +0 -1
  71. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  72. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  73. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  74. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  75. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  76. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
  77. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
  78. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  79. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  80. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  81. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  82. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  83. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  84. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  85. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  86. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  87. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  88. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  89. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  90. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  91. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  92. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
  93. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/WHEEL +0 -0
  94. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/entry_points.txt +0 -0
  95. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/licenses/LICENSE +0 -0
@@ -4,13 +4,13 @@
4
4
  from typing import List
5
5
  from mito_ai.completions.prompt_builders.prompt_section_registry import SG, Prompt
6
6
  from mito_ai.completions.prompt_builders.prompt_constants import (
7
+ CHART_CONFIG_RULES,
7
8
  CITATION_RULES,
8
9
  CELL_REFERENCE_RULES,
9
10
  get_database_rules
10
11
  )
11
12
  from mito_ai.completions.prompt_builders.prompt_section_registry.base import PromptSection
12
13
 
13
-
14
14
  def create_agent_system_message_prompt(isChromeBrowser: bool) -> str:
15
15
 
16
16
  # The GET_CELL_OUTPUT tool only works on Chrome based browsers.
@@ -28,6 +28,8 @@ The user is going to ask you to guide them as they complete a task. You will hel
28
28
  You have access to a set of tools that you can use to accomplish the task you've been given. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
29
29
 
30
30
  Each time you use a tool, except for the finished_task tool, the user will execute the tool and provide you with updated information about the notebook and variables defined in the kernel to help you decide what to do next."""))
31
+
32
+ sections.append(SG.Generic("Chart Config Rules", CHART_CONFIG_RULES))
31
33
 
32
34
  sections.append(SG.Generic("TOOL: CELL_UPDATE", """
33
35
 
@@ -223,6 +225,85 @@ Important information:
223
225
  4. If running all cells results in an error, the system will automatically handle the error through the normal error fixing process.
224
226
  5. Do not use this tool repeatedly if it continues to produce errors - instead, focus on fixing the specific error that occurred."""))
225
227
 
228
+ # SCRATCHPAD tool
229
+ sections.append(SG.Generic("TOOL: SCRATCHPAD", """
230
+ When you need to explore data, check the filesystem, analyze mappings, or look up values without leaving code in the notebook, use the SCRATCHPAD tool.
231
+
232
+ Format:
233
+ {{
234
+ type: 'scratchpad',
235
+ message: str,
236
+ scratchpad_code: str,
237
+ scratchpad_summary: str
238
+ }}
239
+
240
+ Important information:
241
+ 1. The scratchpad_code will execute silently against the same kernel as your notebook, so you have access to all variables and dataframes.
242
+ 2. Any variables you create in scratchpad code MUST be prefixed with "scratch_" (e.g., use "scratch_temp_df" not "temp_df", use "scratch_files" not "files"). This prevents them from appearing in the variable list and confusing future decisions.
243
+ 3. CRITICAL: Do NOT modify existing variables. If you need to explore or modify data, create a copy with the scratch_ prefix first. For example, use "scratch_df = df.copy()" and then modify scratch_df, rather than modifying df directly. This ensures existing variables remain unchanged.
244
+ 4. Structure your code to print the information you need. Use print() statements for output that will be captured.
245
+ 5. If you need structured data, consider using JSON: `import json; print(json.dumps(your_data))`
246
+ 6. The results (including any errors) will be included in your next message, so you can use them to inform your next action.
247
+ 7. If the code errors, the error message and traceback will be included in the results. You can then decide to fix the code and try again, ask the user a question, or take a different approach.
248
+ 8. Use scratchpad for exploration work that doesn't belong in the final notebook. Once you have the information, create clean CELL_UPDATES with hardcoded values.
249
+ 9. The scratchpad_summary must be a very short phrase (1–5 words maximum) that begins with a verb ending in "-ing" (e.g., "Checking files", "Exploring data", "Analyzing mappings", "Looking up values"). Avoid full sentences or explanations. This should read like a quick commit message or code label, not a description.
250
+
251
+ Example:
252
+ {{
253
+ type: 'scratchpad',
254
+ message: "I'll check what files are in the current directory to find the data file.",
255
+ scratchpad_code: "import os\\nscratch_files = os.listdir('.')\\nprint('Files:', scratch_files)\\nfor scratch_file in scratch_files:\\n if scratch_file.endswith('.csv'):\\n print(f'CSV file found: {scratch_file}')",
256
+ scratchpad_summary: "Checking files"
257
+ }}
258
+
259
+ Example with dataframe exploration:
260
+ {{
261
+ type: 'scratchpad',
262
+ message: "I'll explore the dataframe structure to understand the columns.",
263
+ scratchpad_code: "scratch_df = df.copy()\\nprint('Columns:', scratch_df.columns.tolist())\\nprint('Shape:', scratch_df.shape)\\nprint('First few rows:')\\nprint(scratch_df.head())",
264
+ scratchpad_summary: "Exploring dataframe"
265
+ }}
266
+ """))
267
+
268
+ # ASK_USER_QUESTION tool
269
+ sections.append(SG.Generic("TOOL: ASK_USER_QUESTION", f"""
270
+
271
+ When you have a specific question that you want the user to answer so that you can figure out how to proceed in your work, you can respond in this format:
272
+
273
+ {{
274
+ type: 'ask_user_question',
275
+ message: str,
276
+ question: str,
277
+ answers: Optional[List[str]]
278
+ }}
279
+
280
+ Important information:
281
+ 1. Use this tool when you need clarification from the user on how to proceed. Common scenarios include:
282
+ - A file or resource doesn't exist and you need to know how to proceed
283
+ - There are multiple valid approaches and you want the user to choose
284
+ - You need clarification on ambiguous requirements
285
+ - You encounter an error that requires user input to resolve
286
+ 2. The message should be a short description of what you've tried so far and why you need to ask the user a question now. This helps the user understand the context. The message provides background information but should NOT contain the actual question.
287
+ 3. The question field is REQUIRED and must always be provided. It cannot be null or empty. The question should be a clear, direct question that ends with a question mark. It should be concise and direct - do NOT include instructions or explanations in the question itself, as the answer options should make it clear what information is needed. For example, instead of "Which companies would you like me to compare Meta's stock performance against? Please provide a list of company names or stock tickers", just ask "Which companies would you like me to compare Meta's stock performance against?" The answer options will make it clear that company names or tickers are expected.
288
+ 4. The message and question serve different purposes: the message provides context about what you've tried, while the question is the actual question the user needs to answer. If your message contains a question, extract that question into the question field. For example, if your message says "I need to understand how you'd like to access the tweets", your question should be something like "How would you like to access the tweets?"
289
+ 5. Use the optional list of answers to provide the user multiple choice options to choose from. If it is an open ended question that you cannot create helpful multiple choice answers for, leave it blank and the user will respond in the text input field.
290
+ 6. When creating multiple choice answer options:
291
+ - Make each option distinct and meaningful - avoid options that differ only slightly from each other.
292
+ - If there are no obvious predefined answers, leave it blank and the user will respond in the text input field.
293
+ 7. After the user responds to your question, you will receive their response in the next message and can continue with the task based on their answer.
294
+ 8. Do not use this tool for trivial questions that you can infer from context. Only use it when you cannot proceed in the user's task without answering your specific question first.
295
+
296
+ <Example>
297
+ {{
298
+ type: 'ask_user_question',
299
+ message: "I tried importing apple_prices.csv and confirmed that it does not exist in the current working directory.",
300
+ question: "The file apple_prices.csv does not exist. How do you want to proceed?",
301
+ answers: ["Pull Apple Stock prices using yfinance API", "Create placeholder data", "Skip this step"]
302
+ }}
303
+ </Example>
304
+
305
+ """))
306
+
226
307
  # CREATE_STREAMLIT_APP tool
227
308
  sections.append(SG.Generic("TOOL: CREATE_STREAMLIT_APP", """
228
309
 
@@ -316,18 +397,29 @@ Important information:
316
397
  # RULES section
317
398
  sections.append(SG.Generic("RULES", """
318
399
  - You are working in a Jupyter Lab environment in a .ipynb file.
319
- - In each message you can choose one of the tools to respond with. BUT YOU WILL GET TO SEND MULTIPLE MESSAGES TO THE USER TO ACCOMPLISH YOUR TASK SO DO NOT TRY TO ACCOMPLISH YOUR TASK IN A SINGLE MESSAGE.
400
+ - In each message you can choose one of the tools to respond with. YOU WILL GET TO SEND MULTIPLE MESSAGES TO THE USER TO ACCOMPLISH YOUR TASK SO DO NOT TRY TO ACCOMPLISH YOUR TASK IN A SINGLE MESSAGE.
320
401
  - After you send a CELL_UPDATE, the user will send you a message with the updated variables, code, and files in the current directory. You will use this information to decide what to do next, so it is critical that you wait for the user's response after each CELL_UPDATE before deciding your next action.
321
- - When updating code, keep as much of the original code as possible and do not recreate variables that already exist.
322
402
  - When writing the message, do not explain to the user how to use the CELL_UPDATE or FINISHED_TASK response, they will already know how to use them. Just provide a summary of your thought process. Do not reference any Cell IDs in the message.
323
403
  - When writing the message, do not include leading words like "Explanation:" or "Thought process:". Just provide a summary of your thought process.
324
404
  - When writing the message, use tickmarks when referencing specific variable names. For example, write `sales_df` instead of "sales_df" or just sales_df."""))
325
405
 
326
406
  # CODE STYLE section
327
407
  sections.append(SG.Generic("CODE STYLE", """
328
- - Avoid using try/except blocks and other defensive programming patterns (like checking if files exist before reading them, verifying variables are defined before using them, etc.) unless there is a really good reason. In Jupyter notebooks, errors should surface immediately so users can identify and fix issues. When errors are caught and suppressed or when defensive checks hide problems, users continue running broken code without realizing it, and the agent's auto-error-fix loop cannot trigger. If a column doesn't exist, a file is missing, a variable isn't defined, or a module isn't installed, let it error. The user needs to know.
408
+ - When updating code, keep as much of the original code as possible and do not recreate variables that already exist.
329
409
  - When you want to display a dataframe to the user, just write the dataframe on the last line of the code cell instead of writing print(<dataframe name>). Jupyter will automatically display the dataframe in the notebook.
330
- - When importing matplotlib, write the code `%matplotlib inline` to make sure the graphs render in Jupyter."""))
410
+ - When importing matplotlib, write the code `%matplotlib inline` to make sure the graphs render in Jupyter.
411
+ - Avoid adding try/except blocks unless there is a very good reason. Do not use them for things like:
412
+ ```
413
+ try:
414
+ df = pd.read_csv('my_data.csv')
415
+ except:
416
+ print("File not found")
417
+ ```
418
+ Instead, just let the cell error and use the ask_user_question tool to figure out how to proceed.
419
+ - Avoid defensive if statements like checking if a variable exists in the globals or verifying that a column exists. Instead, just let the code error and use the ask_user_question tool to figure out how to proceed.
420
+ - Do not simulate the data without the user explicitly asking you to do so.
421
+ - Do not replace broken code with print statements that explain the issue. Instead, leave the broken code in the notebook and use the ask_user_question tool to communicate the issue to the user and figure out how to proceed.
422
+ """))
331
423
 
332
424
  # CITATION_RULES
333
425
  sections.append(SG.Generic("Citation Rules", f"""{CITATION_RULES}
@@ -0,0 +1,35 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ from typing import List
5
+ from mito_ai.completions.prompt_builders.prompt_section_registry import SG, Prompt
6
+ from mito_ai.completions.prompt_builders.prompt_section_registry.base import PromptSection
7
+ from mito_ai.completions.prompt_builders.prompt_constants import CHART_CONFIG_RULES
8
+
9
+ def create_chart_add_field_prompt(code: str, user_description: str, existing_variables: List[str]) -> str:
10
+ """
11
+ Create a prompt for adding a new field to the chart configuration.
12
+
13
+ Args:
14
+ code: The current chart code
15
+ user_description: The user's description of what field they want to add
16
+ existing_variables: List of existing variable names in the config
17
+
18
+ Returns:
19
+ A formatted prompt string
20
+ """
21
+ sections: List[PromptSection] = []
22
+
23
+ sections.append(SG.Generic("Instructions", "The user wants to add a new field to the chart configuration. You need to:\n1. Understand what field the user wants to add based on their description\n2. Add the appropriate variable to the chart configuration section\n3. Use the variable in the chart code where appropriate\n4. Return the complete updated code\n\nIMPORTANT: If you cannot add the requested field (e.g., the request is unclear, ambiguous, or not applicable to chart configuration), do NOT return any code block. Simply respond with a brief explanation without including any Python code blocks."))
24
+
25
+ sections.append(SG.Generic("Chart Config Rules", CHART_CONFIG_RULES))
26
+
27
+ existing_vars_text = ", ".join(existing_variables) if existing_variables else "none"
28
+ sections.append(SG.Generic("Existing Variables", f"The following variables already exist in the chart configuration: {existing_vars_text}"))
29
+
30
+ sections.append(SG.Generic("User Request", f"The user wants to add a field for: {user_description}"))
31
+
32
+ sections.append(SG.Generic("Current Code", f"```python\n{code}\n```"))
33
+
34
+ prompt = Prompt(sections)
35
+ return str(prompt)
@@ -0,0 +1,27 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ from typing import List
5
+ from mito_ai.completions.prompt_builders.prompt_section_registry import SG, Prompt
6
+ from mito_ai.completions.prompt_builders.prompt_section_registry.base import PromptSection
7
+ from mito_ai.completions.prompt_builders.prompt_constants import CHART_CONFIG_RULES
8
+
9
+ def create_chart_conversion_prompt(code: str) -> str:
10
+ """
11
+ Create a prompt for converting matplotlib chart code to be used with the Chart Wizard.
12
+
13
+ Args:
14
+ code: The matplotlib chart code to convert
15
+
16
+ Returns:
17
+ A formatted prompt string
18
+ """
19
+ sections: List[PromptSection] = []
20
+
21
+ sections.append(SG.Generic("Instructions", "The following code contains a matplotlib chart. However, the chart must be converted to a specific format for use in our tool. Below you will find the rules used to create an acceptable chart; use these rules to reformat the code."))
22
+
23
+ sections.append(SG.Generic("Chart Config Rules", CHART_CONFIG_RULES))
24
+ sections.append(SG.Generic("Code to Convert", f"```python\n{code}\n```"))
25
+
26
+ prompt = Prompt(sections)
27
+ return str(prompt)
@@ -4,6 +4,7 @@
4
4
  from typing import List
5
5
  from mito_ai.completions.prompt_builders.prompt_section_registry import SG, Prompt
6
6
  from mito_ai.completions.prompt_builders.prompt_constants import (
7
+ CHART_CONFIG_RULES,
7
8
  CHAT_CODE_FORMATTING_RULES,
8
9
  CITATION_RULES,
9
10
  CELL_REFERENCE_RULES,
@@ -31,6 +32,7 @@ Other useful information:
31
32
  2. If the user asks you to generate a dashboard, app, or streamlit app for them, you should tell them that they must use Agent mode to complete the task. You are not able to automatically switch the user to agent mode, but they can switch to it themselves by using the Chat/Agent mode toggle in the bottom left corner of the Ai taskpane.
32
33
  """))
33
34
 
35
+ sections.append(SG.Generic("Chart Config Rules", CHART_CONFIG_RULES))
34
36
  sections.append(SG.Generic("DatabaseRules", get_database_rules()))
35
37
  sections.append(SG.Generic("Citation Rules", CITATION_RULES))
36
38
  sections.append(SG.Generic("Cell Reference Rules", CELL_REFERENCE_RULES))
@@ -11,6 +11,34 @@ import json
11
11
  from typing import Final
12
12
  from mito_ai.utils.schema import MITO_FOLDER
13
13
 
14
+ CHART_CONFIG_RULES = """
15
+ When creating a matplotlib chart, you must use the `# === CHART CONFIG ===` and `# === END CONFIG ===` markers to indicate the start and end of the chart configuration section.
16
+
17
+ The chart configuration section is a list of variables used to customize the chart. This includes the titles, labels, colors, and any variables that affect the chart's appearance.
18
+
19
+ Rules:
20
+ - All imports must appear at the top, before the chart configuration section.
21
+ - Variables with multiple words should be underscore-separated.
22
+ - All colors should be in hex format (e.g., "#3498db"). Use quotes around the hex string: COLOR = "#3498db" or COLOR = '#3498db'. Do NOT nest quotes.
23
+ - Variables can only be strings, numbers, booleans, tuples, or lists.
24
+ - NEVER include comments on the same line as a variable assignment. Each variable assignment must be on its own line with no trailing comments.
25
+ - For string values, use either single or double quotes (e.g., TITLE = "Sales by Product" or TITLE = 'Sales by Product'). Do not use nested quotes (e.g., do NOT use '"value"').
26
+
27
+ Common Mistakes to Avoid:
28
+ - WRONG: COLOR = '"#1877F2" # Meta Blue' (nested quotes and inline comment)
29
+ - WRONG: COLOR = "#1877F2" # Meta Blue (inline comment)
30
+ - WRONG: COLOR = '"#1877F2"' (nested quotes)
31
+ - CORRECT: COLOR = "#1877F2" (simple hex string, no nested quotes, no inline comments)
32
+
33
+ Example:
34
+ # === CHART CONFIG ===
35
+ TITLE = "Sales by Product"
36
+ X_LABEL = "Product"
37
+ Y_LABEL = "Sales"
38
+ BAR_COLOR = "#000000"
39
+ # === END CONFIG ===
40
+ """
41
+
14
42
  CITATION_RULES = """
15
43
  It is important that the user is able to verify any insights that you share with them about their data. To make this easy for the user, you must cite the lines of code that you are drawing the insight from. To provide a citation, use one of the following formats inline in your response:
16
44
 
@@ -0,0 +1,17 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ from typing import List
5
+ from mito_ai.completions.models import ScratchpadResultMetadata
6
+ from mito_ai.completions.prompt_builders.prompt_section_registry import SG, Prompt
7
+ from mito_ai.completions.prompt_builders.prompt_section_registry.base import PromptSection
8
+
9
+
10
+ def create_scratchpad_result_prompt(md: ScratchpadResultMetadata) -> str:
11
+ sections: List[PromptSection] = [
12
+ SG.Generic("Reminder", "Continue working on your current task using the scratchpad results below."),
13
+ SG.Generic("Scratchpad Result", f"The result of your scratchpad is: {md.scratchpadResult}"),
14
+ ]
15
+
16
+ prompt = Prompt(sections)
17
+ return str(prompt)
mito_ai/constants.py CHANGED
@@ -5,7 +5,7 @@ import os
5
5
  from typing import Union
6
6
 
7
7
  # Claude
8
- CLAUDE_API_KEY = os.environ.get("CLAUDE_API_KEY")
8
+ ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY")
9
9
 
10
10
  # Gemini
11
11
  GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
@@ -23,6 +23,13 @@ AZURE_OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION")
23
23
  AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
24
24
  AZURE_OPENAI_MODEL = os.environ.get("AZURE_OPENAI_MODEL")
25
25
 
26
+ # LiteLLM Config (Enterprise mode only)
27
+ LITELLM_BASE_URL = os.environ.get("LITELLM_BASE_URL")
28
+ LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
29
+ LITELLM_MODELS_STR = os.environ.get("LITELLM_MODELS", "")
30
+ # Parse comma-separated string into list, strip whitespace
31
+ LITELLM_MODELS = [model.strip() for model in LITELLM_MODELS_STR.split(",") if model.strip()] if LITELLM_MODELS_STR else []
32
+
26
33
  # Mito AI Base URLs and Endpoint Paths
27
34
  MITO_PROD_BASE_URL = "https://7eax4i53f5odkshhlry4gw23by0yvnuv.lambda-url.us-east-1.on.aws/v2"
28
35
  MITO_DEV_BASE_URL = "https://g5vwmogjg7gh7aktqezyrvcq6a0hyfnr.lambda-url.us-east-1.on.aws/v2"
@@ -1,3 +1,3 @@
1
1
  # Copyright (c) Saga Inc.
2
- # Distributed under the terms of the GNU Affero General Public License v3.0 License.
2
+ # Distributed under the terms of the Enterprise License at the root of this repository.
3
3
 
@@ -0,0 +1,137 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the Mito Enterprise license.
3
+
4
+ from typing import Optional, List, Callable, Union, Dict, Any
5
+ from openai.types.chat import ChatCompletionMessageParam
6
+ from mito_ai.completions.models import (
7
+ MessageType,
8
+ ResponseFormatInfo,
9
+ CompletionReply,
10
+ CompletionStreamChunk,
11
+ CompletionItem,
12
+ )
13
+ from mito_ai.utils.litellm_utils import get_litellm_completion_function_params
14
+ import litellm
15
+
16
class LiteLLMClient:
    """
    A client for interacting with LiteLLM server endpoints.
    LiteLLM provides an OpenAI-compatible API, so we use the LiteLLM SDK directly.
    """

    def __init__(self, api_key: Optional[str], base_url: str, timeout: int = 30, max_retries: int = 1):
        """
        Args:
            api_key: API key for the LiteLLM server (may be None for servers
                that do not require authentication).
            base_url: Base URL of the LiteLLM server.
            timeout: Per-request timeout in seconds.
            max_retries: Maximum retry count. NOTE(review): stored but not
                currently forwarded to litellm — confirm whether it should be
                passed through get_litellm_completion_function_params.
        """
        self.api_key = api_key
        self.base_url = base_url
        self.timeout = timeout
        self.max_retries = max_retries

    async def request_completions(
        self,
        messages: List[ChatCompletionMessageParam],
        model: str,  # Should include provider prefix (e.g., "openai/gpt-4o")
        response_format_info: Optional[ResponseFormatInfo] = None,
        message_type: MessageType = MessageType.CHAT
    ) -> str:
        """
        Request completions from LiteLLM server.

        Args:
            messages: List of chat messages
            model: Model name with provider prefix (e.g., "openai/gpt-4o")
            response_format_info: Optional response format specification
            message_type: Type of message (chat, agent execution, etc.).
                Currently unused by this method; kept for interface parity.

        Returns:
            The completion text response ("" when the server returns no choices)

        Raises:
            Exception: Wraps any error raised by litellm, chaining the original
                exception so the root cause is preserved in tracebacks.
        """
        # Prepare parameters for LiteLLM
        params = get_litellm_completion_function_params(
            model=model,
            messages=messages,
            api_key=self.api_key,
            api_base=self.base_url,
            timeout=self.timeout,
            stream=False,
            response_format_info=response_format_info,
        )

        try:
            # Use LiteLLM's acompletion function; keep the try body minimal so
            # only transport/provider failures are wrapped here.
            response = await litellm.acompletion(**params)
        except Exception as e:
            # Chain with `from e` so the original error is not lost.
            raise Exception(f"LiteLLM completion error: {str(e)}") from e

        # Extract content from response; tolerate empty/odd responses.
        if response and response.choices and len(response.choices) > 0:
            content = response.choices[0].message.content
            return content or ""
        return ""

    async def stream_completions(
        self,
        messages: List[ChatCompletionMessageParam],
        model: str,
        message_type: MessageType,
        message_id: str,
        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
        response_format_info: Optional[ResponseFormatInfo] = None
    ) -> str:
        """
        Stream completions from LiteLLM server.

        Args:
            messages: List of chat messages
            model: Model name with provider prefix (e.g., "openai/gpt-4o")
            message_type: Type of message (chat, agent execution, etc.).
                Currently unused by this method; kept for interface parity.
            message_id: ID of the message being processed
            reply_fn: Function to call with each chunk for streaming replies
            response_format_info: Optional response format specification

        Returns:
            The accumulated response string

        Raises:
            Exception: Wraps any error raised while opening or consuming the
                stream, chaining the original exception.
        """
        accumulated_response = ""

        # Prepare parameters for LiteLLM
        params = get_litellm_completion_function_params(
            model=model,
            messages=messages,
            api_key=self.api_key,
            api_base=self.base_url,
            timeout=self.timeout,
            stream=True,
            response_format_info=response_format_info,
        )

        try:
            # When stream=True, acompletion returns an async iterable after awaiting.
            stream = await litellm.acompletion(**params)

            # Process streaming chunks. Errors can surface mid-iteration, so the
            # loop stays inside the try block.
            async for chunk in stream:
                if chunk and chunk.choices and len(chunk.choices) > 0:
                    delta = chunk.choices[0].delta
                    content = delta.content if delta and delta.content else ""

                    if content:
                        accumulated_response += content

                    # A non-None finish_reason marks the final chunk.
                    is_finished = chunk.choices[0].finish_reason is not None

                    # Send chunk to frontend
                    reply_fn(CompletionStreamChunk(
                        parent_id=message_id,
                        chunk=CompletionItem(
                            content=content,
                            isIncomplete=not is_finished,
                            token=message_id,
                        ),
                        done=is_finished,
                    ))

            return accumulated_response
        except Exception as e:
            # Chain with `from e` so the original error is not lost.
            raise Exception(f"LiteLLM streaming error: {str(e)}") from e
mito_ai/log/handlers.py CHANGED
@@ -32,7 +32,7 @@ class LogHandler(APIHandler):
32
32
  log_event = data['log_event']
33
33
  params = data.get('params', {})
34
34
 
35
- key_type = MITO_SERVER_KEY if self.key_type == "mito_server_key" else USER_KEY
35
+ key_type = MITO_SERVER_KEY if self.key_type == MITO_SERVER_KEY else USER_KEY
36
36
  log(log_event, params, key_type=key_type)
37
37
 
38
38
 
mito_ai/openai_client.py CHANGED
@@ -7,7 +7,7 @@ from typing import Any, AsyncGenerator, Callable, Dict, List, Optional, Union
7
7
  from mito_ai.utils.mito_server_utils import ProviderCompletionException
8
8
  import openai
9
9
  from openai.types.chat import ChatCompletionMessageParam
10
- from traitlets import Instance, Unicode, default, validate
10
+ from traitlets import Instance, default, validate
11
11
  from traitlets.config import LoggingConfigurable
12
12
 
13
13
  from mito_ai import constants
@@ -30,22 +30,12 @@ from mito_ai.utils.open_ai_utils import (
30
30
  stream_ai_completion_from_mito_server,
31
31
  )
32
32
  from mito_ai.utils.server_limits import update_mito_server_quota
33
- from mito_ai.utils.telemetry_utils import (
34
- MITO_SERVER_KEY,
35
- USER_KEY,
36
- )
37
33
 
38
34
  OPENAI_MODEL_FALLBACK = "gpt-4.1"
39
35
 
40
36
  class OpenAIClient(LoggingConfigurable):
41
37
  """Provide AI feature through OpenAI services."""
42
38
 
43
- api_key = Unicode(
44
- config=True,
45
- allow_none=True,
46
- help="OpenAI API key. Default value is read from the OPENAI_API_KEY environment variable.",
47
- )
48
-
49
39
  last_error = Instance(
50
40
  CompletionError,
51
41
  allow_none=True,
@@ -65,61 +55,6 @@ This attribute is observed by the websocket provider to push the error to the cl
65
55
  super().__init__(log=get_logger(), **kwargs)
66
56
  self.last_error = None
67
57
  self._async_client: Optional[openai.AsyncOpenAI] = None
68
-
69
- @default("api_key")
70
- def _api_key_default(self) -> Optional[str]:
71
- default_key = constants.OPENAI_API_KEY
72
- return self._validate_api_key(default_key)
73
-
74
- @validate("api_key")
75
- def _validate_api_key(self, api_key: Optional[str]) -> Optional[str]:
76
- if not api_key:
77
- self.log.debug(
78
- "No OpenAI API key provided; following back to Mito server API."
79
- )
80
- return None
81
-
82
- client = openai.OpenAI(api_key=api_key)
83
- try:
84
- # Make an http request to OpenAI to make sure it works
85
- client.models.list()
86
- except openai.AuthenticationError as e:
87
- self.log.warning(
88
- "Invalid OpenAI API key provided.",
89
- exc_info=e,
90
- )
91
- self.last_error = CompletionError.from_exception(
92
- e,
93
- hint="You're missing the OPENAI_API_KEY environment variable. Run the following code in your terminal to set the environment variable and then relaunch the jupyter server `export OPENAI_API_KEY=<your-api-key>`",
94
- )
95
- return None
96
- except openai.PermissionDeniedError as e:
97
- self.log.warning(
98
- "Invalid OpenAI API key provided.",
99
- exc_info=e,
100
- )
101
- self.last_error = CompletionError.from_exception(e)
102
- return None
103
- except openai.InternalServerError as e:
104
- self.log.debug(
105
- "Unable to get OpenAI models due to OpenAI error.", exc_info=e
106
- )
107
- return api_key
108
- except openai.RateLimitError as e:
109
- self.log.debug(
110
- "Unable to get OpenAI models due to rate limit error.", exc_info=e
111
- )
112
- return api_key
113
- except openai.APIConnectionError as e:
114
- self.log.warning(
115
- "Unable to connect to OpenAI API.",
116
- exec_info=e,
117
- )
118
- self.last_error = CompletionError.from_exception(e)
119
- return None
120
- else:
121
- self.log.debug("User OpenAI API key validated.")
122
- return api_key
123
58
 
124
59
  @property
125
60
  def capabilities(self) -> AICapabilities:
@@ -133,7 +68,7 @@ This attribute is observed by the websocket provider to push the error to the cl
133
68
  provider="Azure OpenAI",
134
69
  )
135
70
 
136
- if constants.OLLAMA_MODEL and not self.api_key:
71
+ if constants.OLLAMA_MODEL:
137
72
  return AICapabilities(
138
73
  configuration={
139
74
  "model": constants.OLLAMA_MODEL
@@ -141,14 +76,12 @@ This attribute is observed by the websocket provider to push the error to the cl
141
76
  provider="Ollama",
142
77
  )
143
78
 
144
- if self.api_key:
145
- self._validate_api_key(self.api_key)
146
-
79
+ if constants.OPENAI_API_KEY:
147
80
  return AICapabilities(
148
81
  configuration={
149
- "model": OPENAI_MODEL_FALLBACK,
82
+ "model": "<dynamic>"
150
83
  },
151
- provider="OpenAI (user key)",
84
+ provider="OpenAI",
152
85
  )
153
86
 
154
87
  try:
@@ -169,19 +102,6 @@ This attribute is observed by the websocket provider to push the error to the cl
169
102
  if not self._async_client or self._async_client.is_closed():
170
103
  self._async_client = self._build_openai_client()
171
104
  return self._async_client
172
-
173
-
174
- @property
175
- def key_type(self) -> str:
176
- """Returns the authentication key type being used."""
177
-
178
- if self.api_key:
179
- return USER_KEY
180
-
181
- if constants.OLLAMA_MODEL:
182
- return "ollama"
183
-
184
- return MITO_SERVER_KEY
185
105
 
186
106
  def _build_openai_client(self) -> Optional[Union[openai.AsyncOpenAI, openai.AsyncAzureOpenAI]]:
187
107
  base_url = None
@@ -201,12 +121,12 @@ This attribute is observed by the websocket provider to push the error to the cl
201
121
  timeout=self.timeout,
202
122
  )
203
123
 
204
- elif constants.OLLAMA_MODEL and not self.api_key:
124
+ elif constants.OLLAMA_MODEL:
205
125
  base_url = constants.OLLAMA_BASE_URL
206
126
  llm_api_key = "ollama"
207
127
  self.log.debug(f"Using Ollama with model: {constants.OLLAMA_MODEL}")
208
- elif self.api_key:
209
- llm_api_key = self.api_key
128
+ elif constants.OPENAI_API_KEY:
129
+ llm_api_key = constants.OPENAI_API_KEY
210
130
  self.log.debug("Using OpenAI with user-provided API key")
211
131
  else:
212
132
  self.log.warning("No valid API key or model configuration provided")
@@ -262,7 +182,7 @@ This attribute is observed by the websocket provider to push the error to the cl
262
182
 
263
183
  # Handle other providers as before
264
184
  completion_function_params = get_open_ai_completion_function_params(
265
- message_type, model, messages, False, response_format_info
185
+ model, messages, False, response_format_info
266
186
  )
267
187
 
268
188
  # If they have set an Azure OpenAI or Ollama model, then we use it
@@ -313,7 +233,7 @@ This attribute is observed by the websocket provider to push the error to the cl
313
233
 
314
234
  # Handle other providers as before
315
235
  completion_function_params = get_open_ai_completion_function_params(
316
- message_type, model, messages, True, response_format_info
236
+ model, messages, True, response_format_info
317
237
  )
318
238
 
319
239
  completion_function_params["model"] = self._adjust_model_for_azure_or_ollama(completion_function_params["model"])