mito-ai 0.1.53__py3-none-any.whl → 0.1.55__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/_version.py +1 -1
- mito_ai/completions/models.py +1 -1
- mito_ai/completions/prompt_builders/agent_system_message.py +25 -12
- mito_ai/completions/prompt_builders/chat_system_message.py +17 -2
- mito_ai/completions/prompt_builders/prompt_constants.py +22 -0
- mito_ai/completions/prompt_builders/utils.py +7 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +18 -2
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +12 -12
- mito_ai/streamlit_preview/handlers.py +13 -6
- mito_ai/streamlit_preview/manager.py +4 -1
- mito_ai/streamlit_preview/utils.py +4 -4
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +7 -7
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +4 -3
- mito_ai/utils/open_ai_utils.py +3 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +147 -102
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/package.json +5 -4
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +5 -4
- mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.4b7cd47a24bb24ef84ea.js → mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.49c79c62671528877c61.js +2492 -515
- mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.49c79c62671528877c61.js.map +1 -0
- mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4395ab9342efa39fc0a2.js → mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.9dfbffc3592eb6f0aef9.js +21 -19
- mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.9dfbffc3592eb6f0aef9.js.map +1 -0
- mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js → mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +15 -7
- mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +1 -0
- mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +708 -0
- mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
- {mito_ai-0.1.53.dist-info → mito_ai-0.1.55.dist-info}/METADATA +1 -1
- {mito_ai-0.1.53.dist-info → mito_ai-0.1.55.dist-info}/RECORD +49 -47
- {mito_ai-0.1.53.dist-info → mito_ai-0.1.55.dist-info}/WHEEL +1 -1
- mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.4b7cd47a24bb24ef84ea.js.map +0 -1
- mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4395ab9342efa39fc0a2.js.map +0 -1
- mito_ai-0.1.53.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -1
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
- {mito_ai-0.1.53.data → mito_ai-0.1.55.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
- {mito_ai-0.1.53.dist-info → mito_ai-0.1.55.dist-info}/entry_points.txt +0 -0
- {mito_ai-0.1.53.dist-info → mito_ai-0.1.55.dist-info}/licenses/LICENSE +0 -0
mito_ai/_version.py
CHANGED
mito_ai/completions/models.py
CHANGED
@@ -35,7 +35,7 @@ class AgentResponse(BaseModel):
     get_cell_output_cell_id: Optional[str]
     next_steps: Optional[List[str]]
     analysis_assumptions: Optional[List[str]]
-
+    streamlit_app_prompt: Optional[str]


 @dataclass(frozen=True)
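As a hedged illustration of the new field (not the actual class, which defines more fields than shown here), a minimal Pydantic-style sketch:

# Minimal sketch assuming a Pydantic model shaped like the fields above; the real AgentResponse has additional fields.
from typing import List, Optional
from pydantic import BaseModel

class AgentResponseSketch(BaseModel):
    get_cell_output_cell_id: Optional[str] = None
    next_steps: Optional[List[str]] = None
    analysis_assumptions: Optional[List[str]] = None
    streamlit_app_prompt: Optional[str] = None  # new in 0.1.55

resp = AgentResponseSketch(streamlit_app_prompt="Date inputs at the top, two tabs below.")
print(resp.streamlit_app_prompt)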
mito_ai/completions/prompt_builders/agent_system_message.py
CHANGED

@@ -3,6 +3,7 @@

 from mito_ai.completions.prompt_builders.prompt_constants import (
     CITATION_RULES,
+    CELL_REFERENCE_RULES,
     FILES_SECTION_HEADING,
     JUPYTER_NOTEBOOK_SECTION_HEADING,
     VARIABLES_SECTION_HEADING,
@@ -231,15 +232,17 @@ When you want to create a new Streamlit app from the current notebook, respond with this format:

 {{
     type: 'create_streamlit_app',
+    streamlit_app_prompt: str
     message: str
 }}

 Important information:
-1. The
-2.
-3.
-4.
-5.
+1. The streamlit_app_prompt is a short description of how the app should be structured. It should be a high level specification that includes things like what fields should be configurable, what tabs should exist, etc. It does not need to be overly detailed however.
+2. The message is a short summary of why you're creating the Streamlit app.
+3. Only use this tool when the user explicitly asks to create or preview a Streamlit app. If the streamlit app for this app already exists, then use an empty string '' as the streamlit_app_prompt.
+4. This tool creates a new app from scratch - use EDIT_STREAMLIT_APP tool if the user is asking you to edit, update, or modify an app that already exists.
+5. Using this tool will automatically open the app so the user can see a preview of the app. If the user is asking you to open an app that already exists, but not make any changes to the app, this is the correct tool.
+6. When you use this tool, assume that it successfully created the Streamlit app unless the user explicitly tells you otherwise. The app will remain open so that the user can view it until the user decides to close it. You do not need to continually use the create_streamlit_app tool to keep the app open.

 <Example>

@@ -248,6 +251,7 @@ Your task: Show me my notebook as an app.
 Output:
 {{
     type: 'create_streamlit_app',
+    streamlit_app_prompt: "The app should have a beginning date and end date input field at the top. It should then be followed by two tabs for the user to select between: current performance and projected performance.",
     message: "I'll convert your notebook into an app."
 }}

@@ -264,12 +268,12 @@ When you want to edit an existing Streamlit app, respond with this format:
 {{
     type: 'edit_streamlit_app',
     message: str,
-
+    streamlit_app_prompt: str
 }}

 Important information:
 1. The message is a short summary of why you're editing the Streamlit app.
-2. The
+2. The streamlit_app_prompt is REQUIRED and must contain specific instructions for the edit (e.g., "Make the title text larger", "Change the chart colors to blue", "Add a sidebar with filters").
 3. Only use this tool when the user asks to edit, update, or modify a Streamlit app.
 4. The app does not need to already be open for you to use the tool. Using this tool will automatically open the streamlit app after applying the changes so the user can view it. You do not need to call the create_streamlit_app tool first.
 5. When you use this tool, assume that it successfully edited the Streamlit app unless the user explicitly tells you otherwise. The app will remain open so that the user can view it until the user decides to close it.
@@ -288,7 +292,7 @@ When you have completed the user's task, respond with a message in this format:

 Important information:
 1. The message is a short summary of the ALL the work that you've completed on this task. It should not just refer to the final message. It could be something like "I've completed the sales strategy analysis by exploring key relationships in the data and summarizing creating a report with three recommendations to boost sales.""
-2. The message should include citations for any insights that you shared with the user.
+2. The message should include citations for any insights that you shared with the user and cell references for whenever you refer to specific cells that you've updated or created.
 3. The next_steps is an optional list of 2 or 3 suggested follow-up tasks or analyses that the user might want to perform next. These should be concise, actionable suggestions that build on the work you've just completed. For example: ["Export the cleaned data to CSV", "Analyze revenue per customer", "Convert notebook into an app"].
 4. The next_steps should be as relevant to the user's actual task as possible. Try your best not to make generic suggestions like "Analyze the data" or "Visualize the results". For example, if the user just asked you to calculate LTV of their customers, you might suggest the following next steps: ["Graph key LTV drivers: churn and average transaction value", "Visualize LTV per age group"].
 5. If you are not sure what the user might want to do next, err on the side of suggesting next steps instead of making an assumption and using more CELL_UPDATES.
@@ -322,19 +326,29 @@ Output:

 RULES

-- You are working in a Jupyter Lab environment in a .ipynb file.
+- You are working in a Jupyter Lab environment in a .ipynb file.
 - You can only respond with CELL_UPDATES or FINISHED_TASK responses.
 - In each message you send to the user, you can send one CellModification, one CellAddition, or one FINISHED_TASK response. BUT YOU WILL GET TO SEND MULTIPLE MESSAGES TO THE USER TO ACCOMPLISH YOUR TASK SO DO NOT TRY TO ACCOMPLISH YOUR TASK IN A SINGLE MESSAGE.
 - After you send a CELL_UPDATE, the user will send you a message with the updated variables, code, and files in the current directory. You will use this information to decide what to do next, so it is critical that you wait for the user's response after each CELL_UPDATE before deciding your next action.
 - When updating code, keep as much of the original code as possible and do not recreate variables that already exist.
-- When you want to display a dataframe to the user, just write the dataframe on the last line of the code cell instead of writing print(<dataframe name>). Jupyter will automatically display the dataframe in the notebook.
 - When writing the message, do not explain to the user how to use the CELL_UPDATE or FINISHED_TASK response, they will already know how to use them. Just provide a summary of your thought process. Do not reference any Cell IDs in the message.
 - When writing the message, do not include leading words like "Explanation:" or "Thought process:". Just provide a summary of your thought process.
 - When writing the message, use tickmarks when referencing specific variable names. For example, write `sales_df` instead of "sales_df" or just sales_df.

+====
+
+CODE STYLE
+
+- Avoid using try/except blocks and other defensive programming patterns (like checking if files exist before reading them, verifying variables are defined before using them, etc.) unless there is a really good reason. In Jupyter notebooks, errors should surface immediately so users can identify and fix issues. When errors are caught and suppressed or when defensive checks hide problems, users continue running broken code without realizing it, and the agent's auto-error-fix loop cannot trigger. If a column doesn't exist, a file is missing, a variable isn't defined, or a module isn't installed, let it error. The user needs to know.
+- When you want to display a dataframe to the user, just write the dataframe on the last line of the code cell instead of writing print(<dataframe name>). Jupyter will automatically display the dataframe in the notebook.
+- When importing matplotlib, write the code `%matplotlib inline` to make sure the graphs render in Jupyter.
+
 ====
 {CITATION_RULES}

+====
+{CELL_REFERENCE_RULES}
+
 <Citation Example>

 ### User Message 1:
@@ -468,5 +482,4 @@ REMEMBER, YOU ARE GOING TO COMPLETE THE USER'S TASK OVER THE COURSE OF THE ENTIR
 ====

 OTHER USEFUL INFORMATION:
-1.
-2. The active cell ID is shared with you so that when the user refers to "this cell" or similar phrases, you know which cell they mean. However, you are free to edit any cell that you see fit."""
+1. The active cell ID is shared with you so that when the user refers to "this cell" or similar phrases, you know which cell they mean. However, you are free to edit any cell that you see fit."""
mito_ai/completions/prompt_builders/chat_system_message.py
CHANGED

@@ -3,7 +3,8 @@

 from mito_ai.completions.prompt_builders.prompt_constants import (
     CHAT_CODE_FORMATTING_RULES,
-    CITATION_RULES,
+    CITATION_RULES,
+    CELL_REFERENCE_RULES,
     ACTIVE_CELL_ID_SECTION_HEADING,
     CODE_SECTION_HEADING,
     get_database_rules
@@ -28,6 +29,9 @@ Other useful information:
 ====
 {CITATION_RULES}

+====
+{CELL_REFERENCE_RULES}
+
 <Example 1>
 {ACTIVE_CELL_ID_SECTION_HEADING}
 '7b3a9e2c-5d14-4c83-b2f9-d67891e4a5f2'
@@ -79,6 +83,18 @@ Notice in the example above that the user is just sending a friendly message, so
 ====
 {CHAT_CODE_FORMATTING_RULES}

+====
+
+CODE STYLE
+
+- Avoid using try/except blocks and other defensive programming patterns (like checking if files exist before reading them, verifying variables are defined before using them, etc.) unless there is a really good reason. In Jupyter notebooks, errors should surface immediately so users can identify and fix issues. When errors are caught and suppressed or when defensive checks hide problems, users continue running broken code without realizing it, and the agent's auto-error-fix loop cannot trigger. If a column doesn't exist, a file is missing, a variable isn't defined, or a module isn't installed, let it error. The user needs to know.
+- Write code that preserves the intent of the original code shared with you and the task to complete.
+- Make the solution as simple as possible.
+- Do not add temporary comments like '# Fixed the typo here' or '# Added this line to fix the error'
+- When importing matplotlib, write the code `%matplotlib inline` to make sure the graphs render in Jupyter.
+
+====
+
 IMPORTANT RULES:
 - Do not recreate variables that already exist
 - Keep as much of the original code as possible
@@ -87,6 +103,5 @@ IMPORTANT RULES:
 - Write code that preserves the intent of the original code shared with you and the task to complete.
 - Make the solution as simple as possible.
 - Reuse as much of the existing code as possible.
-- Do not add temporary comments like '# Fixed the typo here' or '# Added this line to fix the error'
 - Whenever writing Python code, it should be a python code block starting with ```python and ending with ```
 """
mito_ai/completions/prompt_builders/prompt_constants.py
CHANGED

@@ -46,6 +46,28 @@ Citation Rules:
 8. Do not include the citation in the code block as a comment. ONLY include the citation in the message field of your response.
 """

+CELL_REFERENCE_RULES = """RULES FOR REFERENCING CELLS
+
+When referring to specific cells in the notebook in your messages, use cell references so the user can easily navigate to the cell you're talking about. The user sees cells numbered as "Cell 1", "Cell 2", etc., but internally cells are identified by their unique IDs.
+
+To reference a cell, use this format inline in your message:
+[MITO_CELL_REF:cell_id]
+
+This will be displayed to the user as a clickable "Cell N" link that navigates to the referenced cell.
+
+Cell Reference Rules:
+
+1. Use cell references when discussing specific cells you've created or modified (e.g., "I've added the data cleaning code in [MITO_CELL_REF:abc123]").
+2. Use cell references when referring to cells the user mentioned or that contain relevant context.
+3. The cell_id must be an actual cell ID from the notebook - do not make up IDs.
+4. Place the reference inline where it makes sense in your message, similar to how you would write "Cell 3" in natural language.
+5. Do not use cell references in code - only in the message field of your responses.
+6. Cell references are different from citations. Use citations for specific line-level insights; use cell references for general cell-level navigation.
+
+Example:
+"I've loaded the sales data in [MITO_CELL_REF:c68fdf19-db8c-46dd-926f-d90ad35bb3bc] and will now calculate the monthly totals."
+"""
+
 def get_active_cell_output_str(has_active_cell_output: bool) -> str:
     """
     Used to tell the AI about the output of the active code cell.
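To make the [MITO_CELL_REF:cell_id] marker concrete, here is an illustrative sketch of how a client could turn markers into the "Cell N" labels users see. The real rendering happens in the JupyterLab frontend; this Python helper, including the assumption that cell order determines the number, is only an example:

import re

def render_cell_refs(message: str, notebook_cell_ids: list) -> str:
    # Map each cell ID to its 1-based position, then replace markers with "Cell N".
    positions = {cell_id: i + 1 for i, cell_id in enumerate(notebook_cell_ids)}

    def replace(match):
        cell_id = match.group(1)
        n = positions.get(cell_id)
        return f"Cell {n}" if n is not None else match.group(0)

    return re.sub(r"\[MITO_CELL_REF:([^\]]+)\]", replace, message)

cell_ids = ["c68fdf19-db8c-46dd-926f-d90ad35bb3bc", "abc123"]
msg = "I've loaded the sales data in [MITO_CELL_REF:c68fdf19-db8c-46dd-926f-d90ad35bb3bc]."
print(render_cell_refs(msg, cell_ids))  # -> "I've loaded the sales data in Cell 1."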
mito_ai/completions/prompt_builders/utils.py
CHANGED

@@ -39,6 +39,7 @@ def get_selected_context_str(additional_context: Optional[List[Dict[str, str]]])
     selected_files = [context["value"] for context in additional_context if context.get("type") == "file"]
     selected_db_connections = [context["value"] for context in additional_context if context.get("type") == "db"]
     selected_images = [context["value"] for context in additional_context if context.get("type", "").startswith("image/")]
+    selected_cells = [context["value"] for context in additional_context if context.get("type") == "cell"]

     # STEP 2: Create a list of strings (instructions) for each context type
     context_parts = []
@@ -66,6 +67,12 @@ def get_selected_context_str(additional_context: Optional[List[Dict[str, str]]])
             "The following images have been selected by the user to be used in the task:\n"
             + "\n".join(selected_images)
         )
+
+    if len(selected_cells) > 0:
+        context_parts.append(
+            "The following cells have been selected by the user to be used in the task:\n"
+            + "\n".join(selected_cells)
+        )

     # STEP 3: Combine into a single string
     return "\n\n".join(context_parts)
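A short usage sketch of the updated helper; the import path follows the file list and the context entries below are made up for illustration:

from mito_ai.completions.prompt_builders.utils import get_selected_context_str

# Hypothetical context entries; "cell" is the new context type handled above.
additional_context = [
    {"type": "file", "value": "sales.csv"},
    {"type": "cell", "value": "import pandas as pd\nsales_df = pd.read_csv('sales.csv')"},
]

# Returns one string listing the selected files and the selected cells, separated by blank lines.
print(get_selected_context_str(additional_context))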
mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py
CHANGED

@@ -4,10 +4,24 @@
 from typing import List
 from mito_ai.streamlit_conversion.prompts.prompt_constants import MITO_TODO_PLACEHOLDER

-def
+def get_streamlit_app_spec_section(streamlit_app_prompt: str) -> str:
+    if streamlit_app_prompt == '':
+        return ''
+
+    return f"""
+Here is a high level outline of the streamlit app. Use your best judgement to implement this structure.
+
+{streamlit_app_prompt}
+
+"""
+
+
+def get_streamlit_app_creation_prompt(notebook: List[dict], streamlit_app_prompt: str) -> str:
     """
     This prompt is used to create a streamlit app from a notebook.
     """
+    streamlit_app_spec_section = get_streamlit_app_spec_section(streamlit_app_prompt)
+
     return f"""Convert the following Jupyter notebook into a Streamlit application.

 GOAL: Create a complete, runnable Streamlit app that accurately represents the notebook. It must completely convert the notebook.
@@ -40,7 +54,9 @@ data = [
 ]
 </Example>

-
+{streamlit_app_spec_section}
+
+NOTEBOOK TO CONVERT:

 {notebook}
 """
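A quick sketch of the two-argument prompt builder; the import path comes from the file list and the notebook structure is a made-up minimal example:

from mito_ai.streamlit_conversion.prompts.streamlit_app_creation_prompt import get_streamlit_app_creation_prompt

# Hypothetical notebook content for illustration only.
notebook = [{"cell_type": "code", "source": "import pandas as pd\ndf = pd.read_csv('sales.csv')"}]

# With an empty string the spec section collapses to '' (matching the 0.1.53 behaviour);
# with a non-empty prompt the high level outline is embedded into the conversion prompt.
prompt = get_streamlit_app_creation_prompt(
    notebook,
    "Date range inputs at the top, then two tabs: current performance and projected performance.",
)
print(prompt)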
mito_ai/streamlit_conversion/streamlit_agent_handler.py
CHANGED

@@ -16,10 +16,10 @@ from mito_ai.utils.error_classes import StreamlitConversionError
 from mito_ai.utils.telemetry_utils import log_streamlit_app_validation_retry, log_streamlit_app_conversion_success
 from mito_ai.path_utils import AbsoluteNotebookPath, AppFileName, get_absolute_notebook_dir_path, get_absolute_app_path, get_app_file_name

-async def generate_new_streamlit_code(notebook: List[dict]) -> str:
+async def generate_new_streamlit_code(notebook: List[dict], streamlit_app_prompt: str) -> str:
     """Send a query to the agent, get its response and parse the code"""

-    prompt_text = get_streamlit_app_creation_prompt(notebook)
+    prompt_text = get_streamlit_app_creation_prompt(notebook, streamlit_app_prompt)

     messages: List[MessageParam] = [
         cast(MessageParam, {
@@ -100,7 +100,7 @@ async def correct_error_in_generation(error: str, streamlit_app_code: str) -> str

     return streamlit_app_code

-async def streamlit_handler(notebook_path: AbsoluteNotebookPath, app_file_name: AppFileName,
+async def streamlit_handler(create_new_app: bool, notebook_path: AbsoluteNotebookPath, app_file_name: AppFileName, streamlit_app_prompt: str = "") -> None:
     """Handler function for streamlit code generation and validation"""

     # Convert to absolute path for consistent handling
@@ -108,22 +108,22 @@
     app_directory = get_absolute_notebook_dir_path(notebook_path)
     app_path = get_absolute_app_path(app_directory, app_file_name)

-    if
+    if create_new_app:
+        # Otherwise generate a new streamlit app
+        streamlit_code = await generate_new_streamlit_code(notebook_code, streamlit_app_prompt)
+    else:
         # If the user is editing an existing streamlit app, use the update function
-
+        existing_streamlit_code = get_app_code_from_file(app_path)

-        if
+        if existing_streamlit_code is None:
             raise StreamlitConversionError("Error updating existing streamlit app because app.py file was not found.", 404)

-        streamlit_code = await update_existing_streamlit_code(notebook_code,
-    else:
-        # Otherwise generate a new streamlit app
-        streamlit_code = await generate_new_streamlit_code(notebook_code)
+        streamlit_code = await update_existing_streamlit_code(notebook_code, existing_streamlit_code, streamlit_app_prompt)

     # Then, after creating/updating the app, validate that the new code runs
     errors = validate_app(streamlit_code, notebook_path)
     tries = 0
-    while len(errors)>0 and tries < 5:
+    while len(errors) > 0 and tries < 5:
         for error in errors:
             streamlit_code = await correct_error_in_generation(error, streamlit_code)

@@ -141,4 +141,4 @@ async def streamlit_handler(notebook_path: AbsoluteNotebookPath, app_file_name:

     # Finally, update the app.py file with the new code
     create_app_file(app_path, streamlit_code)
-    log_streamlit_app_conversion_success('mito_server_key', MessageType.STREAMLIT_CONVERSION,
+    log_streamlit_app_conversion_success('mito_server_key', MessageType.STREAMLIT_CONVERSION, streamlit_app_prompt)
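To illustrate the new call shape, a minimal sketch of invoking the handler on both paths; the paths and prompt text are placeholders and the asyncio scaffolding is only for the example:

import asyncio

from mito_ai.path_utils import AbsoluteNotebookPath, AppFileName
from mito_ai.streamlit_conversion.streamlit_agent_handler import streamlit_handler

async def main() -> None:
    notebook_path = AbsoluteNotebookPath("/path/to/analysis.ipynb")  # placeholder path
    app_file_name = AppFileName("app-analysis.py")                   # placeholder file name

    # Create a brand new app from the notebook, guided by a high level spec.
    await streamlit_handler(True, notebook_path, app_file_name, "Two tabs: raw data and summary charts.")

    # Edit the app that now exists on disk with a targeted instruction.
    await streamlit_handler(False, notebook_path, app_file_name, "Make the title text larger.")

asyncio.run(main())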
mito_ai/streamlit_preview/handlers.py
CHANGED

@@ -22,12 +22,14 @@ class StreamlitPreviewHandler(APIHandler):
         self.preview_manager = StreamlitPreviewManager()

     @tornado.web.authenticated
+
     async def post(self) -> None:
         """Start a new streamlit preview."""
         try:
+
             # Parse and validate request
             body = self.get_json_body()
-            notebook_path, notebook_id, force_recreate,
+            notebook_path, notebook_id, force_recreate, streamlit_app_prompt = validate_request_body(body)

             # Ensure app exists
             absolute_notebook_path = get_absolute_notebook_path(notebook_path)
@@ -35,14 +37,19 @@ class StreamlitPreviewHandler(APIHandler):
             app_file_name = get_app_file_name(notebook_id)
             absolute_app_path = get_absolute_app_path(absolute_notebook_dir_path, app_file_name)
             app_path_exists = does_app_path_exist(absolute_app_path)
-
+
             if not app_path_exists or force_recreate:
                 if not app_path_exists:
                     print("[Mito AI] App path not found, generating streamlit code")
                 else:
                     print("[Mito AI] Force recreating streamlit app")

-
+                # Create a new app
+                await streamlit_handler(True, absolute_notebook_path, app_file_name, streamlit_app_prompt)
+            elif streamlit_app_prompt != '':
+                # Update an existing app if there is a prompt provided. Otherwise, the user is just
+                # starting an existing app so we can skip the streamlit_handler all together
+                await streamlit_handler(False, absolute_notebook_path, app_file_name, streamlit_app_prompt)

             # Start preview
             # TODO: There's a bug here where when the user rebuilds and already running app. Instead of
@@ -58,7 +65,7 @@ class StreamlitPreviewHandler(APIHandler):
                 "port": port,
                 "url": f"http://localhost:{port}"
             })
-            log_streamlit_app_preview_success('mito_server_key', MessageType.STREAMLIT_CONVERSION,
+            log_streamlit_app_preview_success('mito_server_key', MessageType.STREAMLIT_CONVERSION, streamlit_app_prompt)

         except StreamlitConversionError as e:
             print(e)
@@ -71,7 +78,7 @@ class StreamlitPreviewHandler(APIHandler):
                 MessageType.STREAMLIT_CONVERSION,
                 error_message,
                 formatted_traceback,
-
+                streamlit_app_prompt,
             )
         except StreamlitPreviewError as e:
             print(e)
@@ -79,7 +86,7 @@ class StreamlitPreviewHandler(APIHandler):
             formatted_traceback = traceback.format_exc()
             self.set_status(e.error_code)
             self.finish({"error": error_message})
-            log_streamlit_app_preview_failure('mito_server_key', MessageType.STREAMLIT_CONVERSION, error_message, formatted_traceback,
+            log_streamlit_app_preview_failure('mito_server_key', MessageType.STREAMLIT_CONVERSION, error_message, formatted_traceback, streamlit_app_prompt)
         except Exception as e:
             print(f"Exception in streamlit preview handler: {e}")
             self.set_status(500)
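For reference, a sketch of the request a client could send to this handler and the response it returns. Only the force_recreate and streamlit_app_prompt fields and the port/url success payload are taken from the diff; the endpoint path, auth header, and the exact keys for the notebook path and id are assumptions:

import requests

resp = requests.post(
    "http://localhost:8888/mito-ai/streamlit-preview",  # hypothetical endpoint
    headers={"Authorization": "token <jupyter-server-token>"},  # hypothetical auth
    json={
        "notebook_path": "analysis.ipynb",   # assumed key name
        "notebook_id": "notebook-123",       # assumed key name
        "force_recreate": False,
        "streamlit_app_prompt": "Add a sidebar with filters",  # new in 0.1.55
    },
)
print(resp.json())  # e.g. {"port": 8501, "url": "http://localhost:8501"}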
mito_ai/streamlit_preview/manager.py
CHANGED

@@ -3,6 +3,7 @@

 import socket
 import subprocess
+import sys
 import time
 import threading
 import requests
@@ -54,8 +55,10 @@ class StreamlitPreviewManager:
         port = self.get_free_port()

         # Start streamlit process
+        # Use sys.executable -m streamlit to ensure it works on Windows
+        # where streamlit may not be directly executable in PATH
         cmd = [
-            "streamlit", "run", app_file_name,
+            sys.executable, "-m", "streamlit", "run", app_file_name,
             "--server.port", str(port),
             "--server.headless", "true",
             "--server.address", "localhost",
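The same launch technique in isolation, as a hedged sketch: the command and flags mirror the diff, while the Popen wrapper is an assumption about how the manager runs the command:

import subprocess
import sys

def launch_streamlit(app_file_name: str, port: int) -> subprocess.Popen:
    # sys.executable -m streamlit runs the module with the same interpreter that has
    # streamlit installed, so it works even when "streamlit" is not on PATH (common on Windows).
    cmd = [
        sys.executable, "-m", "streamlit", "run", app_file_name,
        "--server.port", str(port),
        "--server.headless", "true",
        "--server.address", "localhost",
    ]
    return subprocess.Popen(cmd)

# Example (assumes streamlit is installed and app.py exists):
# process = launch_streamlit("app.py", 8501)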
mito_ai/streamlit_preview/utils.py
CHANGED

@@ -22,8 +22,8 @@ def validate_request_body(body: Optional[dict]) -> Tuple[str, str, bool, str]:
     if not isinstance(force_recreate, bool):
         raise StreamlitPreviewError("force_recreate must be a boolean", 400)

-
-    if not isinstance(
-        raise StreamlitPreviewError("
+    streamlit_app_prompt = body.get("streamlit_app_prompt", "")
+    if not isinstance(streamlit_app_prompt, str):
+        raise StreamlitPreviewError("streamlit_app_prompt must be a string", 400)

-    return notebook_path, notebook_id, force_recreate,
+    return notebook_path, notebook_id, force_recreate, streamlit_app_prompt
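A brief usage sketch of the updated validator; the import path follows the file list, and the notebook_path/notebook_id key names are assumed since they are validated outside this hunk:

from mito_ai.streamlit_preview.utils import validate_request_body

body = {
    "notebook_path": "analysis.ipynb",  # assumed key name
    "notebook_id": "notebook-123",      # assumed key name
    "force_recreate": False,
    "streamlit_app_prompt": "Add a sidebar with filters",
}

# Now unpacks into four values; streamlit_app_prompt defaults to "" when the key is
# missing, and a non-string value raises StreamlitPreviewError with a 400 status.
notebook_path, notebook_id, force_recreate, streamlit_app_prompt = validate_request_body(body)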
mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py
CHANGED

@@ -89,7 +89,7 @@ class TestGenerateStreamlitCode:
         mock_stream.return_value = mock_async_gen()

         notebook_data: List[dict] = [{"cells": []}]
-        result = await generate_new_streamlit_code(notebook_data)
+        result = await generate_new_streamlit_code(notebook_data, '')

         expected_code = "import streamlit\nst.title('Hello')\n"
         assert result == expected_code
@@ -158,11 +158,11 @@ class TestStreamlitHandler:
         # Construct the expected app path using the same method as the production code
         app_directory = get_absolute_notebook_dir_path(notebook_path)
         expected_app_path = get_absolute_app_path(app_directory, app_file_name)
-        await streamlit_handler(notebook_path, app_file_name)
+        await streamlit_handler(True, notebook_path, app_file_name)

         # Verify calls
         mock_parse.assert_called_once_with(notebook_path)
-        mock_generate_code.assert_called_once_with(mock_notebook_data)
+        mock_generate_code.assert_called_once_with(mock_notebook_data, '')
         mock_validator.assert_called_once_with("import streamlit\nst.title('Test')", notebook_path)
         mock_create_file.assert_called_once_with(expected_app_path, "import streamlit\nst.title('Test')")

@@ -187,7 +187,7 @@

         # Now it should raise an exception instead of returning a tuple
         with pytest.raises(Exception):
-            await streamlit_handler(AbsoluteNotebookPath("notebook.ipynb"), AppFileName('test-app-file-name.py'))
+            await streamlit_handler(True, AbsoluteNotebookPath("notebook.ipynb"), AppFileName('test-app-file-name.py'), '')

         # Verify that error correction was called 5 times (once per error, 5 retries)
         # Each retry processes 1 error, so 5 retries = 5 calls
@@ -215,7 +215,7 @@

         # Now it should raise an exception instead of returning a tuple
         with pytest.raises(Exception):
-            await streamlit_handler(AbsoluteNotebookPath("notebook.ipynb"), AppFileName('test-app-file-name.py'))
+            await streamlit_handler(True, AbsoluteNotebookPath("notebook.ipynb"), AppFileName('test-app-file-name.py'), '')

     @pytest.mark.asyncio
     @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.parse_jupyter_notebook_to_extract_required_content')
@@ -225,7 +225,7 @@
         mock_parse.side_effect = FileNotFoundError("Notebook not found")

         with pytest.raises(FileNotFoundError, match="Notebook not found"):
-            await streamlit_handler(AbsoluteNotebookPath("notebook.ipynb"), AppFileName('test-app-file-name.py'))
+            await streamlit_handler(True, AbsoluteNotebookPath("notebook.ipynb"), AppFileName('test-app-file-name.py'), '')

     @pytest.mark.asyncio
     @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.parse_jupyter_notebook_to_extract_required_content')
@@ -240,7 +240,7 @@
         mock_generate_code.side_effect = Exception("Generation failed")

         with pytest.raises(Exception, match="Generation failed"):
-            await streamlit_handler(AbsoluteNotebookPath("notebook.ipynb"), AppFileName('test-app-file-name.py'))
+            await streamlit_handler(True, AbsoluteNotebookPath("notebook.ipynb"), AppFileName('test-app-file-name.py'), '')


mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py
CHANGED

@@ -99,9 +99,10 @@ class TestStreamlitPreviewHandler:
             assert mock_streamlit_handler.called
             # Verify it was called with the correct arguments
            call_args = mock_streamlit_handler.call_args
-            assert call_args[0][0] ==
-            assert call_args[0][1] ==
-            assert call_args[0][2] ==
+            assert call_args[0][0] == True
+            assert call_args[0][1] == os.path.abspath(notebook_path)  # First argument should be the absolute notebook path
+            assert call_args[0][2] == app_file_name  # Second argument should be the app file name
+            assert call_args[0][3] == ""  # Third argument should be the edit_prompt
         else:
             mock_streamlit_handler.assert_not_called()

mito_ai/utils/open_ai_utils.py
CHANGED
@@ -171,6 +171,9 @@ def get_open_ai_completion_function_params(
         "stream": stream,
         "messages": messages,
     }
+
+    if model == "gpt-5.2":
+        completion_function_params["reasoning_effort"] = "low"

     # If a response format is provided, we need to convert it to a json schema.
     # Pydantic models are supported by the OpenAI API, however, we need to be able to