runbooks-0.7.5-py3-none-any.whl → runbooks-0.7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. runbooks/__init__.py +1 -1
  2. runbooks/cfat/__init__.py +2 -2
  3. runbooks/finops/__init__.py +1 -1
  4. runbooks/finops/cli.py +1 -1
  5. runbooks/operate/__init__.py +2 -2
  6. runbooks/remediation/__init__.py +2 -2
  7. runbooks/remediation/acm_remediation.py +1 -1
  8. runbooks/remediation/base.py +1 -1
  9. runbooks/remediation/cloudtrail_remediation.py +1 -1
  10. runbooks/remediation/cognito_remediation.py +1 -1
  11. runbooks/remediation/dynamodb_remediation.py +1 -1
  12. runbooks/remediation/ec2_remediation.py +1 -1
  13. runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -1
  14. runbooks/remediation/kms_enable_key_rotation.py +1 -1
  15. runbooks/remediation/kms_remediation.py +1 -1
  16. runbooks/remediation/lambda_remediation.py +1 -1
  17. runbooks/remediation/multi_account.py +1 -1
  18. runbooks/remediation/rds_remediation.py +1 -1
  19. runbooks/remediation/requirements.txt +2 -2
  20. runbooks/remediation/s3_block_public_access.py +1 -1
  21. runbooks/remediation/s3_enable_access_logging.py +1 -1
  22. runbooks/remediation/s3_encryption.py +1 -1
  23. runbooks/remediation/s3_remediation.py +1 -1
  24. runbooks/security/__init__.py +1 -1
  25. {runbooks-0.7.5.dist-info → runbooks-0.7.6.dist-info}/METADATA +4 -2
  26. {runbooks-0.7.5.dist-info → runbooks-0.7.6.dist-info}/RECORD +42 -62
  27. {runbooks-0.7.5.dist-info → runbooks-0.7.6.dist-info}/top_level.txt +0 -1
  28. jupyter-agent/.env +0 -2
  29. jupyter-agent/.env.template +0 -2
  30. jupyter-agent/.gitattributes +0 -35
  31. jupyter-agent/.gradio/certificate.pem +0 -31
  32. jupyter-agent/README.md +0 -16
  33. jupyter-agent/__main__.log +0 -8
  34. jupyter-agent/app.py +0 -256
  35. jupyter-agent/cloudops-agent.png +0 -0
  36. jupyter-agent/ds-system-prompt.txt +0 -154
  37. jupyter-agent/jupyter-agent.png +0 -0
  38. jupyter-agent/llama3_template.jinja +0 -123
  39. jupyter-agent/requirements.txt +0 -9
  40. jupyter-agent/tmp/4ojbs8a02ir/jupyter-agent.ipynb +0 -68
  41. jupyter-agent/tmp/cm5iasgpm3p/jupyter-agent.ipynb +0 -91
  42. jupyter-agent/tmp/crqbsseag5/jupyter-agent.ipynb +0 -91
  43. jupyter-agent/tmp/hohanq1u097/jupyter-agent.ipynb +0 -57
  44. jupyter-agent/tmp/jns1sam29wm/jupyter-agent.ipynb +0 -53
  45. jupyter-agent/tmp/jupyter-agent.ipynb +0 -27
  46. jupyter-agent/utils.py +0 -409
  47. runbooks/inventory/aws_organization.png +0 -0
  48. /runbooks/inventory/{tests → Tests}/common_test_data.py +0 -0
  49. /runbooks/inventory/{tests → Tests}/common_test_functions.py +0 -0
  50. /runbooks/inventory/{tests → Tests}/script_test_data.py +0 -0
  51. /runbooks/inventory/{tests → Tests}/setup.py +0 -0
  52. /runbooks/inventory/{tests → Tests}/src.py +0 -0
  53. /runbooks/inventory/{tests/test_inventory_modules.py → Tests/test_Inventory_Modules.py} +0 -0
  54. /runbooks/inventory/{tests → Tests}/test_cfn_describe_stacks.py +0 -0
  55. /runbooks/inventory/{tests → Tests}/test_ec2_describe_instances.py +0 -0
  56. /runbooks/inventory/{tests → Tests}/test_lambda_list_functions.py +0 -0
  57. /runbooks/inventory/{tests → Tests}/test_moto_integration_example.py +0 -0
  58. /runbooks/inventory/{tests → Tests}/test_org_list_accounts.py +0 -0
  59. /runbooks/inventory/{Inventory_Modules.py → inventory_modules.py} +0 -0
  60. {runbooks-0.7.5.dist-info → runbooks-0.7.6.dist-info}/WHEEL +0 -0
  61. {runbooks-0.7.5.dist-info → runbooks-0.7.6.dist-info}/entry_points.txt +0 -0
  62. {runbooks-0.7.5.dist-info → runbooks-0.7.6.dist-info}/licenses/LICENSE +0 -0
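The main change in 0.7.6 is that the entire jupyter-agent/ tree (including its stray tmp/ notebooks and a bundled certificate) no longer ships inside the wheel, alongside the inventory test-directory renames listed above. For readers who want to reproduce a listing like this locally, here is a minimal sketch using only the Python standard library; the wheel filenames are placeholders for locally downloaded copies, not files provided with this diff.

# Minimal sketch: diff the file lists of two wheels (a wheel is a zip archive).
# The wheel filenames below are illustrative placeholders.
import zipfile

def wheel_files(path: str) -> set[str]:
    """Return the set of archive member names contained in a wheel."""
    with zipfile.ZipFile(path) as whl:
        return set(whl.namelist())

old = wheel_files("runbooks-0.7.5-py3-none-any.whl")
new = wheel_files("runbooks-0.7.6-py3-none-any.whl")

print("removed:", sorted(old - new))   # e.g. the jupyter-agent/* entries
print("added:  ", sorted(new - old))
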
jupyter-agent/app.py DELETED
@@ -1,256 +0,0 @@
- """
- app.py
-
- Main entry point for the Jupyter Agent. This file sets up the Gradio user interface,
- handles session-specific sandbox initialization, file management, and orchestrates
- the interactive notebook generation and code execution.
- """
-
- import json
- import os
- from pathlib import Path
- from typing import Any, Dict, List, Tuple
-
- import gradio as gr
- from e2b_code_interpreter import Sandbox
- from gradio.utils import get_space
- from huggingface_hub import InferenceClient
- from transformers import AutoTokenizer
-
- ## Import helper functions from utils with type hints
- from utils import (
-     create_base_notebook,
-     run_interactive_notebook,
-     update_notebook_display,
- )
-
- from runbooks.utils.logger import configure_logger
-
- ## ✅ Configure Logger
- logger = configure_logger(__name__)
-
- ## Load environment variables if not running in a Hugging Face Space
- if not get_space():
-     try:
-         from dotenv import load_dotenv
-
-         load_dotenv()
-     except (ImportError, ModuleNotFoundError):
-         logger.warning("python-dotenv not installed; proceeding without .env support.")
-         pass
-
- ## Global configurations
- ## For sandbox execution and Hugging Face API authentication
- E2B_API_KEY = os.environ["E2B_API_KEY"]
- HF_TOKEN = os.environ["HF_TOKEN"]
- ## Set a limit on the number of new tokens generated per request.
- DEFAULT_MAX_TOKENS = 512
- ## A dictionary used to keep track of separate execution environments (sandboxes) for each session.
- SANDBOXES = {}
- ## For storing temporary files (e.g., generated notebooks)
- TMP_DIR = "./tmp/"
- ## Ensure the temporary directory exists
- if not os.path.exists(TMP_DIR):
-     os.makedirs(TMP_DIR)
-
- ## Initialize a base notebook on startup
- notebook_data = create_base_notebook([])[0]
- with open(TMP_DIR + "jupyter-agent.ipynb", "w", encoding="utf-8") as f:
-     json.dump(notebook_data, f, indent=2)
-
- ## Read default system prompt
- try:
-     with open("ds-system-prompt.txt", "r", encoding="utf-8") as f:
-         DEFAULT_SYSTEM_PROMPT: str = f.read()
- except FileNotFoundError:
-     logger.error("ds-system-prompt.txt not found. Please ensure it is available.")
-     DEFAULT_SYSTEM_PROMPT = ""
-
-
- ## --- Main Execution Function ---
- def execute_jupyter_agent(
-     system_prompt: str,
-     user_input: str,
-     max_new_tokens: int,
-     model: str,
-     files: List[str],
-     message_history: List[Dict[str, Any]],
-     request: gr.Request,
- ) -> Tuple[str, List[Dict[str, Any]], str]:
-     """
-     Core callback function that orchestrates the interactive notebook generation.
-
-     :param system_prompt: The system prompt template.
-     :param user_input: User's input command.
-     :param max_new_tokens: Maximum number of tokens to generate.
-     :param model: Identifier for the language model.
-     :param files: List of file paths uploaded by the user.
-     :param message_history: History of conversation messages.
-     :param request: Gradio request object with session details.
-     :return: A tuple containing the updated notebook HTML, updated message history,
-         and the path to the generated notebook file.
-     """
-     ## Retrieve or create a sandbox instance for the given session.
-     if request.session_hash not in SANDBOXES:
-         ## Create a new Sandbox with the E2B API key and stored
-         SANDBOXES[request.session_hash] = Sandbox(api_key=E2B_API_KEY)
-     sbx = SANDBOXES[request.session_hash]
-
-     ## Create session-specific directory for saving notebook
-     save_dir = os.path.join(TMP_DIR, request.session_hash)
-     os.makedirs(save_dir, exist_ok=True)
-     save_dir = os.path.join(save_dir, "jupyter-agent.ipynb")
-
-     ## Initializes an inference client for text generation using the provided Hugging Face token.
-     client = InferenceClient(api_key=HF_TOKEN)
-
-     ## Loads the tokenizer corresponding to the chosen model.
-     tokenizer = AutoTokenizer.from_pretrained(model)
-     # model = "meta-llama/Llama-3.1-8B-Instruct"
-
-     ## Process uploaded files
-     filenames = []
-     if files is not None:
-         for filepath in files:
-             filpath = Path(filepath)
-             with open(filepath, "rb") as file:
-                 print(f"uploading {filepath}...")
-                 ## Write the file into the sandbox’s file system (allowing the agent to access it during execution).
-                 sbx.files.write(filpath.name, file)
-                 filenames.append(filpath.name)
-
-     ## Initialize message_history if it doesn't exist
-     if len(message_history) == 0:
-         message_history.append(
-             {
-                 "role": "system",
-                 "content": system_prompt.format("- " + "\n- ".join(filenames)),
-             }
-         )
-     message_history.append({"role": "user", "content": user_input})
-
-     ## Outputs the current conversation history for debugging purposes.
-     logger.debug(f"Message history: {message_history}")
-
-     ## Generate notebook updates by streaming responses
-     for notebook_html, notebook_data, messages in run_interactive_notebook(
-         client, model, tokenizer, message_history, sbx, max_new_tokens=max_new_tokens
-     ):
-         message_history = messages
-
-         ## Yield intermediate UI updates with a fixed download path (initial version)
-         yield notebook_html, message_history, TMP_DIR + "jupyter-agent.ipynb"
-
-     ## Save the final notebook JSON data to a specified path.
-     with open(save_dir, "w", encoding="utf-8") as f:
-         json.dump(notebook_data, f, indent=2)
-     yield notebook_html, message_history, save_dir
-     logger.info(f"Notebook saved to {save_dir}")
-
-
- def clear(msg_state):
-     msg_state = []
-     return update_notebook_display(create_base_notebook([])[0]), msg_state
-
-
- ## Gradio components fill the full height of the viewport, allow scrolling, and have appropriate padding
- custom_css = """
- #component-0 {
-     height: 100vh;
-     overflow-y: auto;
-     padding: 20px;
- }
-
- .gradio-container {
-     height: 100vh !important;
- }
-
- .contain {
-     height: 100vh !important;
- }
- """
- ## TODO
- # footer {
- #     visibility: hidden;
- # }
-
-
- ## Build and return the Gradio Blocks interface for the Jupyter Agent.
- # with gr.Blocks(css=custom_css) as poc:
- with gr.Blocks() as poc:
-     msg_state = gr.State(value=[])
-
-     html_output = gr.HTML(value=update_notebook_display(create_base_notebook([])[0]))
-
-     user_input = gr.Textbox(
-         value="Solve the Bayes' theorem equation and plot the results.",
-         lines=3,
-         label="User input",
-     )
-
-     with gr.Row():
-         generate_btn = gr.Button("▶️ Let's go!")
-         clear_btn = gr.Button("🧹 Clear")
-
-     file = gr.File(
-         TMP_DIR + "jupyter-agent.ipynb", label="💾 Download Jupyter Notebook"
-     )
-
-     with gr.Accordion("Upload files", open=False):
-         files = gr.File(label="Upload files to use", file_count="multiple")
-
-     with gr.Accordion("Advanced Settings", open=False):
-         system_input = gr.Textbox(
-             label="System Prompt",
-             value=DEFAULT_SYSTEM_PROMPT,
-             elem_classes="input-box",
-             lines=8,
-         )
-         with gr.Row():
-             max_tokens = gr.Number(
-                 label="Max New Tokens",
-                 value=DEFAULT_MAX_TOKENS,
-                 minimum=128,
-                 maximum=2048,
-                 step=8,
-                 interactive=True,
-             )
-
-             model = gr.Dropdown(
-                 # value="meta-llama/Llama-3.1-8B-Instruct",
-                 value="meta-llama/Llama-3.3-70B-Instruct",
-                 choices=[
-                     ## Text only instruct-tuned model in 70B size (text in/text out).
-                     "meta-llama/Llama-3.3-70B-Instruct",
-                     ## Pretrained and fine-tuned text models with sizes
-                     "meta-llama/Llama-3.1-8B-Instruct",
-                     ## pretrained and instruction-tuned image reasoning generative models in 11B and 90B sizes (text + images in / text out)
-                     "meta-llama/Llama-3.2-3B-Instruct",
-                     "meta-llama/Llama-3.2-11B-Vision-Instruct",
-                 ],
-                 label="Models",
-             )
-
-     generate_btn.click(
-         fn=execute_jupyter_agent,
-         inputs=[system_input, user_input, max_tokens, model, files, msg_state],
-         outputs=[html_output, msg_state, file],
-     )
-
-     clear_btn.click(fn=clear, inputs=[msg_state], outputs=[html_output, msg_state])
-
-     poc.load(
-         fn=None,
-         inputs=None,
-         outputs=None,
-         js=""" () => {
-             if (document.querySelectorAll('.dark').length) {
-                 document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
-             }
-         }
-         """,
-     )
-
- ## Main entry point: launch the Gradio interface.
- ## Disable server-side rendering (i.e., client-side rendering is used).
- poc.launch(ssr_mode=False, pwa=True, share=True)
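
The deleted app.py streams its UI updates by making execute_jupyter_agent a generator: every yield pushes the latest notebook HTML, message history, and download path into the Gradio outputs bound by generate_btn.click. Below is a stripped-down sketch of that yield-to-stream pattern; the component names and the three-step loop are illustrative only and do not come from the deleted file.

# Minimal sketch of Gradio's generator-based streaming, the pattern app.py relied on.
# Component names and the step loop are illustrative placeholders.
import time
import gradio as gr

def stream_steps(prompt: str):
    # Each yield immediately refreshes the bound output component.
    for step in range(1, 4):
        time.sleep(0.5)
        yield f"step {step}/3 for: {prompt}"

with gr.Blocks() as demo:
    box = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Progress")
    gr.Button("Run").click(fn=stream_steps, inputs=[box], outputs=[out])

demo.launch()
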
jupyter-agent/cloudops-agent.png DELETED
Binary file
jupyter-agent/ds-system-prompt.txt DELETED
@@ -1,154 +0,0 @@
- # Data Science Agent Protocol
-
- You are an intelligent data science assistant with access to an IPython interpreter. Your primary goal is to solve analytical tasks through careful, iterative exploration and execution of code. You must avoid making assumptions and instead verify everything through code execution.
-
- ## Core Principles
- 1. Always execute code to verify assumptions
- 2. Break down complex problems into smaller steps
- 3. Learn from execution results
- 4. Maintain clear communication about your process
-
- ## Available Packages
- You have access to these pre-installed packages:
-
- ### Core Data Science
- - numpy (1.26.4)
- - pandas (1.5.3)
- - scipy (1.12.0)
- - scikit-learn (1.4.1.post1)
-
- ### Visualization
- - matplotlib (3.9.2)
- - seaborn (0.13.2)
- - plotly (5.19.0)
- - bokeh (3.3.4)
- - e2b_charts (latest)
-
- ### Image & Signal Processing
- - opencv-python (4.9.0.80)
- - pillow (9.5.0)
- - scikit-image (0.22.0)
- - imageio (2.34.0)
-
- ### Text & NLP
- - nltk (3.8.1)
- - spacy (3.7.4)
- - gensim (4.3.2)
- - textblob (0.18.0)
-
- ### Audio Processing
- - librosa (0.10.1)
- - soundfile (0.12.1)
-
- ### File Handling
- - python-docx (1.1.0)
- - openpyxl (3.1.2)
- - xlrd (2.0.1)
-
- ### Other Utilities
- - requests (2.26.0)
- - beautifulsoup4 (4.12.3)
- - sympy (1.12)
- - xarray (2024.2.0)
- - joblib (1.3.2)
-
- ## Environment Constraints
- - You cannot install new packages or libraries
- - Work only with pre-installed packages in the environment
- - If a solution requires a package that's not available:
-   1. Check if the task can be solved with base libraries
-   2. Propose alternative approaches using available packages
-   3. Inform the user if the task cannot be completed with current limitations
-
- ## Analysis Protocol
-
- ### 1. Initial Assessment
- - Acknowledge the user's task and explain your high-level approach
- - List any clarifying questions needed before proceeding
- - Identify which available files might be relevant from: {}
- - Verify which required packages are available in the environment
-
- ### 2. Data Exploration
- Execute code to:
- - Read and validate each relevant file
- - Determine file formats (CSV, JSON, etc.)
- - Check basic properties:
-   - Number of rows/records
-   - Column names and data types
-   - Missing values
-   - Basic statistical summaries
- - Share key insights about the data structure
-
- ### 3. Execution Planning
- - Based on the exploration results, outline specific steps to solve the task
- - Break down complex operations into smaller, verifiable steps
- - Identify potential challenges or edge cases
-
- ### 4. Iterative Solution Development
- For each step in your plan:
- - Write and execute code for that specific step
- - Verify the results meet expectations
- - Debug and adjust if needed
- - Document any unexpected findings
- - Only proceed to the next step after current step is working
-
- ### 5. Result Validation
- - Verify the solution meets all requirements
- - Check for edge cases
- - Ensure results are reproducible
- - Document any assumptions or limitations
-
- ## Error Handling Protocol
- When encountering errors:
- 1. Show the error message
- 2. Analyze potential causes
- 3. Propose specific fixes
- 4. Execute modified code
- 5. Verify the fix worked
- 6. Document the solution for future reference
-
- ## Communication Guidelines
- - Explain your reasoning at each step
- - Share relevant execution results
- - Highlight important findings or concerns
- - Ask for clarification when needed
- - Provide context for your decisions
-
- ## Code Execution Rules
- - Execute code through the IPython interpreter directly
- - Understand that the environment is stateful (like a Jupyter notebook):
-   - Variables and objects from previous executions persist
-   - Reference existing variables instead of recreating them
-   - Only rerun code if variables are no longer in memory or need updating
- - Don't rewrite or re-execute code unnecessarily:
-   - Use previously computed results when available
-   - Only rewrite code that needs modification
-   - Indicate when you're using existing variables from previous steps
- - Run code after each significant change
- - Don't show code blocks without executing them
- - Verify results before proceeding
- - Keep code segments focused and manageable
-
- ## Memory Management Guidelines
- - Track important variables and objects across steps
- - Clear large objects when they're no longer needed
- - Inform user about significant objects kept in memory
- - Consider memory impact when working with large datasets:
-   - Avoid creating unnecessary copies of large data
-   - Use inplace operations when appropriate
-   - Clean up intermediate results that won't be needed later
-
- ## Best Practices
- - Use descriptive variable names
- - Include comments for complex operations
- - Handle errors gracefully
- - Clean up resources when done
- - Document any dependencies
- - Prefer base Python libraries when possible
- - Verify package availability before using
- - Leverage existing computations:
-   - Check if required data is already in memory
-   - Reference previous results instead of recomputing
-   - Document which existing variables you're using
-
- Remember: Verification through execution is always better than assumption!
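
The deleted prompt instructs the agent to work only with the pre-installed packages and to "verify package availability before using" them. The prompt itself does not prescribe any particular code for that check; one plausible way to express it inside the sandboxed IPython session is sketched below, with the package names chosen only for illustration.

# Illustrative availability check; not part of the deleted prompt itself.
from importlib.util import find_spec

def available(package: str) -> bool:
    """True if the top-level import name can be resolved in the current environment."""
    return find_spec(package) is not None

for name in ("pandas", "plotly", "some_missing_lib"):
    print(f"{name}: {'available' if available(name) else 'not installed'}")
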
jupyter-agent/jupyter-agent.png DELETED
Binary file
jupyter-agent/llama3_template.jinja DELETED
@@ -1,123 +0,0 @@
- {{- bos_token }}
- {%- if custom_tools is defined %}
- {%- set tools = custom_tools %}
- {%- endif %}
- {%- if not tools_in_user_message is defined %}
- {%- set tools_in_user_message = true %}
- {%- endif %}
- {%- if not date_string is defined %}
- {%- set date_string = "26 Jul 2024" %}
- {%- endif %}
- {%- if not tools is defined %}
- {%- set tools = none %}
- {%- endif %}
-
- {#- This block extracts the system message, so we can slot it into the right place. #}
- {%- if messages[0]['role'] == 'system' %}
- {%- set system_message = messages[0]['content']|trim %}
- {%- set messages = messages[1:] %}
- {%- else %}
- {%- set system_message = "" %}
- {%- endif %}
-
- {#- System message + builtin tools #}
- {{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
- {%- if builtin_tools is defined or tools is not none %}
- {{- "Environment: ipython\n" }}
- {%- endif %}
- {%- if builtin_tools is defined %}
- {%- set filtered_tools = builtin_tools | reject('equalto', 'code_interpreter') | list %}
- {%- if filtered_tools | length > 0 %}
- {{- "Tools: " + filtered_tools | join(", ") + "\n\n"}}
- {%- else %}
- {{- "\n" }}
- {%- endif %}
- {%- endif %}
- {{- "Cutting Knowledge Date: December 2023\n" }}
- {{- "Today Date: " + date_string + "\n\n" }}
- {%- if tools is not none and not tools_in_user_message %}
- {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
- {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
- {{- "Do not use variables.\n\n" }}
- {%- for t in tools %}
- {{- t | tojson(indent=4) }}
- {{- "\n\n" }}
- {%- endfor %}
- {%- endif %}
- {{- system_message }}
- {{- "<|eot_id|>" }}
-
- {#- Custom tools are passed in a user message with some extra guidance #}
- {%- if tools_in_user_message and not tools is none %}
- {#- Extract the first user message so we can plug it in here #}
- {%- if messages | length != 0 %}
- {%- set first_user_message = messages[0]['content']|trim %}
- {%- set messages = messages[1:] %}
- {%- else %}
- {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
- {%- endif %}
- {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
- {{- "Given the following functions, please respond with a JSON for a function call " }}
- {{- "with its proper arguments that best answers the given prompt.\n\n" }}
- {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
- {{- "Do not use variables.\n\n" }}
- {%- for t in tools %}
- {{- t | tojson(indent=4) }}
- {{- "\n\n" }}
- {%- endfor %}
- {{- first_user_message + "<|eot_id|>"}}
- {%- endif %}
-
- {%- for message in messages %}
- {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
- {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim}}
- {%- if loop.nextitem and loop.nextitem.role == 'assistant' and message.role == 'assistant' %}
- {{- '<|eom_id|>' }}
- {%- else %}
- {{- '<|eot_id|>' }}
- {%- endif %}
- {%- elif 'tool_calls' in message %}
- {%- if not message.tool_calls|length == 1 %}
- {{- raise_exception("This model only supports single tool-calls at once!") }}
- {%- endif %}
- {%- set tool_call = message.tool_calls[0].function %}
- {%- if builtin_tools is defined and tool_call.name in builtin_tools %}
- {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
- {%- if tool_call.name == "code_interpreter" %}
- {{- "<|python_tag|>" + tool_call.arguments.code }}
- {%- else %}
- {{- "<|python_tag|>" + tool_call.name + ".call(" }}
- {%- for arg_name, arg_val in tool_call.arguments | items %}
- {{- arg_name + '="' + arg_val + '"' }}
- {%- if not loop.last %}
- {{- ", " }}
- {%- endif %}
- {%- endfor %}
- {{- ")" }}
- {%- endif %}
- {%- else %}
- {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
- {{- '{"name": "' + tool_call.name + '", ' }}
- {{- '"parameters": ' }}
- {{- tool_call.arguments | tojson }}
- {{- "}" }}
- {%- endif %}
- {%- if builtin_tools is defined %}
- {#- This means we're in ipython mode #}
- {{- "<|eom_id|>" }}
- {%- else %}
- {{- "<|eot_id|>" }}
- {%- endif %}
- {%- elif message.role == "tool" or message.role == "ipython" %}
- {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
- {%- if not message.content is string and (message.content is mapping or message.content is iterable) %}
- {{- message.content | tojson }}
- {%- else %}
- {{- message.content }}
- {{- "<|eot_id|>" }}
- {%- endif %}
- {%- endif %}
- {%- endfor %}
- {%- if add_generation_prompt %}
- {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
- {%- endif %}
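
The deleted llama3_template.jinja is a Llama 3-style chat template: it emits the system header, optional tool definitions, and <|eot_id|>/<|eom_id|> terminators, and raises an exception if tools are supplied without a first user message. This diff does not show how (or whether) the removed utils.py consumed the file, so the following is only a plausible sketch of rendering such a template with transformers; the model id and messages are placeholders.

# Sketch: render a custom Jinja chat template with a transformers tokenizer.
# The model id and messages are placeholders; the deleted code's actual usage is not shown here.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
with open("llama3_template.jinja", "r", encoding="utf-8") as f:
    tokenizer.chat_template = f.read()  # override the tokenizer's built-in template

messages = [
    {"role": "system", "content": "You are a data science assistant."},
    {"role": "user", "content": "Plot a sine wave."},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
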
jupyter-agent/requirements.txt DELETED
@@ -1,9 +0,0 @@
- nbformat
- nbconvert
- huggingface_hub
- e2b-code-interpreter
- transformers
- traitlets
- gradio
- ## For local development, to load environment variables from a .env file
- python-dotenv
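
The final entry's comment notes that python-dotenv is needed only for local development, which matches the guarded import in the deleted app.py. A minimal sketch of that local pattern follows; the variable names mirror app.py, and the .env file is assumed to sit next to the script.

# Sketch of the local-development pattern the requirements comment refers to.
import os

try:
    from dotenv import load_dotenv  # optional; only needed outside hosted environments
    load_dotenv()  # reads KEY=value pairs from a nearby .env file into os.environ
except ModuleNotFoundError:
    pass  # hosted environments are expected to inject these variables directly

hf_token = os.environ.get("HF_TOKEN")        # same variables the deleted app.py reads
e2b_api_key = os.environ.get("E2B_API_KEY")
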