ivoryos 1.2.5__py3-none-any.whl → 1.4.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docs/source/conf.py +84 -0
- ivoryos/__init__.py +16 -246
- ivoryos/app.py +154 -0
- ivoryos/optimizer/ax_optimizer.py +55 -28
- ivoryos/optimizer/base_optimizer.py +20 -1
- ivoryos/optimizer/baybe_optimizer.py +27 -17
- ivoryos/optimizer/nimo_optimizer.py +173 -0
- ivoryos/optimizer/registry.py +3 -1
- ivoryos/routes/auth/auth.py +35 -8
- ivoryos/routes/auth/templates/change_password.html +32 -0
- ivoryos/routes/control/control.py +58 -28
- ivoryos/routes/control/control_file.py +12 -15
- ivoryos/routes/control/control_new_device.py +21 -11
- ivoryos/routes/control/templates/controllers.html +27 -0
- ivoryos/routes/control/utils.py +2 -0
- ivoryos/routes/data/data.py +110 -44
- ivoryos/routes/data/templates/components/step_card.html +78 -13
- ivoryos/routes/data/templates/workflow_view.html +343 -113
- ivoryos/routes/design/design.py +59 -10
- ivoryos/routes/design/design_file.py +3 -3
- ivoryos/routes/design/design_step.py +43 -17
- ivoryos/routes/design/templates/components/action_form.html +2 -2
- ivoryos/routes/design/templates/components/canvas_main.html +6 -1
- ivoryos/routes/design/templates/components/edit_action_form.html +18 -3
- ivoryos/routes/design/templates/components/info_modal.html +318 -0
- ivoryos/routes/design/templates/components/instruments_panel.html +23 -1
- ivoryos/routes/design/templates/components/python_code_overlay.html +27 -10
- ivoryos/routes/design/templates/experiment_builder.html +3 -0
- ivoryos/routes/execute/execute.py +82 -22
- ivoryos/routes/execute/templates/components/logging_panel.html +50 -25
- ivoryos/routes/execute/templates/components/run_tabs.html +45 -2
- ivoryos/routes/execute/templates/components/tab_bayesian.html +447 -325
- ivoryos/routes/execute/templates/components/tab_configuration.html +303 -18
- ivoryos/routes/execute/templates/components/tab_repeat.html +6 -2
- ivoryos/routes/execute/templates/experiment_run.html +0 -264
- ivoryos/routes/library/library.py +9 -11
- ivoryos/routes/main/main.py +30 -2
- ivoryos/server.py +180 -0
- ivoryos/socket_handlers.py +1 -1
- ivoryos/static/ivoryos_logo.png +0 -0
- ivoryos/static/js/action_handlers.js +259 -88
- ivoryos/static/js/socket_handler.js +40 -5
- ivoryos/static/js/sortable_design.js +29 -11
- ivoryos/templates/base.html +61 -2
- ivoryos/utils/bo_campaign.py +18 -17
- ivoryos/utils/client_proxy.py +267 -36
- ivoryos/utils/db_models.py +286 -60
- ivoryos/utils/decorators.py +34 -0
- ivoryos/utils/form.py +52 -19
- ivoryos/utils/global_config.py +21 -0
- ivoryos/utils/nest_script.py +314 -0
- ivoryos/utils/py_to_json.py +80 -10
- ivoryos/utils/script_runner.py +573 -189
- ivoryos/utils/task_runner.py +69 -22
- ivoryos/utils/utils.py +48 -5
- ivoryos/version.py +1 -1
- {ivoryos-1.2.5.dist-info → ivoryos-1.4.4.dist-info}/METADATA +109 -47
- ivoryos-1.4.4.dist-info/RECORD +119 -0
- ivoryos-1.4.4.dist-info/top_level.txt +3 -0
- tests/__init__.py +0 -0
- tests/conftest.py +133 -0
- tests/integration/__init__.py +0 -0
- tests/integration/test_route_auth.py +80 -0
- tests/integration/test_route_control.py +94 -0
- tests/integration/test_route_database.py +61 -0
- tests/integration/test_route_design.py +36 -0
- tests/integration/test_route_main.py +35 -0
- tests/integration/test_sockets.py +26 -0
- tests/unit/test_type_conversion.py +42 -0
- tests/unit/test_util.py +3 -0
- ivoryos/routes/api/api.py +0 -56
- ivoryos-1.2.5.dist-info/RECORD +0 -100
- ivoryos-1.2.5.dist-info/top_level.txt +0 -1
- {ivoryos-1.2.5.dist-info → ivoryos-1.4.4.dist-info}/WHEEL +0 -0
- {ivoryos-1.2.5.dist-info → ivoryos-1.4.4.dist-info}/licenses/LICENSE +0 -0
ivoryos/utils/script_runner.py
CHANGED
@@ -1,22 +1,42 @@
 import ast
+import asyncio
 import os
-import csv
 import threading
 import time
 from datetime import datetime
+from typing import List, Dict, Any
+
+import pandas as pd

 from ivoryos.utils import utils, bo_campaign
-from ivoryos.utils.db_models import Script, WorkflowRun, WorkflowStep, db,
+from ivoryos.utils.db_models import Script, WorkflowRun, WorkflowStep, db, WorkflowPhase
 from ivoryos.utils.global_config import GlobalConfig
+from ivoryos.utils.decorators import BUILDING_BLOCKS
+from ivoryos.utils.nest_script import validate_and_nest_control_flow

 global_config = GlobalConfig()
 global deck
 deck = None
 # global deck, registered_workflows
 # deck, registered_workflows = None, None
+class HumanInterventionRequired(Exception):
+    pass
+
+def pause(reason="Human intervention required"):
+    handlers = global_config.notification_handlers
+    if handlers:
+        for handler in handlers:
+            try:
+                handler(reason)
+            except Exception as e:
+                print(f"[notify] handler {handler} failed: {e}")
+    # raise error to pause workflow in gui
+    raise HumanInterventionRequired(reason)

 class ScriptRunner:
     def __init__(self, globals_dict=None):
+        self.logger = None
+        self.socketio = None
         self.retry = False
         if globals_dict is None:
             globals_dict = globals()
@@ -59,13 +79,23 @@ class ScriptRunner:
         """Force stop everything, including ongoing tasks."""
         self.stop_current_event.set()
         self.abort_pending()
+        if not self.pause_event.is_set():
+            self.pause_event.set()
+        if self.lock.locked():
+            self.lock.release()


-    def run_script(self, script, repeat_count=1, run_name=None, logger=None, socketio=None, config=None,
-                   output_path="", compiled=False, current_app=None, history=None, optimizer=None
+    def run_script(self, script, repeat_count=1, run_name=None, logger=None, socketio=None, config=None,
+                   output_path="", compiled=False, current_app=None, history=None, optimizer=None, batch_mode=None,
+                   batch_size=1, objectives=None, parameters=None, constraints=None, steps=None, optimizer_cls=None):
+
+
+        self.socketio = socketio
+        self.logger = logger
         global deck
         if deck is None:
             deck = global_config.deck
+
         # print("history", history)
         if self.current_app is None:
             self.current_app = current_app
@@ -73,21 +103,23 @@ class ScriptRunner:

         # Try to acquire lock without blocking
         if not self.lock.acquire(blocking=False):
-            if logger:
-                logger.info("System is busy. Please wait for it to finish or stop it before starting a new one.")
+            if self.logger:
+                self.logger.info("System is busy. Please wait for it to finish or stop it before starting a new one.")
             return None

         self.reset_stop_event()

         thread = threading.Thread(
             target=self._run_with_stop_check,
-            args=(script, repeat_count, run_name,
-                  history, optimizer)
+            args=(script, repeat_count, run_name, config, output_path, current_app, compiled,
+                  history, optimizer, batch_mode, batch_size, objectives, parameters, constraints, steps, optimizer_cls),
         )
         thread.start()
         return thread

-
+
+
+    async def exec_steps(self, script, section_name, phase_id, kwargs_list=None, batch_size=1):
         """
         Executes a function defined in a string line by line
         :param func_str: The function as a string
@@ -95,6 +127,8 @@ class ScriptRunner:
         :return: The final result of the function execution
         """
         _func_str = script.python_script or script.compile()
+        _, return_list = script.config_return()
+
         step_list: list = script.convert_to_lines(_func_str).get(section_name, [])
         global deck
         # global deck, registered_workflows
@@ -110,262 +144,302 @@ class ScriptRunner:
         # Parse function body from string
         temp_connections = global_config.defined_variables
         # Prepare execution environment
-        exec_globals = {"deck": deck, "time":time} # Add required global objects
+        exec_globals = {"deck": deck, "time":time, "pause": pause} # Add required global objects
         # exec_globals = {"deck": deck, "time": time, "registered_workflows":registered_workflows} # Add required global objects
         exec_globals.update(temp_connections)
+
         exec_locals = {} # Local execution scope

         # Define function arguments manually in exec_locals
-        exec_locals.update(kwargs)
+        # exec_locals.update(kwargs)
         index = 0
+        if kwargs_list:
+            results = kwargs_list.copy()
+        else:
+            results = [{} for _ in range(batch_size)]
+        nest_script = validate_and_nest_control_flow(script.script_dict.get(section_name, []))

-
-        while index < len(step_list):
-            if self.stop_current_event.is_set():
-                logger.info(f'Stopping execution during {section_name}')
-                step = WorkflowStep(
-                    workflow_id=run_id,
-                    phase=section_name,
-                    repeat_index=i_progress,
-                    step_index=index,
-                    method_name="stop",
-                    start_time=datetime.now(),
-                    end_time=datetime.now(),
-                    run_error=False,
-                )
-                db.session.add(step)
-                break
-            line = step_list[index]
-            method_name = line.strip().split("(")[0] if "(" in line else line.strip()
-            start_time = datetime.now()
-            step = WorkflowStep(
-                workflow_id=run_id,
-                phase=section_name,
-                repeat_index=i_progress,
-                step_index=index,
-                method_name=method_name,
-                start_time=start_time,
-            )
-            db.session.add(step)
-            db.session.commit()
-            logger.info(f"Executing: {line}")
-            socketio.emit('execution', {'section': f"{section_name}-{index}"})
-            # self._emit_progress(socketio, 100)
-            # if line.startswith("registered_workflows"):
-            #     line = line.replace("registered_workflows.", "")
-            try:
-                if line.startswith("time.sleep("): # add safe sleep for time.sleep lines
-                    duration_str = line[len("time.sleep("):-1]
-                    duration = float(duration_str)
-                    self.safe_sleep(duration)
-                else:
-                    exec(line, exec_globals, exec_locals)
-                step.run_error = False
-            except Exception as e:
-                logger.error(f"Error during script execution: {e}")
-                socketio.emit('error', {'message': str(e)})
-
-                step.run_error = True
-                self.toggle_pause()
-            step.end_time = datetime.now()
-            # db.session.add(step)
-            db.session.commit()
+        await self._execute_steps_batched(nest_script, results, phase_id=phase_id, section_name=section_name)

-
+        return results  # Return the 'results' variable

-
-
-
-            if not step.run_error:
-                index += 1
-            elif not self.retry:
-                index += 1
-        return exec_locals # Return the 'results' variable
-
-    def _run_with_stop_check(self, script: Script, repeat_count: int, run_name: str, logger, socketio, config, bo_args,
-                             output_path, current_app, compiled, history=None, optimizer=None):
+    def _run_with_stop_check(self, script: Script, repeat_count: int, run_name: str, config,
+                             output_path, current_app, compiled, history=None, optimizer=None, batch_mode=None,
+                             batch_size=None, objectives=None, parameters=None, constraints=None, steps=None, optimizer_cls=None):
         time.sleep(1)
         # _func_str = script.compile()
         # step_list_dict: dict = script.convert_to_lines(_func_str)
-        self._emit_progress(
+        self._emit_progress(1)
         filename = None
         error_flag = False
         # create a new run entry in the database
-
-
-
-
-
-
-
+        repeat_mode = "batch" if config else "optimizer" if optimizer else "repeat"
+        if optimizer_cls is not None:
+            # try:
+            if self.logger:
+                self.logger.info(f"Initializing optimizer {optimizer_cls.__name__}")
+            optimizer = optimizer_cls(experiment_name=run_name, parameter_space=parameters, objective_config=objectives,
+                                      parameter_constraints=constraints,
+                                      optimizer_config=steps, datapath=output_path)
+            current_app.config["LAST_OPTIMIZER"] = optimizer
+            # except Exception as e:
+            #     if self.logger:
+            #         self.logger.error(f"Error during optimizer initialization: {e.__str__()}")
+
+        with current_app.app_context():
+            run = WorkflowRun(name=script.name or "untitled", platform=script.deck or "deck", start_time=datetime.now(),
+                              repeat_mode=repeat_mode
+                              )
+            db.session.add(run)
+            db.session.flush()
+            run_id = run.id  # Save the ID
+            db.session.commit()

+            try:
+                # if True:
+                global_config.runner_status = {"id":run_id, "type": "workflow"}
                 # Run "prep" section once
-                self._run_actions(script, section_name="prep",
+                asyncio.run(self._run_actions(script, section_name="prep", run_id=run_id))
                 output_list = []
                 _, arg_type = script.config("script")
                 _, return_list = script.config_return()
                 # Run "script" section multiple times
                 if repeat_count:
-
-
-
+                    asyncio.run(
+                        self._run_repeat_section(repeat_count, arg_type, output_list, script,
+                                                 run_name, return_list, compiled,
+                                                 history, output_path, run_id=run_id, optimizer=optimizer,
+                                                 batch_mode=batch_mode, batch_size=batch_size, objectives=objectives)
+                    )
                 elif config:
-
-
+                    asyncio.run(
+                        self._run_config_section(
+                            config, arg_type, output_list, script, run_name,
+                            run_id=run_id, compiled=compiled, batch_mode=batch_mode, batch_size=batch_size
+                        )
+                    )
+
                 # Run "cleanup" section once
-                self._run_actions(script, section_name="cleanup",
+                asyncio.run(self._run_actions(script, section_name="cleanup", run_id=run_id))
                 # Reset the running flag when done
-
                 # Save results if necessary
+                if not script.python_script and return_list:
+                    # print(output_list)

-
-                filename = self._save_results(run_name, arg_type, return_list, output_list, logger, output_path)
-                self._emit_progress(socketio, 100)
+                    filename = self._save_results(run_name, arg_type, return_list, output_list, output_path)

-
-
-
-
-
-
-
+
+            except Exception as e:
+                if self.logger:
+                    self.logger.error(f"Error during script execution: {e.__str__()}")
+                error_flag = True
+            finally:
+                self._emit_progress(100)
+                if self.lock.locked():
+                    self.lock.release()
+
+
+        with current_app.app_context():
+            run = db.session.get(WorkflowRun, run_id)
+            if run is None:
+                if self.logger:
+                    self.logger.info("Error: Run not found in database.")
+            else:
                 run.end_time = datetime.now()
-                run.
+                run.data_path = filename
                 run.run_error = error_flag
                 db.session.commit()


-    def _run_actions(self, script, section_name="",
+    async def _run_actions(self, script, section_name="", run_id=None):
         _func_str = script.python_script or script.compile()
         step_list: list = script.convert_to_lines(_func_str).get(section_name, [])
-
+        if not step_list:
+            if self.logger:
+                self.logger.info(f'No {section_name} steps')
+            return None
+        if self.logger:
+            self.logger.info(f'Executing {section_name} steps')
         if self.stop_pending_event.is_set():
-
-
-
-            self.exec_steps(script, section_name, logger, socketio, run_id=run_id, i_progress=0)
+            if self.logger:
+                self.logger.info(f"Stopping execution during {section_name} section.")
+            return None

-
+        phase = WorkflowPhase(
+            run_id=run_id,
+            name=section_name,
+            repeat_index=0,
+            start_time=datetime.now()
+        )
+        db.session.add(phase)
+        db.session.flush()
+        phase_id = phase.id
+
+        step_outputs = await self.exec_steps(script, section_name, phase_id=phase_id)
+        # Save phase-level output
+        phase.outputs = step_outputs
+        phase.end_time = datetime.now()
+        db.session.commit()
+        return step_outputs
+
+    async def _run_config_section(self, config, arg_type, output_list, script, run_name, run_id,
+                                  compiled=True, batch_mode=False, batch_size=1):
         if not compiled:
             for i in config:
                 try:
                     i = utils.convert_config_type(i, arg_type)
                     compiled = True
                 except Exception as e:
-                    logger
+                    if self.logger:
+                        self.logger.error(e)
                     compiled = False
                     break
         if compiled:
-
-
+            batch_size = int(batch_size)
+            nested_list = [config[i:i + batch_size] for i in range(0, len(config), batch_size)]
+
+            for i, kwargs_list in enumerate(nested_list):
+                # kwargs = dict(kwargs)
                 if self.stop_pending_event.is_set():
-                    logger
+                    if self.logger:
+                        self.logger.info(f'Stopping execution during {run_name}: {i + 1}/{len(config)}')
                     break
-                logger
-
-
-
-
-
+                if self.logger:
+                    self.logger.info(f'Executing {i + 1} of {len(nested_list)} with kwargs = {kwargs_list}')
+                progress = ((i + 1) * 100 / len(nested_list)) - 0.1
+                self._emit_progress(progress)
+
+                phase = WorkflowPhase(
+                    run_id=run_id,
+                    name="main",
+                    repeat_index=i,
+                    parameters=kwargs_list,
+                    start_time=datetime.now()
+                )
+                db.session.add(phase)
+                db.session.flush()
+
+                phase_id = phase.id
+                output = await self.exec_steps(script, "script", phase_id, kwargs_list=kwargs_list, )
+                # print(output)
                 if output:
                     # kwargs.update(output)
-
-
-
-
-
-
-
-
-
-
-
-
-                    previous_runs = pd.read_csv(file_path).to_dict(orient='records')
-                    ax_client = bo_campaign.ax_init_form(bo_args, arg_types, len(previous_runs))
-                    for row in previous_runs:
-                        parameter = {key: value for key, value in row.items() if key in arg_types.keys()}
-                        raw_data = {key: value for key, value in row.items() if key in return_list}
-                        _, trial_index = ax_client.attach_trial(parameter)
-                        ax_client.complete_trial(trial_index=trial_index, raw_data=raw_data)
-                        output_list.append(row)
-                else:
-                    ax_client = bo_campaign.ax_init_form(bo_args, arg_types)
-            elif optimizer and history:
-                import pandas as pd
+                    for output_dict in output:
+                        output_list.append(output_dict)
+                phase.outputs = output
+                phase.end_time = datetime.now()
+                db.session.commit()
+        return output_list
+
+    async def _run_repeat_section(self, repeat_count, arg_types, output_list, script, run_name, return_list, compiled,
+                                  history, output_path, run_id, optimizer=None, batch_mode=None,
+                                  batch_size=None, objectives=None):
+
+        if optimizer and history:
             file_path = os.path.join(output_path, history)

             previous_runs = pd.read_csv(file_path)
+
+            expected_cols = list(arg_types.keys()) + list(return_list)
+
+            actual_cols = previous_runs.columns.tolist()
+
+            # NOT okay if it misses columns
+            if set(expected_cols) - set(actual_cols):
+                if self.logger:
+                    self.logger.warning(f"Missing columns from history .csv file. Expecting {expected_cols} but got {actual_cols}")
+                raise ValueError("Missing columns from history .csv file.")
+
+            # okay if there is extra columns
+            if set(actual_cols) - set(expected_cols):
+                if self.logger:
+                    self.logger.warning(f"Extra columns from history .csv file. Expecting {expected_cols} but got {actual_cols}")
+
             optimizer.append_existing_data(previous_runs)
-
+
+            for row in previous_runs.to_dict(orient='records'):
                 output_list.append(row)



         for i_progress in range(int(repeat_count)):
             if self.stop_pending_event.is_set():
-                logger
+                if self.logger:
+                    self.logger.info(f'Stopping execution during {run_name}: {i_progress + 1}/{int(repeat_count)}')
                 break
-
+
+            phase = WorkflowPhase(
+                run_id=run_id,
+                name="main",
+                repeat_index=i_progress,
+                start_time=datetime.now()
+            )
+            db.session.add(phase)
+            db.session.flush()
+            phase_id = phase.id
+            if self.logger:
+                self.logger.info(f'Executing {run_name} experiment: {i_progress + 1}/{int(repeat_count)}')
             progress = (i_progress + 1) * 100 / int(repeat_count) - 0.1
-            self._emit_progress(
-
-
-
-                logger.info(f'Output value: {parameters}')
-                # fname = f"{run_name}_script"
-                # function = self.globals_dict[fname]
-                output = self.exec_steps(script, "script", logger, socketio, run_id, i_progress, **parameters)
-
-                _output = {key: value for key, value in output.items() if key in return_list}
-                ax_client.complete_trial(trial_index=trial_index, raw_data=_output)
-                output.update(parameters)
-            except Exception as e:
-                logger.info(f'Optimization error: {e}')
-                break
-            elif optimizer:
+            self._emit_progress(progress)
+
+            # Optimizer for UI
+            if optimizer:
                 try:
-                    parameters = optimizer.suggest(
-                    logger
-
+                    parameters = optimizer.suggest(n=batch_size)
+                    if self.logger:
+                        self.logger.info(f'Parameters: {parameters}')
+                    phase.parameters = parameters
+
+                    output = await self.exec_steps(script, "script", phase_id, kwargs_list=parameters)
                     if output:
                         optimizer.observe(output)
-
+
+                    else:
+                        if self.logger:
+                            self.logger.info('No output from script')
+
+
                 except Exception as e:
-                    logger
+                    if self.logger:
+                        self.logger.info(f'Optimization error: {e}')
                     break
             else:
-
-
-                output = self.exec_steps(script, "script", logger, socketio, run_id, i_progress)
+
+                output = await self.exec_steps(script, "script", phase_id, batch_size=batch_size)

             if output:
-
-
+                # print("output: ", output)
+                output_list.extend(output)
+                if self.logger:
+                    self.logger.info(f'Output value: {output}')
+                phase.outputs = output
+
+            phase.end_time = datetime.now()
+            db.session.commit()
+
+            if optimizer and self._check_early_stop(output, objectives):
+                if self.logger:
+                    self.logger.info('Early stopping')
+                break
+

-        if bo_args:
-            ax_client.save_to_json_file(os.path.join(output_path, f"{run_name}_ax_client.json"))
-            logger.info(
-                f'Optimization complete. Results saved to {os.path.join(output_path, f"{run_name}_ax_client.json")}'
-            )
         return output_list

-
-
-
-        args.extend(return_list)
+    def _save_results(self, run_name, arg_type, return_list, output_list, output_path):
+        output_columns = list(arg_type.keys()) + list(return_list)
+
         filename = run_name + "_" + datetime.now().strftime("%Y-%m-%d %H-%M") + ".csv"
         file_path = os.path.join(output_path, filename)
-
-
-
-
-        logger
+        df = pd.DataFrame(output_list)
+        df = df.loc[:, [c for c in output_columns if c in df.columns]]
+
+        df. to_csv(file_path, index=False)
+        if self.logger:
+            self.logger.info(f'Results saved to {file_path}')
         return filename

-
-
-        socketio.emit('progress', {'progress': progress})
+    def _emit_progress(self, progress):
+        self.socketio.emit('progress', {'progress': progress})

     def safe_sleep(self, duration: float):
         interval = 1 # check every 1 second
@@ -383,4 +457,314 @@ class ScriptRunner:
             "paused": self.paused,
             "stop_pending": self.stop_pending_event.is_set(),
             "stop_current": self.stop_current_event.is_set(),
-        }
+        }
+
+
+    async def _execute_steps_batched(self, steps: List[Dict], contexts: List[Dict[str, Any]], phase_id, section_name):
+        """
+        Execute a list of steps for multiple samples, batching where appropriate.
+        """
+        for step in steps:
+            action = step["action"]
+            instrument = step["instrument"]
+            action_id = step["id"]
+            if action == "if":
+                await self._execute_if_batched(step, contexts, phase_id=phase_id, step_index=action_id,
+                                               section_name=section_name)
+            elif action == "repeat":
+                await self._execute_repeat_batched(step, contexts, phase_id=phase_id, step_index=action_id,
+                                                   section_name=section_name)
+            elif action == "while":
+                await self._execute_while_batched(step, contexts, phase_id=phase_id, step_index=action_id,
+                                                  section_name=section_name)
+            elif instrument == "variable":
+                await self._execute_variable_batched(step, contexts, phase_id=phase_id, step_index=action_id,
+                                                     section_name=section_name)
+                # print("Variable executed", "current context", contexts)
+            else:
+                # Regular action - check if batch
+                if step.get("batch_action", False):
+                    # Execute once for all samples
+                    await self._execute_action_once(step, contexts[0], phase_id=phase_id, step_index=action_id,
+                                                    section_name=section_name)
+
+                else:
+                    # Execute for each sample
+                    for context in contexts:
+                        await self._execute_action(step, context, phase_id=phase_id, step_index=action_id,
+                                                   section_name=section_name)
+            self.pause_event.wait()
+
+
+
+    async def _execute_if_batched(self, step: Dict, contexts: List[Dict[str, Any]], phase_id, step_index, section_name):
+        """Execute if/else block for multiple samples."""
+        # Evaluate condition for each sample
+        for context in contexts:
+            condition = self._evaluate_condition(step["args"]["statement"], context)
+
+            if condition:
+                await self._execute_steps_batched(step["if_block"], [context], phase_id=phase_id, section_name=section_name)
+            else:
+                await self._execute_steps_batched(step["else_block"], [context], phase_id=phase_id, section_name=section_name)
+
+
+    async def _execute_repeat_batched(self, step: Dict, contexts: List[Dict[str, Any]], phase_id, step_index, section_name):
+        """Execute repeat block for multiple samples."""
+        times = step["args"].get("statement", 1)
+
+        for i in range(times):
+            # Add repeat index to all contexts
+            # for context in contexts:
+            #     context["repeat_index"] = i
+
+            await self._execute_steps_batched(step["repeat_block"], contexts, phase_id=phase_id, section_name=section_name)
+
+
+    async def _execute_while_batched(self, step: Dict, contexts: List[Dict[str, Any]], phase_id, step_index, section_name):
+        """Execute while block for multiple samples."""
+        max_iterations = step["args"].get("max_iterations", 1000)
+        active_contexts = contexts.copy()
+        iteration = 0
+
+        while iteration < max_iterations and active_contexts:
+            # Filter contexts that still meet the condition
+            still_active = []
+
+            for context in active_contexts:
+                condition = self._evaluate_condition(step["args"]["statement"], context)
+
+                if condition:
+                    context["while_index"] = iteration
+                    still_active.append(context)
+
+            if not still_active:
+                break
+
+            # Execute for contexts that are still active
+            await self._execute_steps_batched(step["while_block"], still_active, phase_id=phase_id, section_name=section_name)
+            active_contexts = still_active
+            iteration += 1
+
+        if iteration >= max_iterations:
+            raise RuntimeError(f"While loop exceeded max iterations ({max_iterations})")
+
+    async def _execute_action(self, step: Dict, context: Dict[str, Any], phase_id=1, step_index=1, section_name=None):
+        """Execute a single action with parameter substitution."""
+        # Substitute parameters in args
+        if self.stop_current_event.is_set():
+            return context
+        substituted_args = self._substitute_params(step["args"], context)
+
+        # Get the component and method
+        instrument = step.get("instrument", "")
+        action = step["action"]
+        if instrument and "." in instrument:
+            instrument_type, instrument = instrument.split(".")
+        else:
+            instrument_type = ""
+        # Execute the action
+        step_db = WorkflowStep(
+            phase_id=phase_id,
+            step_index=step_index,
+            method_name=action,
+            start_time=datetime.now(),
+        )
+        db.session.add(step_db)
+        db.session.flush()
+        try:
+
+            # print(f"step {section_name}-{step_index}")
+            self.socketio.emit('execution', {'section': f"{section_name}-{step_index-1}"})
+            if action == "wait":
+                duration = float(substituted_args["statement"])
+                self.safe_sleep(duration)
+
+            elif action == "pause":
+                msg = substituted_args.get("statement", "")
+                pause(msg)
+
+            elif instrument_type == "deck" and hasattr(deck, instrument):
+                component = getattr(deck, instrument)
+                if hasattr(component, action):
+                    method = getattr(component, action)
+
+                    # Execute and handle return value
+                    if step.get("coroutine", False):
+                        result = await method(**substituted_args)
+                    else:
+                        result = method(**substituted_args)
+
+                    # Store return value if specified
+                    return_var = step.get("return", "")
+                    if return_var:
+                        context[return_var] = result
+
+            elif instrument_type == "blocks" and instrument in BUILDING_BLOCKS.keys():
+                # Inject all block categories
+                method_collection = BUILDING_BLOCKS[instrument]
+                if action in method_collection.keys():
+                    method = method_collection[action]["func"]
+
+                    # Execute and handle return value
+                    # print(step.get("coroutine", False))
+                    if step.get("coroutine", False):
+                        result = await method(**substituted_args)
+                    else:
+                        result = method(**substituted_args)
+
+                    # Store return value if specified
+                    return_var = step.get("return", "")
+                    if return_var:
+                        context[return_var] = result
+        except HumanInterventionRequired as e:
+            self.logger.warning(f"Human intervention required: {e}")
+            self.socketio.emit('human_intervention', {'message': str(e)})
+            # Instead of auto-resume, explicitly stay paused until user action
+            # step.run_error = False
+            self.toggle_pause()
+
+        except Exception as e:
+            self.logger.error(f"Error during script execution: {e}")
+            self.socketio.emit('error', {'message': str(e)})
+
+            step_db.run_error = True
+            self.toggle_pause()
+        finally:
+            step_db.end_time = datetime.now()
+            step_db.output = context
+            db.session.commit()
+
+        self.pause_event.wait()
+
+        return context
+
+    async def _execute_action_once(self, step: Dict, context: Dict[str, Any], phase_id, step_index, section_name):
+        """Execute a batch action once (not per sample)."""
+        # print(f"Executing batch action: {step['action']}")
+        return await self._execute_action(step, context, phase_id=phase_id, step_index=step_index, section_name=section_name)
+
+    @staticmethod
+    def _substitute_params(args: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
+        """Substitute parameter placeholders like #param_1 with actual values."""
+        substituted = {}
+
+        for key, value in args.items():
+            if isinstance(value, str) and value.startswith("#"):
+                param_name = value[1:]  # Remove '#'
+                substituted[key] = context.get(param_name)
+            else:
+                substituted[key] = value
+
+        return substituted
+
+    @staticmethod
+    def _evaluate_condition(condition_str: str, context: Dict[str, Any]) -> bool:
+        """
+        Safely evaluate a condition string with context variables.
+        """
+        # Create evaluation context with all variables
+        eval_context = {}
+
+        # Substitute variables in the condition string
+        substituted = condition_str
+        for key, value in context.items():
+            # Replace #variable with actual variable name for eval
+            substituted = substituted.replace(f"#{key}", key)
+            # Add variable to eval context
+            eval_context[key] = value
+
+        try:
+            # Safe evaluation with variables in scope
+            result = eval(substituted, {"__builtins__": {}}, eval_context)
+            return bool(result)
+        except Exception as e:
+            raise ValueError(f"Error evaluating condition '{condition_str}': {e}")
+
+    def _check_early_stop(self, output, objectives):
+        for row in output:
+            all_met = True
+            for obj in objectives:
+                name = obj['name']
+                minimize = obj.get('minimize', True)
+                threshold = obj.get('early_stop', None)
+
+                if threshold is None:
+                    all_met = False
+                    break# Skip if no early stop defined
+
+                value = row[name]
+                if minimize and value > threshold:
+                    all_met = False
+                    break
+                elif not minimize and value < threshold:
+                    all_met = False
+                    break
+
+            if all_met:
+                return True  # At least one row meets all early stop thresholds
+
+        return False  # No row met all thresholds
+
+    async def _execute_variable_batched(self, step: Dict, contexts: List[Dict[str, Any]], phase_id, step_index,
+                                        section_name):
+        """Execute variable assignment for multiple samples."""
+        var_name = step["action"]  # "vial" in your example
+        var_value = step["args"]["statement"]
+        arg_type = step["arg_types"]["statement"]
+
+        for context in contexts:
+            # Substitute any variable references in the value
+            if isinstance(var_value, str):
+                substituted_value = var_value
+
+                # Replace all variable references (with or without #) with their values
+                for key, val in context.items():
+                    # Handle both #variable and variable (without #)
+                    substituted_value = substituted_value.replace(f"#{key}", str(val))
+                    # For expressions like "vial+10", replace variable name directly
+                    # Use word boundaries to avoid partial matches
+                    import re
+                    substituted_value = re.sub(r'\b' + re.escape(key) + r'\b', str(val), substituted_value)
+
+                # Handle based on type
+                if arg_type == "float":
+                    try:
+                        # Evaluate as expression (e.g., "10.0+10" becomes 20.0)
+                        result = eval(substituted_value, {"__builtins__": {}}, {})
+                        context[var_name] = float(result)
+                    except:
+                        # If eval fails, try direct conversion
+                        context[var_name] = float(substituted_value)
+
+                elif arg_type == "int":
+                    try:
+                        result = eval(substituted_value, {"__builtins__": {}}, {})
+                        context[var_name] = int(result)
+                    except:
+                        context[var_name] = int(substituted_value)
+
+                elif arg_type == "bool":
+                    try:
+                        # Evaluate boolean expressions
+                        result = eval(substituted_value, {"__builtins__": {}}, {})
+                        context[var_name] = bool(result)
+                    except:
+                        context[var_name] = substituted_value.lower() in ['true', '1', 'yes']
+
+                else:  # "str"
+                    # For strings, check if it looks like an expression
+                    if any(char in substituted_value for char in ['+', '-', '*', '/', '>', '<', '=', '(', ')']):
+                        try:
+                            # Try to evaluate as expression
+                            result = eval(substituted_value, {"__builtins__": {}}, context)
+                            context[var_name] = result
+                        except:
+                            # If eval fails, store as string
+                            context[var_name] = substituted_value
+                    else:
+                        context[var_name] = substituted_value
+            else:
+                # Direct numeric or boolean value
+                context[var_name] = var_value
+
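
Note on the pause/human-intervention hooks introduced above: the new pause() helper is injected into the workflow execution namespace (exec_globals includes "pause": pause), notifies any registered global_config.notification_handlers, and then raises HumanInterventionRequired, which _execute_action catches to emit a 'human_intervention' socket event and keep the run paused. The sketch below is illustrative only and is not part of the package diff; it assumes GlobalConfig().notification_handlers behaves like an ordinary mutable list (only its iteration is visible in this diff).

# Illustrative sketch (not from the package): wiring a notification handler
# for the pause()/HumanInterventionRequired mechanism shown in the diff above.
# Assumption: GlobalConfig().notification_handlers is a plain list attribute.
from ivoryos.utils.global_config import GlobalConfig
from ivoryos.utils.script_runner import HumanInterventionRequired, pause

global_config = GlobalConfig()

def notify_operator(reason: str) -> None:
    # Stand-in for Slack/email/etc.; exceptions raised here are caught and
    # printed by pause() before it re-raises HumanInterventionRequired.
    print(f"Operator attention needed: {reason}")

handlers = getattr(global_config, "notification_handlers", None) or []
handlers.append(notify_operator)
global_config.notification_handlers = handlers

try:
    # Inside a workflow script this is simply pause("..."), since the runner
    # exposes it through exec_globals; calling it directly raises the exception.
    pause("Refill the solvent reservoir before continuing")
except HumanInterventionRequired:
    # In a GUI-driven run, ScriptRunner catches this instead, emits the
    # 'human_intervention' socket event, and stays paused until resumed.
    pass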