ivoryos 1.0.9__py3-none-any.whl → 1.4.4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- docs/source/conf.py +84 -0
- ivoryos/__init__.py +17 -207
- ivoryos/app.py +154 -0
- ivoryos/config.py +1 -0
- ivoryos/optimizer/ax_optimizer.py +191 -0
- ivoryos/optimizer/base_optimizer.py +84 -0
- ivoryos/optimizer/baybe_optimizer.py +193 -0
- ivoryos/optimizer/nimo_optimizer.py +173 -0
- ivoryos/optimizer/registry.py +11 -0
- ivoryos/routes/auth/auth.py +43 -14
- ivoryos/routes/auth/templates/change_password.html +32 -0
- ivoryos/routes/control/control.py +101 -366
- ivoryos/routes/control/control_file.py +33 -0
- ivoryos/routes/control/control_new_device.py +152 -0
- ivoryos/routes/control/templates/controllers.html +193 -0
- ivoryos/routes/control/templates/controllers_new.html +112 -0
- ivoryos/routes/control/utils.py +40 -0
- ivoryos/routes/data/data.py +197 -0
- ivoryos/routes/data/templates/components/step_card.html +78 -0
- ivoryos/routes/{database/templates/database → data/templates}/workflow_database.html +14 -8
- ivoryos/routes/data/templates/workflow_view.html +360 -0
- ivoryos/routes/design/__init__.py +4 -0
- ivoryos/routes/design/design.py +348 -657
- ivoryos/routes/design/design_file.py +68 -0
- ivoryos/routes/design/design_step.py +171 -0
- ivoryos/routes/design/templates/components/action_form.html +53 -0
- ivoryos/routes/design/templates/components/actions_panel.html +25 -0
- ivoryos/routes/design/templates/components/autofill_toggle.html +10 -0
- ivoryos/routes/design/templates/components/canvas.html +5 -0
- ivoryos/routes/design/templates/components/canvas_footer.html +9 -0
- ivoryos/routes/design/templates/components/canvas_header.html +75 -0
- ivoryos/routes/design/templates/components/canvas_main.html +39 -0
- ivoryos/routes/design/templates/components/deck_selector.html +10 -0
- ivoryos/routes/design/templates/components/edit_action_form.html +53 -0
- ivoryos/routes/design/templates/components/info_modal.html +318 -0
- ivoryos/routes/design/templates/components/instruments_panel.html +88 -0
- ivoryos/routes/design/templates/components/modals/drop_modal.html +17 -0
- ivoryos/routes/design/templates/components/modals/json_modal.html +22 -0
- ivoryos/routes/design/templates/components/modals/new_script_modal.html +17 -0
- ivoryos/routes/design/templates/components/modals/rename_modal.html +23 -0
- ivoryos/routes/design/templates/components/modals/saveas_modal.html +27 -0
- ivoryos/routes/design/templates/components/modals.html +6 -0
- ivoryos/routes/design/templates/components/python_code_overlay.html +56 -0
- ivoryos/routes/design/templates/components/sidebar.html +15 -0
- ivoryos/routes/design/templates/components/text_to_code_panel.html +20 -0
- ivoryos/routes/design/templates/experiment_builder.html +44 -0
- ivoryos/routes/execute/__init__.py +0 -0
- ivoryos/routes/execute/execute.py +377 -0
- ivoryos/routes/execute/execute_file.py +78 -0
- ivoryos/routes/execute/templates/components/error_modal.html +20 -0
- ivoryos/routes/execute/templates/components/logging_panel.html +56 -0
- ivoryos/routes/execute/templates/components/progress_panel.html +27 -0
- ivoryos/routes/execute/templates/components/run_panel.html +9 -0
- ivoryos/routes/execute/templates/components/run_tabs.html +60 -0
- ivoryos/routes/execute/templates/components/tab_bayesian.html +520 -0
- ivoryos/routes/execute/templates/components/tab_configuration.html +383 -0
- ivoryos/routes/execute/templates/components/tab_repeat.html +18 -0
- ivoryos/routes/execute/templates/experiment_run.html +30 -0
- ivoryos/routes/library/__init__.py +0 -0
- ivoryos/routes/library/library.py +157 -0
- ivoryos/routes/{database/templates/database/scripts_database.html → library/templates/library.html} +32 -23
- ivoryos/routes/main/main.py +31 -3
- ivoryos/routes/main/templates/{main/home.html → home.html} +4 -4
- ivoryos/server.py +180 -0
- ivoryos/socket_handlers.py +52 -0
- ivoryos/static/ivoryos_logo.png +0 -0
- ivoryos/static/js/action_handlers.js +384 -0
- ivoryos/static/js/db_delete.js +23 -0
- ivoryos/static/js/script_metadata.js +39 -0
- ivoryos/static/js/socket_handler.js +40 -5
- ivoryos/static/js/sortable_design.js +107 -56
- ivoryos/static/js/ui_state.js +114 -0
- ivoryos/templates/base.html +67 -8
- ivoryos/utils/bo_campaign.py +180 -3
- ivoryos/utils/client_proxy.py +267 -36
- ivoryos/utils/db_models.py +300 -65
- ivoryos/utils/decorators.py +34 -0
- ivoryos/utils/form.py +63 -29
- ivoryos/utils/global_config.py +34 -1
- ivoryos/utils/nest_script.py +314 -0
- ivoryos/utils/py_to_json.py +295 -0
- ivoryos/utils/script_runner.py +599 -165
- ivoryos/utils/serilize.py +201 -0
- ivoryos/utils/task_runner.py +71 -21
- ivoryos/utils/utils.py +50 -6
- ivoryos/version.py +1 -1
- ivoryos-1.4.4.dist-info/METADATA +263 -0
- ivoryos-1.4.4.dist-info/RECORD +119 -0
- {ivoryos-1.0.9.dist-info → ivoryos-1.4.4.dist-info}/WHEEL +1 -1
- {ivoryos-1.0.9.dist-info → ivoryos-1.4.4.dist-info}/top_level.txt +1 -0
- tests/unit/test_type_conversion.py +42 -0
- tests/unit/test_util.py +3 -0
- ivoryos/routes/control/templates/control/controllers.html +0 -78
- ivoryos/routes/control/templates/control/controllers_home.html +0 -55
- ivoryos/routes/control/templates/control/controllers_new.html +0 -89
- ivoryos/routes/database/database.py +0 -306
- ivoryos/routes/database/templates/database/step_card.html +0 -7
- ivoryos/routes/database/templates/database/workflow_view.html +0 -130
- ivoryos/routes/design/templates/design/experiment_builder.html +0 -521
- ivoryos/routes/design/templates/design/experiment_run.html +0 -558
- ivoryos-1.0.9.dist-info/METADATA +0 -218
- ivoryos-1.0.9.dist-info/RECORD +0 -61
- /ivoryos/routes/auth/templates/{auth/login.html → login.html} +0 -0
- /ivoryos/routes/auth/templates/{auth/signup.html → signup.html} +0 -0
- /ivoryos/routes/{database → data}/__init__.py +0 -0
- /ivoryos/routes/main/templates/{main/help.html → help.html} +0 -0
- {ivoryos-1.0.9.dist-info → ivoryos-1.4.4.dist-info/licenses}/LICENSE +0 -0
ivoryos/utils/script_runner.py
CHANGED
@@ -1,22 +1,42 @@
 import ast
+import asyncio
 import os
-import csv
 import threading
 import time
 from datetime import datetime
+from typing import List, Dict, Any
+
+import pandas as pd

 from ivoryos.utils import utils, bo_campaign
-from ivoryos.utils.db_models import Script, WorkflowRun, WorkflowStep, db,
+from ivoryos.utils.db_models import Script, WorkflowRun, WorkflowStep, db, WorkflowPhase
 from ivoryos.utils.global_config import GlobalConfig
+from ivoryos.utils.decorators import BUILDING_BLOCKS
+from ivoryos.utils.nest_script import validate_and_nest_control_flow

 global_config = GlobalConfig()
 global deck
 deck = None
 # global deck, registered_workflows
 # deck, registered_workflows = None, None
+class HumanInterventionRequired(Exception):
+    pass
+
+def pause(reason="Human intervention required"):
+    handlers = global_config.notification_handlers
+    if handlers:
+        for handler in handlers:
+            try:
+                handler(reason)
+            except Exception as e:
+                print(f"[notify] handler {handler} failed: {e}")
+    # raise error to pause workflow in gui
+    raise HumanInterventionRequired(reason)

 class ScriptRunner:
     def __init__(self, globals_dict=None):
+        self.logger = None
+        self.socketio = None
         self.retry = False
         if globals_dict is None:
             globals_dict = globals()
@@ -59,34 +79,47 @@ class ScriptRunner:
         """Force stop everything, including ongoing tasks."""
         self.stop_current_event.set()
         self.abort_pending()
+        if not self.pause_event.is_set():
+            self.pause_event.set()
+        if self.lock.locked():
+            self.lock.release()
+

+    def run_script(self, script, repeat_count=1, run_name=None, logger=None, socketio=None, config=None,
+                   output_path="", compiled=False, current_app=None, history=None, optimizer=None, batch_mode=None,
+                   batch_size=1, objectives=None, parameters=None, constraints=None, steps=None, optimizer_cls=None):

-
-
+
+        self.socketio = socketio
+        self.logger = logger
         global deck
         if deck is None:
             deck = global_config.deck

+        # print("history", history)
         if self.current_app is None:
             self.current_app = current_app
         # time.sleep(1) # Optional: may help ensure deck readiness

         # Try to acquire lock without blocking
         if not self.lock.acquire(blocking=False):
-            if logger:
-                logger.info("System is busy. Please wait for it to finish or stop it before starting a new one.")
+            if self.logger:
+                self.logger.info("System is busy. Please wait for it to finish or stop it before starting a new one.")
             return None

         self.reset_stop_event()

         thread = threading.Thread(
             target=self._run_with_stop_check,
-            args=(script, repeat_count, run_name,
+            args=(script, repeat_count, run_name, config, output_path, current_app, compiled,
+                  history, optimizer, batch_mode, batch_size, objectives, parameters, constraints, steps, optimizer_cls),
         )
         thread.start()
         return thread

-
+
+
+    async def exec_steps(self, script, section_name, phase_id, kwargs_list=None, batch_size=1):
         """
         Executes a function defined in a string line by line
         :param func_str: The function as a string
@@ -94,6 +127,8 @@ class ScriptRunner:
         :return: The final result of the function execution
         """
         _func_str = script.python_script or script.compile()
+        _, return_list = script.config_return()
+
         step_list: list = script.convert_to_lines(_func_str).get(section_name, [])
         global deck
         # global deck, registered_workflows
@@ -109,213 +144,302 @@ class ScriptRunner:
         # Parse function body from string
         temp_connections = global_config.defined_variables
         # Prepare execution environment
-        exec_globals = {"deck": deck, "time":time} # Add required global objects
+        exec_globals = {"deck": deck, "time":time, "pause": pause} # Add required global objects
         # exec_globals = {"deck": deck, "time": time, "registered_workflows":registered_workflows} # Add required global objects
         exec_globals.update(temp_connections)
+
         exec_locals = {} # Local execution scope

         # Define function arguments manually in exec_locals
-        exec_locals.update(kwargs)
+        # exec_locals.update(kwargs)
         index = 0
+        if kwargs_list:
+            results = kwargs_list.copy()
+        else:
+            results = [{} for _ in range(batch_size)]
+        nest_script = validate_and_nest_control_flow(script.script_dict.get(section_name, []))

-
-        while index < len(step_list):
-            if self.stop_current_event.is_set():
-                logger.info(f'Stopping execution during {section_name}')
-                step = WorkflowStep(
-                    workflow_id=run_id,
-                    phase=section_name,
-                    repeat_index=i_progress,
-                    step_index=index,
-                    method_name="stop",
-                    start_time=datetime.now(),
-                    end_time=datetime.now(),
-                    run_error=False,
-                )
-                db.session.add(step)
-                break
-            line = step_list[index]
-            method_name = line.strip().split("(")[0] if "(" in line else line.strip()
-            start_time = datetime.now()
-            step = WorkflowStep(
-                workflow_id=run_id,
-                phase=section_name,
-                repeat_index=i_progress,
-                step_index=index,
-                method_name=method_name,
-                start_time=start_time,
-            )
-            db.session.add(step)
-            db.session.commit()
-            logger.info(f"Executing: {line}")
-            socketio.emit('execution', {'section': f"{section_name}-{index}"})
-            # self._emit_progress(socketio, 100)
-            # if line.startswith("registered_workflows"):
-            #     line = line.replace("registered_workflows.", "")
-            try:
-                if line.startswith("time.sleep("): # add safe sleep for time.sleep lines
-                    duration_str = line[len("time.sleep("):-1]
-                    duration = float(duration_str)
-                    self.safe_sleep(duration)
-                else:
-                    exec(line, exec_globals, exec_locals)
-                step.run_error = False
-            except Exception as e:
-                logger.error(f"Error during script execution: {e}")
-                socketio.emit('error', {'message': str(e)})
-
-                step.run_error = True
-                self.toggle_pause()
-            step.end_time = datetime.now()
-            # db.session.add(step)
-            db.session.commit()
+        await self._execute_steps_batched(nest_script, results, phase_id=phase_id, section_name=section_name)

-
+        return results  # Return the 'results' variable

-
-
-
-            if not step.run_error:
-                index += 1
-            elif not self.retry:
-                index += 1
-        return exec_locals # Return the 'results' variable
-
-    def _run_with_stop_check(self, script: Script, repeat_count: int, run_name: str, logger, socketio, config, bo_args,
-                             output_path, current_app, compiled):
+    def _run_with_stop_check(self, script: Script, repeat_count: int, run_name: str, config,
+                             output_path, current_app, compiled, history=None, optimizer=None, batch_mode=None,
+                             batch_size=None, objectives=None, parameters=None, constraints=None, steps=None, optimizer_cls=None):
         time.sleep(1)
         # _func_str = script.compile()
         # step_list_dict: dict = script.convert_to_lines(_func_str)
-        self._emit_progress(
+        self._emit_progress(1)
+        filename = None
+        error_flag = False
+        # create a new run entry in the database
+        repeat_mode = "batch" if config else "optimizer" if optimizer else "repeat"
+        if optimizer_cls is not None:
+            # try:
+            if self.logger:
+                self.logger.info(f"Initializing optimizer {optimizer_cls.__name__}")
+            optimizer = optimizer_cls(experiment_name=run_name, parameter_space=parameters, objective_config=objectives,
+                                      parameter_constraints=constraints,
+                                      optimizer_config=steps, datapath=output_path)
+            current_app.config["LAST_OPTIMIZER"] = optimizer
+            # except Exception as e:
+            #     if self.logger:
+            #         self.logger.error(f"Error during optimizer initialization: {e.__str__()}")

-        # Run "prep" section once
-        script_dict = script.script_dict
         with current_app.app_context():
-
-
+            run = WorkflowRun(name=script.name or "untitled", platform=script.deck or "deck", start_time=datetime.now(),
+                              repeat_mode=repeat_mode
+                              )
             db.session.add(run)
-            db.session.
+            db.session.flush()
             run_id = run.id  # Save the ID
-        global_config.runner_status = {"id":run_id, "type": "workflow"}
-        self._run_actions(script, section_name="prep", logger=logger, socketio=socketio, run_id=run_id)
-        output_list = []
-        _, arg_type = script.config("script")
-        _, return_list = script.config_return()
-
-        # Run "script" section multiple times
-        if repeat_count:
-            self._run_repeat_section(repeat_count, arg_type, bo_args, output_list, script,
-                                     run_name, return_list, compiled, logger, socketio, run_id=run_id)
-        elif config:
-            self._run_config_section(config, arg_type, output_list, script, run_name, logger,
-                                     socketio, run_id=run_id, compiled=compiled)
-
-        # Run "cleanup" section once
-        self._run_actions(script, section_name="cleanup", logger=logger, socketio=socketio,run_id=run_id)
-        # Reset the running flag when done
-        self.lock.release()
-        # Save results if necessary
-        filename = None
-        if not script.python_script and output_list:
-            filename = self._save_results(run_name, arg_type, return_list, output_list, logger, output_path)
-        self._emit_progress(socketio, 100)
-        with current_app.app_context():
-            run = db.session.get(WorkflowRun, run_id)  # SQLAlchemy 1.4+ recommended method
-            run.end_time = datetime.now()
-            run.data_path = filename
             db.session.commit()

-
+        try:
+        # if True:
+            global_config.runner_status = {"id":run_id, "type": "workflow"}
+            # Run "prep" section once
+            asyncio.run(self._run_actions(script, section_name="prep", run_id=run_id))
+            output_list = []
+            _, arg_type = script.config("script")
+            _, return_list = script.config_return()
+            # Run "script" section multiple times
+            if repeat_count:
+                asyncio.run(
+                    self._run_repeat_section(repeat_count, arg_type, output_list, script,
+                                             run_name, return_list, compiled,
+                                             history, output_path, run_id=run_id, optimizer=optimizer,
+                                             batch_mode=batch_mode, batch_size=batch_size, objectives=objectives)
+                )
+            elif config:
+                asyncio.run(
+                    self._run_config_section(
+                        config, arg_type, output_list, script, run_name,
+                        run_id=run_id, compiled=compiled, batch_mode=batch_mode, batch_size=batch_size
+                    )
+                )
+
+            # Run "cleanup" section once
+            asyncio.run(self._run_actions(script, section_name="cleanup", run_id=run_id))
+            # Reset the running flag when done
+            # Save results if necessary
+            if not script.python_script and return_list:
+                # print(output_list)
+
+                filename = self._save_results(run_name, arg_type, return_list, output_list, output_path)
+
+
+        except Exception as e:
+            if self.logger:
+                self.logger.error(f"Error during script execution: {e.__str__()}")
+            error_flag = True
+        finally:
+            self._emit_progress(100)
+            if self.lock.locked():
+                self.lock.release()
+
+
+            with current_app.app_context():
+                run = db.session.get(WorkflowRun, run_id)
+                if run is None:
+                    if self.logger:
+                        self.logger.info("Error: Run not found in database.")
+                else:
+                    run.end_time = datetime.now()
+                    run.data_path = filename
+                    run.run_error = error_flag
+                db.session.commit()
+
+
+    async def _run_actions(self, script, section_name="", run_id=None):
         _func_str = script.python_script or script.compile()
         step_list: list = script.convert_to_lines(_func_str).get(section_name, [])
-
+        if not step_list:
+            if self.logger:
+                self.logger.info(f'No {section_name} steps')
+            return None
+        if self.logger:
+            self.logger.info(f'Executing {section_name} steps')
         if self.stop_pending_event.is_set():
-
-
-
-            self.exec_steps(script, section_name, logger, socketio, run_id=run_id, i_progress=0)
+            if self.logger:
+                self.logger.info(f"Stopping execution during {section_name} section.")
+            return None

-
+        phase = WorkflowPhase(
+            run_id=run_id,
+            name=section_name,
+            repeat_index=0,
+            start_time=datetime.now()
+        )
+        db.session.add(phase)
+        db.session.flush()
+        phase_id = phase.id
+
+        step_outputs = await self.exec_steps(script, section_name, phase_id=phase_id)
+        # Save phase-level output
+        phase.outputs = step_outputs
+        phase.end_time = datetime.now()
+        db.session.commit()
+        return step_outputs
+
+    async def _run_config_section(self, config, arg_type, output_list, script, run_name, run_id,
+                                  compiled=True, batch_mode=False, batch_size=1):
         if not compiled:
             for i in config:
                 try:
                     i = utils.convert_config_type(i, arg_type)
                     compiled = True
                 except Exception as e:
-                    logger
+                    if self.logger:
+                        self.logger.error(e)
                     compiled = False
                     break
         if compiled:
-
-
+            batch_size = int(batch_size)
+            nested_list = [config[i:i + batch_size] for i in range(0, len(config), batch_size)]
+
+            for i, kwargs_list in enumerate(nested_list):
+                # kwargs = dict(kwargs)
                 if self.stop_pending_event.is_set():
-                    logger
+                    if self.logger:
+                        self.logger.info(f'Stopping execution during {run_name}: {i + 1}/{len(config)}')
                     break
-                logger
-
-
-
-
-
+                if self.logger:
+                    self.logger.info(f'Executing {i + 1} of {len(nested_list)} with kwargs = {kwargs_list}')
+                progress = ((i + 1) * 100 / len(nested_list)) - 0.1
+                self._emit_progress(progress)
+
+                phase = WorkflowPhase(
+                    run_id=run_id,
+                    name="main",
+                    repeat_index=i,
+                    parameters=kwargs_list,
+                    start_time=datetime.now()
+                )
+                db.session.add(phase)
+                db.session.flush()
+
+                phase_id = phase.id
+                output = await self.exec_steps(script, "script", phase_id, kwargs_list=kwargs_list, )
+                # print(output)
                 if output:
                     # kwargs.update(output)
-
-
-
-
-
-
-
-
-
-
+                    for output_dict in output:
+                        output_list.append(output_dict)
+                phase.outputs = output
+                phase.end_time = datetime.now()
+                db.session.commit()
+            return output_list
+
+    async def _run_repeat_section(self, repeat_count, arg_types, output_list, script, run_name, return_list, compiled,
+                                  history, output_path, run_id, optimizer=None, batch_mode=None,
+                                  batch_size=None, objectives=None):
+
+        if optimizer and history:
+            file_path = os.path.join(output_path, history)
+
+            previous_runs = pd.read_csv(file_path)
+
+            expected_cols = list(arg_types.keys()) + list(return_list)
+
+            actual_cols = previous_runs.columns.tolist()
+
+            # NOT okay if it misses columns
+            if set(expected_cols) - set(actual_cols):
+                if self.logger:
+                    self.logger.warning(f"Missing columns from history .csv file. Expecting {expected_cols} but got {actual_cols}")
+                raise ValueError("Missing columns from history .csv file.")
+
+            # okay if there is extra columns
+            if set(actual_cols) - set(expected_cols):
+                if self.logger:
+                    self.logger.warning(f"Extra columns from history .csv file. Expecting {expected_cols} but got {actual_cols}")
+
+            optimizer.append_existing_data(previous_runs)
+
+            for row in previous_runs.to_dict(orient='records'):
+                output_list.append(row)
+
+
+
         for i_progress in range(int(repeat_count)):
             if self.stop_pending_event.is_set():
-                logger
+                if self.logger:
+                    self.logger.info(f'Stopping execution during {run_name}: {i_progress + 1}/{int(repeat_count)}')
                 break
-
+
+            phase = WorkflowPhase(
+                run_id=run_id,
+                name="main",
+                repeat_index=i_progress,
+                start_time=datetime.now()
+            )
+            db.session.add(phase)
+            db.session.flush()
+            phase_id = phase.id
+            if self.logger:
+                self.logger.info(f'Executing {run_name} experiment: {i_progress + 1}/{int(repeat_count)}')
             progress = (i_progress + 1) * 100 / int(repeat_count) - 0.1
-            self._emit_progress(
-
+            self._emit_progress(progress)
+
+            # Optimizer for UI
+            if optimizer:
                 try:
-                    parameters
-                    logger
-
-
-
-
-
-
-
+                    parameters = optimizer.suggest(n=batch_size)
+                    if self.logger:
+                        self.logger.info(f'Parameters: {parameters}')
+                    phase.parameters = parameters
+
+                    output = await self.exec_steps(script, "script", phase_id, kwargs_list=parameters)
+                    if output:
+                        optimizer.observe(output)
+
+                    else:
+                        if self.logger:
+                            self.logger.info('No output from script')
+
+
                 except Exception as e:
-                    logger
+                    if self.logger:
+                        self.logger.info(f'Optimization error: {e}')
                     break
             else:
-
-
-                output = self.exec_steps(script, "script", logger, socketio, run_id, i_progress)
+
+                output = await self.exec_steps(script, "script", phase_id, batch_size=batch_size)

             if output:
-
-
+                # print("output: ", output)
+                output_list.extend(output)
+                if self.logger:
+                    self.logger.info(f'Output value: {output}')
+                phase.outputs = output
+
+            phase.end_time = datetime.now()
+            db.session.commit()
+
+            if optimizer and self._check_early_stop(output, objectives):
+                if self.logger:
+                    self.logger.info('Early stopping')
+                break
+
+
         return output_list

-
-
-
-        args.extend(return_list)
+    def _save_results(self, run_name, arg_type, return_list, output_list, output_path):
+        output_columns = list(arg_type.keys()) + list(return_list)
+
         filename = run_name + "_" + datetime.now().strftime("%Y-%m-%d %H-%M") + ".csv"
         file_path = os.path.join(output_path, filename)
-
-
-
-
-        logger
+        df = pd.DataFrame(output_list)
+        df = df.loc[:, [c for c in output_columns if c in df.columns]]
+
+        df.to_csv(file_path, index=False)
+        if self.logger:
+            self.logger.info(f'Results saved to {file_path}')
         return filename

-
-
-        socketio.emit('progress', {'progress': progress})
+    def _emit_progress(self, progress):
+        self.socketio.emit('progress', {'progress': progress})

     def safe_sleep(self, duration: float):
         interval = 1  # check every 1 second
@@ -333,4 +457,314 @@ class ScriptRunner:
             "paused": self.paused,
             "stop_pending": self.stop_pending_event.is_set(),
             "stop_current": self.stop_current_event.is_set(),
-        }
+        }
+
+
+    async def _execute_steps_batched(self, steps: List[Dict], contexts: List[Dict[str, Any]], phase_id, section_name):
+        """
+        Execute a list of steps for multiple samples, batching where appropriate.
+        """
+        for step in steps:
+            action = step["action"]
+            instrument = step["instrument"]
+            action_id = step["id"]
+            if action == "if":
+                await self._execute_if_batched(step, contexts, phase_id=phase_id, step_index=action_id,
+                                               section_name=section_name)
+            elif action == "repeat":
+                await self._execute_repeat_batched(step, contexts, phase_id=phase_id, step_index=action_id,
+                                                   section_name=section_name)
+            elif action == "while":
+                await self._execute_while_batched(step, contexts, phase_id=phase_id, step_index=action_id,
+                                                  section_name=section_name)
+            elif instrument == "variable":
+                await self._execute_variable_batched(step, contexts, phase_id=phase_id, step_index=action_id,
+                                                     section_name=section_name)
+                # print("Variable executed", "current context", contexts)
+            else:
+                # Regular action - check if batch
+                if step.get("batch_action", False):
+                    # Execute once for all samples
+                    await self._execute_action_once(step, contexts[0], phase_id=phase_id, step_index=action_id,
+                                                    section_name=section_name)
+
+                else:
+                    # Execute for each sample
+                    for context in contexts:
+                        await self._execute_action(step, context, phase_id=phase_id, step_index=action_id,
+                                                   section_name=section_name)
+            self.pause_event.wait()
+
+
+
+    async def _execute_if_batched(self, step: Dict, contexts: List[Dict[str, Any]], phase_id, step_index, section_name):
+        """Execute if/else block for multiple samples."""
+        # Evaluate condition for each sample
+        for context in contexts:
+            condition = self._evaluate_condition(step["args"]["statement"], context)
+
+            if condition:
+                await self._execute_steps_batched(step["if_block"], [context], phase_id=phase_id, section_name=section_name)
+            else:
+                await self._execute_steps_batched(step["else_block"], [context], phase_id=phase_id, section_name=section_name)
+
+
+    async def _execute_repeat_batched(self, step: Dict, contexts: List[Dict[str, Any]], phase_id, step_index, section_name):
+        """Execute repeat block for multiple samples."""
+        times = step["args"].get("statement", 1)
+
+        for i in range(times):
+            # Add repeat index to all contexts
+            # for context in contexts:
+            #     context["repeat_index"] = i
+
+            await self._execute_steps_batched(step["repeat_block"], contexts, phase_id=phase_id, section_name=section_name)
+
+
+    async def _execute_while_batched(self, step: Dict, contexts: List[Dict[str, Any]], phase_id, step_index, section_name):
+        """Execute while block for multiple samples."""
+        max_iterations = step["args"].get("max_iterations", 1000)
+        active_contexts = contexts.copy()
+        iteration = 0
+
+        while iteration < max_iterations and active_contexts:
+            # Filter contexts that still meet the condition
+            still_active = []
+
+            for context in active_contexts:
+                condition = self._evaluate_condition(step["args"]["statement"], context)
+
+                if condition:
+                    context["while_index"] = iteration
+                    still_active.append(context)
+
+            if not still_active:
+                break
+
+            # Execute for contexts that are still active
+            await self._execute_steps_batched(step["while_block"], still_active, phase_id=phase_id, section_name=section_name)
+            active_contexts = still_active
+            iteration += 1
+
+        if iteration >= max_iterations:
+            raise RuntimeError(f"While loop exceeded max iterations ({max_iterations})")
+
+    async def _execute_action(self, step: Dict, context: Dict[str, Any], phase_id=1, step_index=1, section_name=None):
+        """Execute a single action with parameter substitution."""
+        # Substitute parameters in args
+        if self.stop_current_event.is_set():
+            return context
+        substituted_args = self._substitute_params(step["args"], context)
+
+        # Get the component and method
+        instrument = step.get("instrument", "")
+        action = step["action"]
+        if instrument and "." in instrument:
+            instrument_type, instrument = instrument.split(".")
+        else:
+            instrument_type = ""
+        # Execute the action
+        step_db = WorkflowStep(
+            phase_id=phase_id,
+            step_index=step_index,
+            method_name=action,
+            start_time=datetime.now(),
+        )
+        db.session.add(step_db)
+        db.session.flush()
+        try:
+
+            # print(f"step {section_name}-{step_index}")
+            self.socketio.emit('execution', {'section': f"{section_name}-{step_index-1}"})
+            if action == "wait":
+                duration = float(substituted_args["statement"])
+                self.safe_sleep(duration)
+
+            elif action == "pause":
+                msg = substituted_args.get("statement", "")
+                pause(msg)
+
+            elif instrument_type == "deck" and hasattr(deck, instrument):
+                component = getattr(deck, instrument)
+                if hasattr(component, action):
+                    method = getattr(component, action)
+
+                    # Execute and handle return value
+                    if step.get("coroutine", False):
+                        result = await method(**substituted_args)
+                    else:
+                        result = method(**substituted_args)
+
+                    # Store return value if specified
+                    return_var = step.get("return", "")
+                    if return_var:
+                        context[return_var] = result
+
+            elif instrument_type == "blocks" and instrument in BUILDING_BLOCKS.keys():
+                # Inject all block categories
+                method_collection = BUILDING_BLOCKS[instrument]
+                if action in method_collection.keys():
+                    method = method_collection[action]["func"]
+
+                    # Execute and handle return value
+                    # print(step.get("coroutine", False))
+                    if step.get("coroutine", False):
+                        result = await method(**substituted_args)
+                    else:
+                        result = method(**substituted_args)
+
+                    # Store return value if specified
+                    return_var = step.get("return", "")
+                    if return_var:
+                        context[return_var] = result
+        except HumanInterventionRequired as e:
+            self.logger.warning(f"Human intervention required: {e}")
+            self.socketio.emit('human_intervention', {'message': str(e)})
+            # Instead of auto-resume, explicitly stay paused until user action
+            # step.run_error = False
+            self.toggle_pause()
+
+        except Exception as e:
+            self.logger.error(f"Error during script execution: {e}")
+            self.socketio.emit('error', {'message': str(e)})
+
+            step_db.run_error = True
+            self.toggle_pause()
+        finally:
+            step_db.end_time = datetime.now()
+            step_db.output = context
+            db.session.commit()
+
+        self.pause_event.wait()
+
+        return context
+
+    async def _execute_action_once(self, step: Dict, context: Dict[str, Any], phase_id, step_index, section_name):
+        """Execute a batch action once (not per sample)."""
+        # print(f"Executing batch action: {step['action']}")
+        return await self._execute_action(step, context, phase_id=phase_id, step_index=step_index, section_name=section_name)
+
+    @staticmethod
+    def _substitute_params(args: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
+        """Substitute parameter placeholders like #param_1 with actual values."""
+        substituted = {}
+
+        for key, value in args.items():
+            if isinstance(value, str) and value.startswith("#"):
+                param_name = value[1:]  # Remove '#'
+                substituted[key] = context.get(param_name)
+            else:
+                substituted[key] = value
+
+        return substituted
+
+    @staticmethod
+    def _evaluate_condition(condition_str: str, context: Dict[str, Any]) -> bool:
+        """
+        Safely evaluate a condition string with context variables.
+        """
+        # Create evaluation context with all variables
+        eval_context = {}
+
+        # Substitute variables in the condition string
+        substituted = condition_str
+        for key, value in context.items():
+            # Replace #variable with actual variable name for eval
+            substituted = substituted.replace(f"#{key}", key)
+            # Add variable to eval context
+            eval_context[key] = value
+
+        try:
+            # Safe evaluation with variables in scope
+            result = eval(substituted, {"__builtins__": {}}, eval_context)
+            return bool(result)
+        except Exception as e:
+            raise ValueError(f"Error evaluating condition '{condition_str}': {e}")
+
+    def _check_early_stop(self, output, objectives):
+        for row in output:
+            all_met = True
+            for obj in objectives:
+                name = obj['name']
+                minimize = obj.get('minimize', True)
+                threshold = obj.get('early_stop', None)
+
+                if threshold is None:
+                    all_met = False
+                    break  # Skip if no early stop defined
+
+                value = row[name]
+                if minimize and value > threshold:
+                    all_met = False
+                    break
+                elif not minimize and value < threshold:
+                    all_met = False
+                    break
+
+            if all_met:
+                return True  # At least one row meets all early stop thresholds
+
+        return False  # No row met all thresholds
+
+    async def _execute_variable_batched(self, step: Dict, contexts: List[Dict[str, Any]], phase_id, step_index,
+                                        section_name):
+        """Execute variable assignment for multiple samples."""
+        var_name = step["action"]  # "vial" in your example
+        var_value = step["args"]["statement"]
+        arg_type = step["arg_types"]["statement"]
+
+        for context in contexts:
+            # Substitute any variable references in the value
+            if isinstance(var_value, str):
+                substituted_value = var_value
+
+                # Replace all variable references (with or without #) with their values
+                for key, val in context.items():
+                    # Handle both #variable and variable (without #)
+                    substituted_value = substituted_value.replace(f"#{key}", str(val))
+                    # For expressions like "vial+10", replace variable name directly
+                    # Use word boundaries to avoid partial matches
+                    import re
+                    substituted_value = re.sub(r'\b' + re.escape(key) + r'\b', str(val), substituted_value)
+
+                # Handle based on type
+                if arg_type == "float":
+                    try:
+                        # Evaluate as expression (e.g., "10.0+10" becomes 20.0)
+                        result = eval(substituted_value, {"__builtins__": {}}, {})
+                        context[var_name] = float(result)
+                    except:
+                        # If eval fails, try direct conversion
+                        context[var_name] = float(substituted_value)
+
+                elif arg_type == "int":
+                    try:
+                        result = eval(substituted_value, {"__builtins__": {}}, {})
+                        context[var_name] = int(result)
+                    except:
+                        context[var_name] = int(substituted_value)
+
+                elif arg_type == "bool":
+                    try:
+                        # Evaluate boolean expressions
+                        result = eval(substituted_value, {"__builtins__": {}}, {})
+                        context[var_name] = bool(result)
+                    except:
+                        context[var_name] = substituted_value.lower() in ['true', '1', 'yes']
+
+                else:  # "str"
+                    # For strings, check if it looks like an expression
+                    if any(char in substituted_value for char in ['+', '-', '*', '/', '>', '<', '=', '(', ')']):
+                        try:
+                            # Try to evaluate as expression
+                            result = eval(substituted_value, {"__builtins__": {}}, context)
+                            context[var_name] = result
+                        except:
+                            # If eval fails, store as string
+                            context[var_name] = substituted_value
+                    else:
+                        context[var_name] = substituted_value
+            else:
+                # Direct numeric or boolean value
+                context[var_name] = var_value
+