setta-0.0.14.dev6-py3-none-any.whl → setta-0.0.14.dev7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- setta/__init__.py +1 -1
- setta/tasks/task_runner.py +9 -21
- setta/tasks/tasks.py +62 -64
- setta/tasks/utils.py +118 -297
- {setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/METADATA +1 -1
- {setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/RECORD +10 -10
- {setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/LICENSE +0 -0
- {setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/WHEEL +0 -0
- {setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/entry_points.txt +0 -0
- {setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/top_level.txt +0 -0
setta/__init__.py
CHANGED
```diff
@@ -1 +1 @@
-__version__ = "0.0.14.dev6"
+__version__ = "0.0.14.dev7"
```
setta/tasks/task_runner.py
CHANGED
```diff
@@ -1,12 +1,8 @@
 import asyncio
 import inspect
-import logging
-import traceback
 from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
 from enum import Enum
 
-logger = logging.getLogger(__name__)
-
 
 class RunType(Enum):
     SUBPROCESS = "SUBPROCESS"
@@ -20,22 +16,14 @@ class TaskRunner:
         self.process_executor = ProcessPoolExecutor(max_workers=2)
 
     async def run(self, fn, fn_args, run_as):
-
-
-        if inspect.iscoroutinefunction(fn):
-            return await fn(*fn_args)
+        if inspect.iscoroutinefunction(fn):
+            return await fn(*fn_args)
 
-
-
-
-
-
-
+        if run_as == RunType.NONE:
+            return fn(*fn_args)
+        elif run_as == RunType.THREAD:
+            executor = self.thread_executor
+        elif run_as == RunType.SUBPROCESS:
+            executor = self.process_executor
 
-
-            executor, fn, *fn_args
-        )
-        except Exception as e:
-            logger.error(f"Error in TaskRunner.run: {str(e)}")
-            traceback.print_exc()
-            raise
+        return await asyncio.get_running_loop().run_in_executor(executor, fn, *fn_args)
```
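
For context, a minimal usage sketch of the simplified `TaskRunner.run` dispatch (hypothetical driver code; it assumes `TaskRunner()` needs no constructor arguments, and `blocking_work` is an invented stand-in for a blocking callable):

```python
import asyncio

from setta.tasks.task_runner import RunType, TaskRunner


def blocking_work(x, y):
    # Invented stand-in for a blocking (CPU- or IO-bound) callable.
    return x + y


async def main():
    runner = TaskRunner()  # assumption: no required constructor arguments
    # Coroutine functions are awaited directly; plain callables are handed to
    # the thread or process executor selected by run_as.
    result = await runner.run(blocking_work, [2, 3], RunType.THREAD)
    print(result)  # 5


asyncio.run(main())
```

Note that with the try/except and logging removed, exceptions raised inside `run` now propagate directly to the caller instead of being logged and re-raised.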
setta/tasks/tasks.py
CHANGED
```diff
@@ -3,7 +3,6 @@ import copy
 import json
 import logging
 import time
-import traceback
 from typing import Dict
 
 from setta.database.utils import create_new_id
@@ -72,56 +71,53 @@ class Tasks:
         call_type="call",
         other_data=None,
     ):
-        # Create a list of tasks to run concurrently
-        tasks = []
-        results = []
         message.content = {tuple(json.loads(k)): v for k, v in message.content.items()}
 
+        # Group tasks by subprocess to ensure sequential processing per subprocess
+        subprocess_tasks = {}
+        results = []
+
+        # First, identify all relevant subprocesses and their functions to call
         for sp_key, sp_info in self.in_memory_subprocesses.items():
             if (subprocess_key and sp_key != subprocess_key) or (
                 not match_subprocess_key(sp_key, project_config_id, section_id, idx)
             ):
                 continue
+
+            # For each matching subprocess, collect all functions that need to be called
+            fns_to_call = []
             for fn_name, fnInfo in sp_info["fnInfo"].items():
                 if (
                     call_all
                     or None in fnInfo["dependencies"]
                     or any(k in fnInfo["dependencies"] for k in message.content.keys())
                 ):
-
-                        f"Sending message to subprocess {sp_key}, function {fn_name}, message type: {call_type}"
-                    )
-                    try:
-                        # Send message to subprocess
-                        sp_info["subprocess"].parent_conn.send(
-                            {
-                                "type": call_type,
-                                "fn_name": fn_name,
-                                "message": message,
-                                "other_data": other_data,
-                            }
-                        )
-                    except Exception as e:
-                        logger.error(
-                            f"Error sending message to subprocess {sp_key}: {str(e)}"
-                        )
-                        traceback.print_exc()
-                        continue  # Skip to next function if we can't send
-
-                    # Create task for receiving response
-                    task = asyncio.create_task(
-                        self._handle_subprocess_response(
-                            sp_key,
-                            fn_name,
-                            message.id,
-                            sp_info["subprocess"].parent_conn.recv,
-                            websocket_manager,
-                            results,
-                        )
-                    )
-                    tasks.append(task)
+                    fns_to_call.append(fn_name)
 
-
+            if fns_to_call:
+                subprocess_tasks[sp_key] = {
+                    "subprocess": sp_info["subprocess"],
+                    "functions": fns_to_call,
+                }
+
+        # Create tasks to process each subprocess sequentially
+        tasks = []
+        for sp_key, sp_data in subprocess_tasks.items():
+            task = asyncio.create_task(
+                self._process_subprocess_sequentially(
+                    sp_key,
+                    sp_data["subprocess"],
+                    sp_data["functions"],
+                    message,
+                    call_type,
+                    other_data,
+                    websocket_manager,
+                    results,
+                )
+            )
+            tasks.append(task)
+
+        # Wait for all subprocess tasks to complete (each subprocess processed sequentially)
         if tasks:
             await asyncio.gather(*tasks)
 
@@ -134,47 +130,51 @@ class Tasks:
             content.extend(r["content"])
         return {"content": content, "messageType": C.WS_IN_MEMORY_FN_RETURN}
 
-    async def _handle_subprocess_response(
-        self,
+    async def _process_subprocess_sequentially(
+        self,
+        subprocess_key,
+        subprocess,
+        fn_names,
+        message,
+        call_type,
+        other_data,
+        websocket_manager,
+        results,
     ):
-        #
-
-
-
-
-
-
-
+        # Process each function sequentially for this subprocess
+        for fn_name in fn_names:
+            # Send message to subprocess
+            subprocess.parent_conn.send(
+                {
+                    "type": call_type,
+                    "fn_name": fn_name,
+                    "message": message,
+                    "other_data": other_data,
+                }
             )
 
+            # Wait for and handle the response before sending the next message
+            start_time = time.perf_counter()
+            result = await self.task_runner.run(
+                subprocess.parent_conn.recv, [], RunType.THREAD
+            )
             elapsed_time = time.perf_counter() - start_time
+
             if result["status"] == "success":
                 self.update_average_subprocess_fn_time(
                     subprocess_key, fn_name, elapsed_time
                 )
+
                 if websocket_manager is not None:
                     if result["content"]:
                         await websocket_manager.send_message_to_requester(
-
+                            message.id, result["content"], result["messageType"]
                         )
                     await self.maybe_send_latest_run_time_info(
-                        subprocess_key, fn_name,
+                        subprocess_key, fn_name, message.id, websocket_manager
                     )
             else:
                 results.append(result)
-        except EOFError:
-            logger.error(
-                f"EOF error when receiving response from subprocess {subprocess_key}, function {fn_name}"
-            )
-            # Add stack trace to see where exactly the error occurs
-            traceback.print_exc()
-            # Consider adding a placeholder result or raising to caller
-        except Exception as e:
-            logger.error(
-                f"Error receiving response from subprocess {subprocess_key}, function {fn_name}: {str(e)}"
-            )
-            traceback.print_exc()
 
     async def add_custom_fns(self, code_graph, exporter_obj):
         for c in code_graph:
@@ -182,8 +182,6 @@ class Tasks:
         sp = self.in_memory_subprocesses.get(subprocess_key, {}).get("subprocess")
         if sp:
             sp.close()
-            del self.in_memory_subprocesses[subprocess_key]
-
         sp = SettaInMemoryFnSubprocess(
             self.stop_event, self.websockets, c["subprocessStartMethod"]
         )
```
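
The behavioral change above: functions routed to the same subprocess are now sent and awaited one at a time, while distinct subprocesses still run concurrently. A self-contained sketch of that pattern (invented names, plain asyncio, independent of the setta codebase):

```python
import asyncio


async def process_group_sequentially(key, items, results):
    # Items within one group run strictly in order, mirroring the
    # per-function send/recv round-trip in _process_subprocess_sequentially.
    for item in items:
        await asyncio.sleep(0.01)  # stands in for a send + recv round-trip
        results.append((key, item))


async def main():
    groups = {"sp1": ["fn_a", "fn_b"], "sp2": ["fn_c"]}
    results = []
    # One task per group, so separate groups still overlap in time.
    tasks = [
        asyncio.create_task(process_group_sequentially(key, items, results))
        for key, items in groups.items()
    ]
    await asyncio.gather(*tasks)
    print(results)


asyncio.run(main())
```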
setta/tasks/utils.py
CHANGED
```diff
@@ -7,7 +7,6 @@ import sys
 import threading
 import traceback
 import uuid
-from collections import defaultdict
 
 from setta.tasks.fns.utils import TaskDefinition
 from setta.utils.constants import CWD
@@ -16,6 +15,35 @@ from setta.utils.utils import nested_access
 logger = logging.getLogger(__name__)
 
 
+def import_code_from_string(code_string, module_name=None, add_to_sys_modules=True):
+    # Generate a unique module name if one isn't provided
+    if module_name is None:
+        module_name = f"setta_dynamic_module_{uuid.uuid4().hex}"
+
+    # Add current directory to sys.path if it's not already there
+    current_dir = str(CWD)
+    if current_dir not in sys.path:
+        sys.path.insert(0, current_dir)
+
+    spec = importlib.util.spec_from_loader(module_name, loader=None)
+
+    # Create a new module based on the spec
+    module = importlib.util.module_from_spec(spec)
+
+    # Optionally add the module to sys.modules
+    if add_to_sys_modules:
+        print(f"adding {module_name} to sys.modules", flush=True)
+        sys.modules[module_name] = module
+
+    # Compile the code string
+    code_object = compile(code_string, module_name, "exec")
+
+    # Execute the compiled code object in the module's namespace
+    exec(code_object, module.__dict__)
+
+    return module
+
+
 class SettaInMemoryFnSubprocess:
     def __init__(self, stop_event, websockets, start_method):
         logger.debug(
@@ -40,20 +68,20 @@ class SettaInMemoryFnSubprocess:
         self.start_stdout_processor_task()
 
     def _subprocess_main(self):
-        """Main loop in subprocess that handles all requests
+        """Main loop in subprocess that handles all requests"""
         # Initialize store for imported modules
         fns_dict = {}
         cache = {}
 
-
-
+        class OutputCapture:
+            def __init__(self, stdout_pipe):
+                self.stdout_pipe = stdout_pipe
 
-
-
-        send_lock = threading.Lock()
+            def write(self, text):
+                self.stdout_pipe.send(text)
 
-
-
+            def flush(self):
+                pass
 
         # Redirect stdout as soon as subprocess starts
         output_capture = OutputCapture(self.stdout_child_conn)
@@ -61,27 +89,11 @@ class SettaInMemoryFnSubprocess:
         sys.stderr = output_capture
 
         while True:
-
-
-
-                logger.debug(f"Subprocess received message type: {msg_type}")
-            except EOFError:
-                logger.error("EOF error when receiving message in subprocess")
-                break
-            except Exception as e:
-                logger.error(f"Error receiving message in subprocess: {str(e)}")
-                traceback.print_exc()
-                break
+            msg = self.child_conn.recv()  # Wait for requests
+            msg_type = msg["type"]
+            return_message_type = None
 
             if msg_type == "shutdown":
-                # Signal all worker threads to stop
-                for fn_name in fn_workers:
-                    fn_message_queues[fn_name].put(None)
-
-                # Wait for all worker threads to finish (with timeout)
-                for fn_name, worker in fn_workers.items():
-                    worker.join(timeout=1.0)
-
                 break
 
             try:
@@ -92,243 +104,108 @@ class SettaInMemoryFnSubprocess:
                     module_name = to_import["module_name"]
                     # Import and store module
                     module = import_code_from_string(code, module_name)
-
-
-                        fns_dict, module, module_name
-                    )
-                    for k in added_fn_names:
-                        cache[k] = msg["exporter_obj"]
-                        dependencies[k] = get_task_metadata(
-                            fns_dict[k], cache[k]
-                        )
-                        # Start a worker thread for each function
-                        self._start_worker_for_fn(
-                            k,
-                            fn_workers,
-                            fn_message_queues,
-                            fns_dict,
-                            cache,
-                            lock,
-                            send_lock,
-                            self.child_conn,
-                        )
-
-                    with send_lock:
-                        self.child_conn.send(
-                            {
-                                "status": "success",
-                                "content": dependencies,
-                            }
-                        )
-
-                elif msg_type == "call" or msg_type == "call_with_new_exporter_obj":
-                    fn_name = msg["fn_name"]
-                    try:
-                        logger.debug(
-                            f"Subprocess about to process message for {fn_name}"
-                        )
-                        # Start a worker for this function if needed
-                        self._start_worker_for_fn(
-                            fn_name,
-                            fn_workers,
-                            fn_message_queues,
-                            fns_dict,
-                            cache,
-                            lock,
-                            send_lock,
-                            self.child_conn,
-                        )
-
-                        # Add the message to the function's queue
-                        fn_message_queues[fn_name].put(msg)
-                    except Exception as e:
-                        logger.error(
-                            f"Error processing message for {fn_name}: {str(e)}"
+                    added_fn_names = add_fns_from_module(
+                        fns_dict, module, module_name
                     )
-
-
-
-                        try:
-                            self.child_conn.send(
-                                {
-                                    "status": "error",
-                                    "error": str(e),
-                                    "messageType": None,
-                                }
-                            )
-                        except:
-                            logger.error(
-                                "Failed to send error response back to parent"
-                            )
+                    for k in added_fn_names:
+                        cache[k] = msg["exporter_obj"]
+                        dependencies[k] = get_task_metadata(fns_dict[k], cache[k])
 
-            except Exception as e:
-                traceback.print_exc()
-                with send_lock:
                     self.child_conn.send(
                         {
-                            "status": "
-                            "
-                            "messageType": None,
+                            "status": "success",
+                            "content": dependencies,
                         }
                     )
 
-
-
-
-
-
-
-
-
-
-
-
-                    )
-                    if msg is None:  # Sentinel value to stop the thread
-                        break
+                elif msg_type == "call":
+                    result, return_message_type = self.call_imported_fn(
+                        msg, fns_dict, cache
+                    )
+                    self.child_conn.send(
+                        {
+                            "status": "success",
+                            "content": result,
+                            "messageType": return_message_type,
+                        }
+                    )
 
-                msg_type
-
-
-
-
-
-
-
-
-
-
-
-
-                exporter_obj = cache.get(fn_name)
-
-                # Process message
-                message_content = process_message(msg["message"], exporter_obj)
-
-                # Call function
-                result = in_memory_fn_obj.fn(message_content)
-                return_message_type = in_memory_fn_obj.return_message_type
-
-                # Send result back
-                with send_lock:
-                    child_conn.send(
-                        {
-                            "status": "success",
-                            "content": result,
-                            "messageType": return_message_type,
-                        }
-                    )
-            except Exception as e:
-                traceback.print_exc()
-                with send_lock:
-                    child_conn.send(
-                        {
-                            "status": "error",
-                            "error": str(e),
-                            "messageType": return_message_type,
-                        }
-                    )
-
-            # Mark task as done
-            fn_message_queues[fn_name].task_done()
+                elif msg_type == "call_with_new_exporter_obj":
+                    # replace old exporter_obj
+                    cache[msg["fn_name"]] = msg["other_data"]["exporter_obj"]
+                    result, return_message_type = self.call_imported_fn(
+                        msg, fns_dict, cache
+                    )
+                    self.child_conn.send(
+                        {
+                            "status": "success",
+                            "content": result,
+                            "messageType": return_message_type,
+                        }
+                    )
 
             except Exception as e:
                 traceback.print_exc()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            worker = threading.Thread(
-                target=self._worker_thread,
-                args=(
-                    fn_name,
-                    fn_message_queues,
-                    fns_dict,
-                    cache,
-                    lock,
-                    send_lock,
-                    child_conn,
-                ),
-                daemon=True,
-                name=f"worker-{fn_name}",
-            )
-            fn_workers[fn_name] = worker
-            worker.start()
+                self.child_conn.send(
+                    {
+                        "status": "error",
+                        "error": str(e),
+                        "messageType": return_message_type,
+                    }
+                )
+
+    def call_imported_fn(self, msg, fns_dict, cache):
+        fn_name = msg["fn_name"]
+        message = self.process_message(fn_name, msg["message"], cache)
+        fn = fns_dict[fn_name]
+        result = fn.fn(message)
+        return_message_type = fn.return_message_type
+        return result, return_message_type
 
     def close(self):
         try:
             logger.debug("Initiating shutdown sequence")
+            self.parent_conn.send({"type": "shutdown"})
+            self.process.join(timeout=2)  # Add timeout to process join
 
-            # Set our stop event - this signals all tasks to stop
-            self.stop_event.set()
-
-            # Send shutdown message to the subprocess
-            try:
-                logger.debug("Sending shutdown message to subprocess")
-                self.parent_conn.send({"type": "shutdown"})
-                logger.debug("Shutdown message sent")
-            except (BrokenPipeError, EOFError) as e:
-                logger.error(f"Error sending shutdown message: {str(e)}")
-
-            # Join the process with timeout
-            self.process.join(timeout=2)
-
-            # If still alive, escalate to terminate
             if self.process.is_alive():
-                logger.debug(
-                    "Process still alive after graceful shutdown, forcing termination"
-                )
+                logger.debug("Process still alive after timeout, forcing termination")
                 self.process.terminate()
                 self.process.join(timeout=1)
+        except Exception as e:
+            logger.debug(f"Error during process shutdown: {e}")
 
-
-
-                logger.debug(
-                    "Process still alive after terminate, killing forcefully"
-                )
-                self.process.kill()
-                self.process.join(timeout=1)
+        # Set stop event before closing pipes
+        self.stop_event.set()
 
-
-
+        # Close all connections
+        for conn in [
+            self.parent_conn,
+            self.child_conn,
+            self.stdout_parent_conn,
+            self.stdout_child_conn,
+        ]:
+            conn.close()
 
-        #
-        try:
-            # Cancel the stdout processor task if it exists
-            if self.stdout_processor_task:
-                self.stdout_processor_task.cancel()
-
-            # Close all connections - this will cause pending operations to fail fast
-            for conn in [
-                self.parent_conn,
-                self.child_conn,
-                self.stdout_parent_conn,
-                self.stdout_child_conn,
-            ]:
-                try:
-                    conn.close()
-                except:
-                    pass
+        self.stdout_thread.join(timeout=2)  # Add timeout to thread join
 
-
-
-        self.stdout_thread.join(timeout=2)
-        if self.stdout_thread.is_alive():
-            logger.debug("Stdout thread failed to terminate within timeout")
+        if self.stdout_thread.is_alive():
+            logger.debug("Stdout thread failed to terminate within timeout")
 
-
-
+        if self.stdout_processor_task:
+            self.stdout_processor_task.cancel()
+
+    def process_message(self, fn_name, message, cache):
+        if fn_name in cache:
+            exporter_obj = cache[fn_name]
+            for k, v in message.content.items():
+                nice_str = exporter_obj.var_name_mapping.get(k)
+                if not nice_str:
+                    continue
+                p_dict, key = nested_access(exporter_obj.output, nice_str)
+                p_dict[key] = v
+            message.content = exporter_obj.output
+        return message.content
 
     def start_stdout_processor_task(self):
         if self.stdout_processor_task is None or self.stdout_processor_task.done():
@@ -413,59 +290,3 @@ def get_task_metadata(in_memory_fn, exporter_obj):
         exporter_obj.var_name_reverse_mapping[d] for d in in_memory_fn.dependencies
     )
     return dependencies
-
-
-# Class for capturing and redirecting stdout/stderr
-class OutputCapture:
-    def __init__(self, stdout_pipe):
-        self.stdout_pipe = stdout_pipe
-        self.lock = threading.Lock()
-
-    def write(self, text):
-        with self.lock:
-            self.stdout_pipe.send(text)
-
-    def flush(self):
-        pass
-
-
-def process_message(message, exporter_obj):
-    """Process a message before passing it to a function"""
-    if exporter_obj:
-        for k, v in message.content.items():
-            nice_str = exporter_obj.var_name_mapping.get(k)
-            if not nice_str:
-                continue
-            p_dict, key = nested_access(exporter_obj.output, nice_str)
-            p_dict[key] = v
-        return exporter_obj.output
-    return message.content
-
-
-def import_code_from_string(code_string, module_name=None, add_to_sys_modules=True):
-    # Generate a unique module name if one isn't provided
-    if module_name is None:
-        module_name = f"setta_dynamic_module_{uuid.uuid4().hex}"
-
-    # Add current directory to sys.path if it's not already there
-    current_dir = str(CWD)
-    if current_dir not in sys.path:
-        sys.path.insert(0, current_dir)
-
-    spec = importlib.util.spec_from_loader(module_name, loader=None)
-
-    # Create a new module based on the spec
-    module = importlib.util.module_from_spec(spec)
-
-    # Optionally add the module to sys.modules
-    if add_to_sys_modules:
-        print(f"adding {module_name} to sys.modules", flush=True)
-        sys.modules[module_name] = module
-
-    # Compile the code string
-    code_object = compile(code_string, module_name, "exec")
-
-    # Execute the compiled code object in the module's namespace
-    exec(code_object, module.__dict__)
-
-    return module
```
{setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/RECORD
CHANGED
```diff
@@ -1,4 +1,4 @@
-setta/__init__.py,sha256=…
+setta/__init__.py,sha256=-qJsuUle95TwTi4vroNItCFbufX5VZwZ055MmNPMVro,28
 setta/server.py,sha256=q4w9WG7SuLxwYtgXUCQyLt7t_HLmQV4y5abqvm7-uEA,4861
 setta/start.py,sha256=5sMZ7WH3KV9Q0v186PsaYqsWOz7hebyrpXbBOp9wQww,3589
 setta/cli/__init__.py,sha256=UxZG_VOMuF6lEBT3teUgTS9ulsK3wt3Gu3BbAQiAmt8,47
@@ -229,9 +229,9 @@ setta/static/frontend/assets/logo/logo.svg,sha256=k3XeAlA6hEaNfjnXG05hyb-8u1p_Fr
 setta/static/seed/.DS_Store,sha256=ENxJvDQd7Te_U8gExcXtHE-mAeBUYOHELRfDWgN1NmA,6148
 setta/static/seed/examples/.DS_Store,sha256=1lFlJ5EFymdzGAUAaI30vcaaLHt3F1LwpG7xILf9jsM,6148
 setta/tasks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-setta/tasks/task_runner.py,sha256=…
-setta/tasks/tasks.py,sha256=…
-setta/tasks/utils.py,sha256=…
+setta/tasks/task_runner.py,sha256=gMXpfZWFMQbix2MfrHVCKB7BxQCjO8JH2P8cxUmt1ms,849
+setta/tasks/tasks.py,sha256=jhfTdncE6fMtxMUdnmixMbbtKV-Qu_xHLvbDkmm3M4g,10993
+setta/tasks/utils.py,sha256=iqbsLYBcu4Qd-MAHd0SWK9wPaJezgEh1Yg5YC9goOLU,10631
 setta/tasks/fns/__init__.py,sha256=JhGzzQGaT9BWtF3pOmguh6pzIF9kdG3jdDNLyYZ2w7g,461
 setta/tasks/fns/codeAreaAutocomplete.py,sha256=gJ5JbjkWDyTothr-UF-YlOxrbVzj2iyOVK7XD3lfhSQ,6416
 setta/tasks/fns/codeAreaFindTemplateVars.py,sha256=vD9rY8VNPavv6VKa1bnxRPPRDNvFQy6mPIZRl-_3GnY,3708
@@ -252,9 +252,9 @@ setta/utils/generate_new_filename.py,sha256=KBLX6paDmTvXR-027TpqQkfijIXc7mCfhen-
 setta/utils/section_contents.py,sha256=V2HQPik6DfSXw4j7IalbP5AZ3OEGCbtL5ub3xL-Q_Qo,4141
 setta/utils/utils.py,sha256=KjzcvgM3Ab3IcE8vaWYtgBpwzPLKg0LmblnHLoYZJHM,9164
 setta/utils/websocket_manager.py,sha256=MBIMI8xxOFQF4lT3on4pupi1ttEWXdWPV4fI2YP_UJU,3925
-setta-0.0.14.dev6.dist-info/LICENSE,sha256=…
-setta-0.0.14.dev6.dist-info/METADATA,sha256=…
-setta-0.0.14.dev6.dist-info/WHEEL,sha256=…
-setta-0.0.14.dev6.dist-info/entry_points.txt,sha256=…
-setta-0.0.14.dev6.dist-info/top_level.txt,sha256=…
-setta-0.0.14.dev6.dist-info/RECORD,,
+setta-0.0.14.dev7.dist-info/LICENSE,sha256=us9fuCq9wmiZVzayjKxNZ2iJYF6dROe0Qp57ToCO7XU,11361
+setta-0.0.14.dev7.dist-info/METADATA,sha256=__vIVEdqunD4s14DQMi8rgEP6OOJEwD5isI9wH-6k3E,7517
+setta-0.0.14.dev7.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+setta-0.0.14.dev7.dist-info/entry_points.txt,sha256=P0qCESy9fWF2q1EQ9JufGldCSnPHplDPn8J6Bgk5hB0,42
+setta-0.0.14.dev7.dist-info/top_level.txt,sha256=8G4lmRzVOnJ11_DescPVHE6MQZH-o06A0nGsDDV2ngY,6
+setta-0.0.14.dev7.dist-info/RECORD,,
```
{setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/LICENSE
File without changes
{setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/WHEEL
File without changes
{setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/entry_points.txt
File without changes
{setta-0.0.14.dev6.dist-info → setta-0.0.14.dev7.dist-info}/top_level.txt
File without changes