setta 0.0.14.dev5-py3-none-any.whl → 0.0.14.dev7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- setta/__init__.py +1 -1
- setta/tasks/task_runner.py +9 -21
- setta/tasks/tasks.py +62 -64
- setta/tasks/utils.py +117 -274
- {setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/METADATA +1 -1
- {setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/RECORD +10 -10
- {setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/LICENSE +0 -0
- {setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/WHEEL +0 -0
- {setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/entry_points.txt +0 -0
- {setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/top_level.txt +0 -0
setta/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.0.14.dev5"
+__version__ = "0.0.14.dev7"
setta/tasks/task_runner.py
CHANGED
@@ -1,12 +1,8 @@
 import asyncio
 import inspect
-import logging
-import traceback
 from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
 from enum import Enum
 
-logger = logging.getLogger(__name__)
-
 
 class RunType(Enum):
     SUBPROCESS = "SUBPROCESS"
@@ -20,22 +16,14 @@ class TaskRunner:
         self.process_executor = ProcessPoolExecutor(max_workers=2)
 
     async def run(self, fn, fn_args, run_as):
-
-
-            if inspect.iscoroutinefunction(fn):
-                return await fn(*fn_args)
+        if inspect.iscoroutinefunction(fn):
+            return await fn(*fn_args)
 
-
-
-
-
-
-
+        if run_as == RunType.NONE:
+            return fn(*fn_args)
+        elif run_as == RunType.THREAD:
+            executor = self.thread_executor
+        elif run_as == RunType.SUBPROCESS:
+            executor = self.process_executor
 
-
-                executor, fn, *fn_args
-            )
-        except Exception as e:
-            logger.error(f"Error in TaskRunner.run: {str(e)}")
-            traceback.print_exc()
-            raise
+        return await asyncio.get_running_loop().run_in_executor(executor, fn, *fn_args)
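The rewritten run method drops the old logging/try-except wrapper: coroutines are awaited directly, and synchronous functions are dispatched inline, to the thread pool, or to the process pool based on the RunType value, via asyncio.get_running_loop().run_in_executor. A minimal usage sketch follows; it assumes TaskRunner() takes no constructor arguments (not shown in this diff), and the blocking_work function is purely illustrative.

import asyncio

from setta.tasks.task_runner import RunType, TaskRunner


def blocking_work(x):
    # an ordinary synchronous function; run() offloads it to the thread pool
    return x * 2


async def main():
    runner = TaskRunner()  # assumption: no-arg constructor
    # fn_args is passed as a sequence and unpacked with *fn_args inside run()
    doubled = await runner.run(blocking_work, [21], RunType.THREAD)
    print(doubled)  # 42


asyncio.run(main())

One behavioral note: because errors are no longer caught and logged inside run, exceptions now propagate directly to the caller.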
setta/tasks/tasks.py
CHANGED
@@ -3,7 +3,6 @@ import copy
 import json
 import logging
 import time
-import traceback
 from typing import Dict
 
 from setta.database.utils import create_new_id
@@ -72,56 +71,53 @@ class Tasks:
         call_type="call",
         other_data=None,
     ):
-        # Create a list of tasks to run concurrently
-        tasks = []
-        results = []
         message.content = {tuple(json.loads(k)): v for k, v in message.content.items()}
 
+        # Group tasks by subprocess to ensure sequential processing per subprocess
+        subprocess_tasks = {}
+        results = []
+
+        # First, identify all relevant subprocesses and their functions to call
         for sp_key, sp_info in self.in_memory_subprocesses.items():
             if (subprocess_key and sp_key != subprocess_key) or (
                 not match_subprocess_key(sp_key, project_config_id, section_id, idx)
             ):
                 continue
+
+            # For each matching subprocess, collect all functions that need to be called
+            fns_to_call = []
             for fn_name, fnInfo in sp_info["fnInfo"].items():
                 if (
                     call_all
                     or None in fnInfo["dependencies"]
                     or any(k in fnInfo["dependencies"] for k in message.content.keys())
                 ):
-
-                        f"Sending message to subprocess {sp_key}, function {fn_name}, message type: {call_type}"
-                    )
-                    try:
-                        # Send message to subprocess
-                        sp_info["subprocess"].parent_conn.send(
-                            {
-                                "type": call_type,
-                                "fn_name": fn_name,
-                                "message": message,
-                                "other_data": other_data,
-                            }
-                        )
-                    except Exception as e:
-                        logger.error(
-                            f"Error sending message to subprocess {sp_key}: {str(e)}"
-                        )
-                        traceback.print_exc()
-                        continue  # Skip to next function if we can't send
-
-                    # Create task for receiving response
-                    task = asyncio.create_task(
-                        self._handle_subprocess_response(
-                            sp_key,
-                            fn_name,
-                            message.id,
-                            sp_info["subprocess"].parent_conn.recv,
-                            websocket_manager,
-                            results,
-                        )
-                    )
-                    tasks.append(task)
+                    fns_to_call.append(fn_name)
 
-
+            if fns_to_call:
+                subprocess_tasks[sp_key] = {
+                    "subprocess": sp_info["subprocess"],
+                    "functions": fns_to_call,
+                }
+
+        # Create tasks to process each subprocess sequentially
+        tasks = []
+        for sp_key, sp_data in subprocess_tasks.items():
+            task = asyncio.create_task(
+                self._process_subprocess_sequentially(
+                    sp_key,
+                    sp_data["subprocess"],
+                    sp_data["functions"],
+                    message,
+                    call_type,
+                    other_data,
+                    websocket_manager,
+                    results,
+                )
+            )
+            tasks.append(task)
+
+        # Wait for all subprocess tasks to complete (each subprocess processed sequentially)
         if tasks:
             await asyncio.gather(*tasks)
 
@@ -134,47 +130,51 @@ class Tasks:
                 content.extend(r["content"])
         return {"content": content, "messageType": C.WS_IN_MEMORY_FN_RETURN}
 
-    async def 
-        self,
+    async def _process_subprocess_sequentially(
+        self,
+        subprocess_key,
+        subprocess,
+        fn_names,
+        message,
+        call_type,
+        other_data,
+        websocket_manager,
+        results,
     ):
-        # 
-
-
-
-
-
-
-
+        # Process each function sequentially for this subprocess
+        for fn_name in fn_names:
+            # Send message to subprocess
+            subprocess.parent_conn.send(
+                {
+                    "type": call_type,
+                    "fn_name": fn_name,
+                    "message": message,
+                    "other_data": other_data,
+                }
            )
 
+            # Wait for and handle the response before sending the next message
+            start_time = time.perf_counter()
+            result = await self.task_runner.run(
+                subprocess.parent_conn.recv, [], RunType.THREAD
+            )
            elapsed_time = time.perf_counter() - start_time
+
            if result["status"] == "success":
                self.update_average_subprocess_fn_time(
                    subprocess_key, fn_name, elapsed_time
                )
+
                if websocket_manager is not None:
                    if result["content"]:
                        await websocket_manager.send_message_to_requester(
-
+                            message.id, result["content"], result["messageType"]
                        )
                    await self.maybe_send_latest_run_time_info(
-                        subprocess_key, fn_name,
+                        subprocess_key, fn_name, message.id, websocket_manager
                    )
            else:
                results.append(result)
-        except EOFError:
-            logger.error(
-                f"EOF error when receiving response from subprocess {subprocess_key}, function {fn_name}"
-            )
-            # Add stack trace to see where exactly the error occurs
-            traceback.print_exc()
-            # Consider adding a placeholder result or raising to caller
-        except Exception as e:
-            logger.error(
-                f"Error receiving response from subprocess {subprocess_key}, function {fn_name}: {str(e)}"
-            )
-            traceback.print_exc()
 
     async def add_custom_fns(self, code_graph, exporter_obj):
         for c in code_graph:
@@ -182,8 +182,6 @@ class Tasks:
             sp = self.in_memory_subprocesses.get(subprocess_key, {}).get("subprocess")
             if sp:
                 sp.close()
-                del self.in_memory_subprocesses[subprocess_key]
-
 
             sp = SettaInMemoryFnSubprocess(
                 self.stop_event, self.websockets, c["subprocessStartMethod"]
|
setta/tasks/utils.py
CHANGED
@@ -7,7 +7,6 @@ import sys
|
|
7
7
|
import threading
|
8
8
|
import traceback
|
9
9
|
import uuid
|
10
|
-
from collections import defaultdict
|
11
10
|
|
12
11
|
from setta.tasks.fns.utils import TaskDefinition
|
13
12
|
from setta.utils.constants import CWD
|
@@ -16,6 +15,35 @@ from setta.utils.utils import nested_access
|
|
16
15
|
logger = logging.getLogger(__name__)
|
17
16
|
|
18
17
|
|
18
|
+
def import_code_from_string(code_string, module_name=None, add_to_sys_modules=True):
|
19
|
+
# Generate a unique module name if one isn't provided
|
20
|
+
if module_name is None:
|
21
|
+
module_name = f"setta_dynamic_module_{uuid.uuid4().hex}"
|
22
|
+
|
23
|
+
# Add current directory to sys.path if it's not already there
|
24
|
+
current_dir = str(CWD)
|
25
|
+
if current_dir not in sys.path:
|
26
|
+
sys.path.insert(0, current_dir)
|
27
|
+
|
28
|
+
spec = importlib.util.spec_from_loader(module_name, loader=None)
|
29
|
+
|
30
|
+
# Create a new module based on the spec
|
31
|
+
module = importlib.util.module_from_spec(spec)
|
32
|
+
|
33
|
+
# Optionally add the module to sys.modules
|
34
|
+
if add_to_sys_modules:
|
35
|
+
print(f"adding {module_name} to sys.modules", flush=True)
|
36
|
+
sys.modules[module_name] = module
|
37
|
+
|
38
|
+
# Compile the code string
|
39
|
+
code_object = compile(code_string, module_name, "exec")
|
40
|
+
|
41
|
+
# Execute the compiled code object in the module's namespace
|
42
|
+
exec(code_object, module.__dict__)
|
43
|
+
|
44
|
+
return module
|
45
|
+
|
46
|
+
|
19
47
|
class SettaInMemoryFnSubprocess:
|
20
48
|
def __init__(self, stop_event, websockets, start_method):
|
21
49
|
logger.debug(
|
@@ -40,20 +68,20 @@ class SettaInMemoryFnSubprocess:
|
|
40
68
|
self.start_stdout_processor_task()
|
41
69
|
|
42
70
|
def _subprocess_main(self):
|
43
|
-
"""Main loop in subprocess that handles all requests
|
71
|
+
"""Main loop in subprocess that handles all requests"""
|
44
72
|
# Initialize store for imported modules
|
45
73
|
fns_dict = {}
|
46
74
|
cache = {}
|
47
75
|
|
48
|
-
|
49
|
-
|
76
|
+
class OutputCapture:
|
77
|
+
def __init__(self, stdout_pipe):
|
78
|
+
self.stdout_pipe = stdout_pipe
|
50
79
|
|
51
|
-
|
52
|
-
|
53
|
-
send_lock = threading.Lock()
|
80
|
+
def write(self, text):
|
81
|
+
self.stdout_pipe.send(text)
|
54
82
|
|
55
|
-
|
56
|
-
|
83
|
+
def flush(self):
|
84
|
+
pass
|
57
85
|
|
58
86
|
# Redirect stdout as soon as subprocess starts
|
59
87
|
output_capture = OutputCapture(self.stdout_child_conn)
|
@@ -61,27 +89,11 @@ class SettaInMemoryFnSubprocess:
|
|
61
89
|
sys.stderr = output_capture
|
62
90
|
|
63
91
|
while True:
|
64
|
-
|
65
|
-
|
66
|
-
|
67
|
-
logger.debug(f"Subprocess received message type: {msg_type}")
|
68
|
-
except EOFError:
|
69
|
-
logger.error("EOF error when receiving message in subprocess")
|
70
|
-
break
|
71
|
-
except Exception as e:
|
72
|
-
logger.error(f"Error receiving message in subprocess: {str(e)}")
|
73
|
-
traceback.print_exc()
|
74
|
-
break
|
92
|
+
msg = self.child_conn.recv() # Wait for requests
|
93
|
+
msg_type = msg["type"]
|
94
|
+
return_message_type = None
|
75
95
|
|
76
96
|
if msg_type == "shutdown":
|
77
|
-
# Signal all worker threads to stop
|
78
|
-
for fn_name in fn_workers:
|
79
|
-
fn_message_queues[fn_name].put(None)
|
80
|
-
|
81
|
-
# Wait for all worker threads to finish (with timeout)
|
82
|
-
for fn_name, worker in fn_workers.items():
|
83
|
-
worker.join(timeout=1.0)
|
84
|
-
|
85
97
|
break
|
86
98
|
|
87
99
|
try:
|
@@ -92,221 +104,108 @@ class SettaInMemoryFnSubprocess:
|
|
92
104
|
module_name = to_import["module_name"]
|
93
105
|
# Import and store module
|
94
106
|
module = import_code_from_string(code, module_name)
|
95
|
-
|
96
|
-
|
97
|
-
fns_dict, module, module_name
|
98
|
-
)
|
99
|
-
for k in added_fn_names:
|
100
|
-
cache[k] = msg["exporter_obj"]
|
101
|
-
dependencies[k] = get_task_metadata(
|
102
|
-
fns_dict[k], cache[k]
|
103
|
-
)
|
104
|
-
# Start a worker thread for each function
|
105
|
-
self._start_worker_for_fn(
|
106
|
-
k,
|
107
|
-
fn_workers,
|
108
|
-
fn_message_queues,
|
109
|
-
fns_dict,
|
110
|
-
cache,
|
111
|
-
lock,
|
112
|
-
send_lock,
|
113
|
-
self.child_conn,
|
114
|
-
)
|
115
|
-
|
116
|
-
with send_lock:
|
117
|
-
self.child_conn.send(
|
118
|
-
{
|
119
|
-
"status": "success",
|
120
|
-
"content": dependencies,
|
121
|
-
}
|
107
|
+
added_fn_names = add_fns_from_module(
|
108
|
+
fns_dict, module, module_name
|
122
109
|
)
|
110
|
+
for k in added_fn_names:
|
111
|
+
cache[k] = msg["exporter_obj"]
|
112
|
+
dependencies[k] = get_task_metadata(fns_dict[k], cache[k])
|
123
113
|
|
124
|
-
|
125
|
-
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
fn_name,
|
130
|
-
fn_workers,
|
131
|
-
fn_message_queues,
|
132
|
-
fns_dict,
|
133
|
-
cache,
|
134
|
-
lock,
|
135
|
-
send_lock,
|
136
|
-
self.child_conn,
|
114
|
+
self.child_conn.send(
|
115
|
+
{
|
116
|
+
"status": "success",
|
117
|
+
"content": dependencies,
|
118
|
+
}
|
137
119
|
)
|
138
120
|
|
139
|
-
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
traceback.print_exc()
|
144
|
-
with send_lock:
|
121
|
+
elif msg_type == "call":
|
122
|
+
result, return_message_type = self.call_imported_fn(
|
123
|
+
msg, fns_dict, cache
|
124
|
+
)
|
145
125
|
self.child_conn.send(
|
146
126
|
{
|
147
|
-
"status": "
|
148
|
-
"
|
149
|
-
"messageType":
|
127
|
+
"status": "success",
|
128
|
+
"content": result,
|
129
|
+
"messageType": return_message_type,
|
150
130
|
}
|
151
131
|
)
|
152
132
|
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
break
|
167
|
-
|
168
|
-
msg_type = msg["type"]
|
169
|
-
return_message_type = None
|
170
|
-
|
171
|
-
if msg_type == "call" or msg_type == "call_with_new_exporter_obj":
|
172
|
-
try:
|
173
|
-
# Handle updating exporter_obj for call_with_new_exporter_obj
|
174
|
-
if msg_type == "call_with_new_exporter_obj":
|
175
|
-
with lock:
|
176
|
-
cache[fn_name] = msg["other_data"]["exporter_obj"]
|
177
|
-
|
178
|
-
# Get a thread-safe copy of what we need
|
179
|
-
with lock:
|
180
|
-
in_memory_fn_obj = fns_dict[fn_name]
|
181
|
-
exporter_obj = cache.get(fn_name)
|
182
|
-
|
183
|
-
# Process message
|
184
|
-
message_content = process_message(msg["message"], exporter_obj)
|
185
|
-
|
186
|
-
# Call function
|
187
|
-
result = in_memory_fn_obj.fn(message_content)
|
188
|
-
return_message_type = in_memory_fn_obj.return_message_type
|
189
|
-
|
190
|
-
# Send result back
|
191
|
-
with send_lock:
|
192
|
-
child_conn.send(
|
193
|
-
{
|
194
|
-
"status": "success",
|
195
|
-
"content": result,
|
196
|
-
"messageType": return_message_type,
|
197
|
-
}
|
198
|
-
)
|
199
|
-
except Exception as e:
|
200
|
-
traceback.print_exc()
|
201
|
-
with send_lock:
|
202
|
-
child_conn.send(
|
203
|
-
{
|
204
|
-
"status": "error",
|
205
|
-
"error": str(e),
|
206
|
-
"messageType": return_message_type,
|
207
|
-
}
|
208
|
-
)
|
209
|
-
|
210
|
-
# Mark task as done
|
211
|
-
fn_message_queues[fn_name].task_done()
|
133
|
+
elif msg_type == "call_with_new_exporter_obj":
|
134
|
+
# replace old exporter_obj
|
135
|
+
cache[msg["fn_name"]] = msg["other_data"]["exporter_obj"]
|
136
|
+
result, return_message_type = self.call_imported_fn(
|
137
|
+
msg, fns_dict, cache
|
138
|
+
)
|
139
|
+
self.child_conn.send(
|
140
|
+
{
|
141
|
+
"status": "success",
|
142
|
+
"content": result,
|
143
|
+
"messageType": return_message_type,
|
144
|
+
}
|
145
|
+
)
|
212
146
|
|
213
147
|
except Exception as e:
|
214
148
|
traceback.print_exc()
|
215
|
-
|
216
|
-
|
217
|
-
|
218
|
-
|
219
|
-
|
220
|
-
|
221
|
-
|
222
|
-
|
223
|
-
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
230
|
-
worker = threading.Thread(
|
231
|
-
target=self._worker_thread,
|
232
|
-
args=(
|
233
|
-
fn_name,
|
234
|
-
fn_message_queues,
|
235
|
-
fns_dict,
|
236
|
-
cache,
|
237
|
-
lock,
|
238
|
-
send_lock,
|
239
|
-
child_conn,
|
240
|
-
),
|
241
|
-
daemon=True,
|
242
|
-
name=f"worker-{fn_name}",
|
243
|
-
)
|
244
|
-
fn_workers[fn_name] = worker
|
245
|
-
worker.start()
|
149
|
+
self.child_conn.send(
|
150
|
+
{
|
151
|
+
"status": "error",
|
152
|
+
"error": str(e),
|
153
|
+
"messageType": return_message_type,
|
154
|
+
}
|
155
|
+
)
|
156
|
+
|
157
|
+
def call_imported_fn(self, msg, fns_dict, cache):
|
158
|
+
fn_name = msg["fn_name"]
|
159
|
+
message = self.process_message(fn_name, msg["message"], cache)
|
160
|
+
fn = fns_dict[fn_name]
|
161
|
+
result = fn.fn(message)
|
162
|
+
return_message_type = fn.return_message_type
|
163
|
+
return result, return_message_type
|
246
164
|
|
247
165
|
def close(self):
|
248
166
|
try:
|
249
167
|
logger.debug("Initiating shutdown sequence")
|
168
|
+
self.parent_conn.send({"type": "shutdown"})
|
169
|
+
self.process.join(timeout=2) # Add timeout to process join
|
250
170
|
|
251
|
-
# Set our stop event - this signals all tasks to stop
|
252
|
-
self.stop_event.set()
|
253
|
-
|
254
|
-
# Send shutdown message to the subprocess
|
255
|
-
try:
|
256
|
-
logger.debug("Sending shutdown message to subprocess")
|
257
|
-
self.parent_conn.send({"type": "shutdown"})
|
258
|
-
logger.debug("Shutdown message sent")
|
259
|
-
except (BrokenPipeError, EOFError) as e:
|
260
|
-
logger.error(f"Error sending shutdown message: {str(e)}")
|
261
|
-
|
262
|
-
# Join the process with timeout
|
263
|
-
self.process.join(timeout=2)
|
264
|
-
|
265
|
-
# If still alive, escalate to terminate
|
266
171
|
if self.process.is_alive():
|
267
|
-
logger.debug(
|
268
|
-
"Process still alive after graceful shutdown, forcing termination"
|
269
|
-
)
|
172
|
+
logger.debug("Process still alive after timeout, forcing termination")
|
270
173
|
self.process.terminate()
|
271
174
|
self.process.join(timeout=1)
|
175
|
+
except Exception as e:
|
176
|
+
logger.debug(f"Error during process shutdown: {e}")
|
272
177
|
|
273
|
-
|
274
|
-
|
275
|
-
logger.debug(
|
276
|
-
"Process still alive after terminate, killing forcefully"
|
277
|
-
)
|
278
|
-
self.process.kill()
|
279
|
-
self.process.join(timeout=1)
|
178
|
+
# Set stop event before closing pipes
|
179
|
+
self.stop_event.set()
|
280
180
|
|
281
|
-
|
282
|
-
|
181
|
+
# Close all connections
|
182
|
+
for conn in [
|
183
|
+
self.parent_conn,
|
184
|
+
self.child_conn,
|
185
|
+
self.stdout_parent_conn,
|
186
|
+
self.stdout_child_conn,
|
187
|
+
]:
|
188
|
+
conn.close()
|
283
189
|
|
284
|
-
#
|
285
|
-
try:
|
286
|
-
# Cancel the stdout processor task if it exists
|
287
|
-
if self.stdout_processor_task:
|
288
|
-
self.stdout_processor_task.cancel()
|
289
|
-
|
290
|
-
# Close all connections - this will cause pending operations to fail fast
|
291
|
-
for conn in [
|
292
|
-
self.parent_conn,
|
293
|
-
self.child_conn,
|
294
|
-
self.stdout_parent_conn,
|
295
|
-
self.stdout_child_conn,
|
296
|
-
]:
|
297
|
-
try:
|
298
|
-
conn.close()
|
299
|
-
except:
|
300
|
-
pass
|
190
|
+
self.stdout_thread.join(timeout=2) # Add timeout to thread join
|
301
191
|
|
302
|
-
|
303
|
-
|
304
|
-
self.stdout_thread.join(timeout=2)
|
305
|
-
if self.stdout_thread.is_alive():
|
306
|
-
logger.debug("Stdout thread failed to terminate within timeout")
|
192
|
+
if self.stdout_thread.is_alive():
|
193
|
+
logger.debug("Stdout thread failed to terminate within timeout")
|
307
194
|
|
308
|
-
|
309
|
-
|
195
|
+
if self.stdout_processor_task:
|
196
|
+
self.stdout_processor_task.cancel()
|
197
|
+
|
198
|
+
def process_message(self, fn_name, message, cache):
|
199
|
+
if fn_name in cache:
|
200
|
+
exporter_obj = cache[fn_name]
|
201
|
+
for k, v in message.content.items():
|
202
|
+
nice_str = exporter_obj.var_name_mapping.get(k)
|
203
|
+
if not nice_str:
|
204
|
+
continue
|
205
|
+
p_dict, key = nested_access(exporter_obj.output, nice_str)
|
206
|
+
p_dict[key] = v
|
207
|
+
message.content = exporter_obj.output
|
208
|
+
return message.content
|
310
209
|
|
311
210
|
def start_stdout_processor_task(self):
|
312
211
|
if self.stdout_processor_task is None or self.stdout_processor_task.done():
|
@@ -391,59 +290,3 @@ def get_task_metadata(in_memory_fn, exporter_obj):
|
|
391
290
|
exporter_obj.var_name_reverse_mapping[d] for d in in_memory_fn.dependencies
|
392
291
|
)
|
393
292
|
return dependencies
|
394
|
-
|
395
|
-
|
396
|
-
# Class for capturing and redirecting stdout/stderr
|
397
|
-
class OutputCapture:
|
398
|
-
def __init__(self, stdout_pipe):
|
399
|
-
self.stdout_pipe = stdout_pipe
|
400
|
-
self.lock = threading.Lock()
|
401
|
-
|
402
|
-
def write(self, text):
|
403
|
-
with self.lock:
|
404
|
-
self.stdout_pipe.send(text)
|
405
|
-
|
406
|
-
def flush(self):
|
407
|
-
pass
|
408
|
-
|
409
|
-
|
410
|
-
def process_message(message, exporter_obj):
|
411
|
-
"""Process a message before passing it to a function"""
|
412
|
-
if exporter_obj:
|
413
|
-
for k, v in message.content.items():
|
414
|
-
nice_str = exporter_obj.var_name_mapping.get(k)
|
415
|
-
if not nice_str:
|
416
|
-
continue
|
417
|
-
p_dict, key = nested_access(exporter_obj.output, nice_str)
|
418
|
-
p_dict[key] = v
|
419
|
-
return exporter_obj.output
|
420
|
-
return message.content
|
421
|
-
|
422
|
-
|
423
|
-
def import_code_from_string(code_string, module_name=None, add_to_sys_modules=True):
|
424
|
-
# Generate a unique module name if one isn't provided
|
425
|
-
if module_name is None:
|
426
|
-
module_name = f"setta_dynamic_module_{uuid.uuid4().hex}"
|
427
|
-
|
428
|
-
# Add current directory to sys.path if it's not already there
|
429
|
-
current_dir = str(CWD)
|
430
|
-
if current_dir not in sys.path:
|
431
|
-
sys.path.insert(0, current_dir)
|
432
|
-
|
433
|
-
spec = importlib.util.spec_from_loader(module_name, loader=None)
|
434
|
-
|
435
|
-
# Create a new module based on the spec
|
436
|
-
module = importlib.util.module_from_spec(spec)
|
437
|
-
|
438
|
-
# Optionally add the module to sys.modules
|
439
|
-
if add_to_sys_modules:
|
440
|
-
print(f"adding {module_name} to sys.modules", flush=True)
|
441
|
-
sys.modules[module_name] = module
|
442
|
-
|
443
|
-
# Compile the code string
|
444
|
-
code_object = compile(code_string, module_name, "exec")
|
445
|
-
|
446
|
-
# Execute the compiled code object in the module's namespace
|
447
|
-
exec(code_object, module.__dict__)
|
448
|
-
|
449
|
-
return module
|
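import_code_from_string is unchanged apart from moving from the bottom of utils.py to the top: it builds an empty module from a loader-less spec, optionally registers it in sys.modules, compiles the source string, and executes it in the module's namespace. A quick usage sketch (the source string and the greet attribute are made up for illustration):

from setta.tasks.utils import import_code_from_string

source = "def greet(name):\n    return f'hello {name}'\n"
module = import_code_from_string(source, add_to_sys_modules=False)
print(module.greet("setta"))  # prints: hello setta

The subprocess-side OutputCapture and per-function process_message helpers, by contrast, were not just moved: the module-level versions were deleted, and smaller equivalents now live inside _subprocess_main and as a process_message method on SettaInMemoryFnSubprocess.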
{setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-setta/__init__.py,sha256
+setta/__init__.py,sha256=-qJsuUle95TwTi4vroNItCFbufX5VZwZ055MmNPMVro,28
 setta/server.py,sha256=q4w9WG7SuLxwYtgXUCQyLt7t_HLmQV4y5abqvm7-uEA,4861
 setta/start.py,sha256=5sMZ7WH3KV9Q0v186PsaYqsWOz7hebyrpXbBOp9wQww,3589
 setta/cli/__init__.py,sha256=UxZG_VOMuF6lEBT3teUgTS9ulsK3wt3Gu3BbAQiAmt8,47
@@ -229,9 +229,9 @@ setta/static/frontend/assets/logo/logo.svg,sha256=k3XeAlA6hEaNfjnXG05hyb-8u1p_Fr
 setta/static/seed/.DS_Store,sha256=ENxJvDQd7Te_U8gExcXtHE-mAeBUYOHELRfDWgN1NmA,6148
 setta/static/seed/examples/.DS_Store,sha256=1lFlJ5EFymdzGAUAaI30vcaaLHt3F1LwpG7xILf9jsM,6148
 setta/tasks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-setta/tasks/task_runner.py,sha256=
-setta/tasks/tasks.py,sha256=
-setta/tasks/utils.py,sha256=
+setta/tasks/task_runner.py,sha256=gMXpfZWFMQbix2MfrHVCKB7BxQCjO8JH2P8cxUmt1ms,849
+setta/tasks/tasks.py,sha256=jhfTdncE6fMtxMUdnmixMbbtKV-Qu_xHLvbDkmm3M4g,10993
+setta/tasks/utils.py,sha256=iqbsLYBcu4Qd-MAHd0SWK9wPaJezgEh1Yg5YC9goOLU,10631
 setta/tasks/fns/__init__.py,sha256=JhGzzQGaT9BWtF3pOmguh6pzIF9kdG3jdDNLyYZ2w7g,461
 setta/tasks/fns/codeAreaAutocomplete.py,sha256=gJ5JbjkWDyTothr-UF-YlOxrbVzj2iyOVK7XD3lfhSQ,6416
 setta/tasks/fns/codeAreaFindTemplateVars.py,sha256=vD9rY8VNPavv6VKa1bnxRPPRDNvFQy6mPIZRl-_3GnY,3708
@@ -252,9 +252,9 @@ setta/utils/generate_new_filename.py,sha256=KBLX6paDmTvXR-027TpqQkfijIXc7mCfhen-
 setta/utils/section_contents.py,sha256=V2HQPik6DfSXw4j7IalbP5AZ3OEGCbtL5ub3xL-Q_Qo,4141
 setta/utils/utils.py,sha256=KjzcvgM3Ab3IcE8vaWYtgBpwzPLKg0LmblnHLoYZJHM,9164
 setta/utils/websocket_manager.py,sha256=MBIMI8xxOFQF4lT3on4pupi1ttEWXdWPV4fI2YP_UJU,3925
-setta-0.0.14.
-setta-0.0.14.
-setta-0.0.14.
-setta-0.0.14.
-setta-0.0.14.
-setta-0.0.14.
+setta-0.0.14.dev7.dist-info/LICENSE,sha256=us9fuCq9wmiZVzayjKxNZ2iJYF6dROe0Qp57ToCO7XU,11361
+setta-0.0.14.dev7.dist-info/METADATA,sha256=__vIVEdqunD4s14DQMi8rgEP6OOJEwD5isI9wH-6k3E,7517
+setta-0.0.14.dev7.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+setta-0.0.14.dev7.dist-info/entry_points.txt,sha256=P0qCESy9fWF2q1EQ9JufGldCSnPHplDPn8J6Bgk5hB0,42
+setta-0.0.14.dev7.dist-info/top_level.txt,sha256=8G4lmRzVOnJ11_DescPVHE6MQZH-o06A0nGsDDV2ngY,6
+setta-0.0.14.dev7.dist-info/RECORD,,
{setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/LICENSE: File without changes
{setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/WHEEL: File without changes
{setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/entry_points.txt: File without changes
{setta-0.0.14.dev5.dist-info → setta-0.0.14.dev7.dist-info}/top_level.txt: File without changes