setta 0.0.14.dev2__py3-none-any.whl → 0.0.14.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
setta/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.0.14.dev2"
+ __version__ = "0.0.14.dev3"
setta/tasks/tasks.py CHANGED
@@ -151,6 +151,8 @@ class Tasks:
          sp = self.in_memory_subprocesses.get(subprocess_key, {}).get("subprocess")
          if sp:
              sp.close()
+             del self.in_memory_subprocesses[subprocess_key]
+
          sp = SettaInMemoryFnSubprocess(
              self.stop_event, self.websockets, c["subprocessStartMethod"]
          )
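
For context, the tasks.py change above closes any existing in-memory subprocess for the key and now also removes its dictionary entry before a replacement is created, so a closed handle never lingers in in_memory_subprocesses. A minimal sketch of the same close-drop-recreate pattern, using hypothetical Registry and DummyWorker names rather than setta's actual classes:

# Sketch of "close, drop, then recreate" for a keyed worker registry.
# Registry, DummyWorker, and make_worker are illustrative names, not setta's API.
class DummyWorker:
    def close(self):
        print("closing old worker")

class Registry:
    def __init__(self, make_worker):
        self.make_worker = make_worker
        self.workers = {}

    def restart(self, key):
        existing = self.workers.get(key)
        if existing is not None:
            existing.close()
            del self.workers[key]  # drop the stale entry before replacing it
        self.workers[key] = self.make_worker()
        return self.workers[key]

registry = Registry(DummyWorker)
registry.restart("proc-1")  # first call: nothing to close
registry.restart("proc-1")  # closes and removes the old worker, then recreates it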
setta/tasks/utils.py CHANGED
@@ -7,6 +7,7 @@ import sys
  import threading
  import traceback
  import uuid
+ from collections import defaultdict

  from setta.tasks.fns.utils import TaskDefinition
  from setta.utils.constants import CWD
@@ -15,35 +16,6 @@ from setta.utils.utils import nested_access
  logger = logging.getLogger(__name__)


- def import_code_from_string(code_string, module_name=None, add_to_sys_modules=True):
-     # Generate a unique module name if one isn't provided
-     if module_name is None:
-         module_name = f"setta_dynamic_module_{uuid.uuid4().hex}"
-
-     # Add current directory to sys.path if it's not already there
-     current_dir = str(CWD)
-     if current_dir not in sys.path:
-         sys.path.insert(0, current_dir)
-
-     spec = importlib.util.spec_from_loader(module_name, loader=None)
-
-     # Create a new module based on the spec
-     module = importlib.util.module_from_spec(spec)
-
-     # Optionally add the module to sys.modules
-     if add_to_sys_modules:
-         print(f"adding {module_name} to sys.modules", flush=True)
-         sys.modules[module_name] = module
-
-     # Compile the code string
-     code_object = compile(code_string, module_name, "exec")
-
-     # Execute the compiled code object in the module's namespace
-     exec(code_object, module.__dict__)
-
-     return module
-
-
  class SettaInMemoryFnSubprocess:
      def __init__(self, stop_event, websockets, start_method):
          logger.debug(
@@ -68,20 +40,20 @@ class SettaInMemoryFnSubprocess:
          self.start_stdout_processor_task()

      def _subprocess_main(self):
-         """Main loop in subprocess that handles all requests"""
+         """Main loop in subprocess that handles all requests with parallel function execution"""
          # Initialize store for imported modules
          fns_dict = {}
          cache = {}

-         class OutputCapture:
-             def __init__(self, stdout_pipe):
-                 self.stdout_pipe = stdout_pipe
+         # Message queues per function
+         fn_message_queues = defaultdict(queue.Queue)

-             def write(self, text):
-                 self.stdout_pipe.send(text)
+         # Create a lock for thread-safe operations
+         lock = threading.RLock()
+         send_lock = threading.Lock()

-             def flush(self):
-                 pass
+         # Function worker threads
+         fn_workers = {}

          # Redirect stdout as soon as subprocess starts
          output_capture = OutputCapture(self.stdout_child_conn)
@@ -91,9 +63,16 @@ class SettaInMemoryFnSubprocess:
          while True:
              msg = self.child_conn.recv() # Wait for requests
              msg_type = msg["type"]
-             return_message_type = None

              if msg_type == "shutdown":
+                 # Signal all worker threads to stop
+                 for fn_name in fn_workers:
+                     fn_message_queues[fn_name].put(None)
+
+                 # Wait for all worker threads to finish (with timeout)
+                 for fn_name, worker in fn_workers.items():
+                     worker.join(timeout=1.0)
+
                  break

              try:
@@ -104,108 +83,217 @@ class SettaInMemoryFnSubprocess:
                          module_name = to_import["module_name"]
                          # Import and store module
                          module = import_code_from_string(code, module_name)
-                         added_fn_names = add_fns_from_module(
-                             fns_dict, module, module_name
+                         with lock:
+                             added_fn_names = add_fns_from_module(
+                                 fns_dict, module, module_name
+                             )
+                             for k in added_fn_names:
+                                 cache[k] = msg["exporter_obj"]
+                                 dependencies[k] = get_task_metadata(
+                                     fns_dict[k], cache[k]
+                                 )
+                                 # Start a worker thread for each function
+                                 self._start_worker_for_fn(
+                                     k,
+                                     fn_workers,
+                                     fn_message_queues,
+                                     fns_dict,
+                                     cache,
+                                     lock,
+                                     send_lock,
+                                     self.child_conn,
+                                 )
+
+                     with send_lock:
+                         self.child_conn.send(
+                             {
+                                 "status": "success",
+                                 "content": dependencies,
+                             }
                          )
-                         for k in added_fn_names:
-                             cache[k] = msg["exporter_obj"]
-                             dependencies[k] = get_task_metadata(fns_dict[k], cache[k])

-                     self.child_conn.send(
-                         {
-                             "status": "success",
-                             "content": dependencies,
-                         }
+                 elif msg_type == "call" or msg_type == "call_with_new_exporter_obj":
+                     fn_name = msg["fn_name"]
+
+                     # Start a worker for this function if needed
+                     self._start_worker_for_fn(
+                         fn_name,
+                         fn_workers,
+                         fn_message_queues,
+                         fns_dict,
+                         cache,
+                         lock,
+                         send_lock,
+                         self.child_conn,
                      )

-                 elif msg_type == "call":
-                     result, return_message_type = self.call_imported_fn(
-                         msg, fns_dict, cache
-                     )
-                     self.child_conn.send(
-                         {
-                             "status": "success",
-                             "content": result,
-                             "messageType": return_message_type,
-                         }
-                     )
+                     # Add the message to the function's queue
+                     fn_message_queues[fn_name].put(msg)

-                 elif msg_type == "call_with_new_exporter_obj":
-                     # replace old exporter_obj
-                     cache[msg["fn_name"]] = msg["other_data"]["exporter_obj"]
-                     result, return_message_type = self.call_imported_fn(
-                         msg, fns_dict, cache
-                     )
+             except Exception as e:
+                 traceback.print_exc()
+                 with send_lock:
                      self.child_conn.send(
                          {
-                             "status": "success",
-                             "content": result,
-                             "messageType": return_message_type,
+                             "status": "error",
+                             "error": str(e),
+                             "messageType": None,
                          }
                      )

+     def _worker_thread(
+         self, fn_name, fn_message_queues, fns_dict, cache, lock, send_lock, child_conn
+     ):
+         """Worker thread that processes messages for a specific function"""
+         while True:
+             try:
+                 # Get a message from the queue
+                 msg = fn_message_queues[fn_name].get()
+
+                 if msg is None: # Sentinel value to stop the thread
+                     break
+
+                 msg_type = msg["type"]
+                 return_message_type = None
+
+                 if msg_type == "call" or msg_type == "call_with_new_exporter_obj":
+                     try:
+                         # Handle updating exporter_obj for call_with_new_exporter_obj
+                         if msg_type == "call_with_new_exporter_obj":
+                             with lock:
+                                 cache[fn_name] = msg["other_data"]["exporter_obj"]
+
+                         # Get a thread-safe copy of what we need
+                         with lock:
+                             in_memory_fn_obj = fns_dict[fn_name]
+                             exporter_obj = cache.get(fn_name)
+
+                         # Process message
+                         message_content = process_message(msg["message"], exporter_obj)
+
+                         # Call function
+                         result = in_memory_fn_obj.fn(message_content)
+                         return_message_type = in_memory_fn_obj.return_message_type
+
+                         # Send result back
+                         with send_lock:
+                             child_conn.send(
+                                 {
+                                     "status": "success",
+                                     "content": result,
+                                     "messageType": return_message_type,
+                                 }
+                             )
+                     except Exception as e:
+                         traceback.print_exc()
+                         with send_lock:
+                             child_conn.send(
+                                 {
+                                     "status": "error",
+                                     "error": str(e),
+                                     "messageType": return_message_type,
+                                 }
+                             )
+
+                 # Mark task as done
+                 fn_message_queues[fn_name].task_done()
+
              except Exception as e:
                  traceback.print_exc()
-                 self.child_conn.send(
-                     {
-                         "status": "error",
-                         "error": str(e),
-                         "messageType": return_message_type,
-                     }
-                 )
-
-     def call_imported_fn(self, msg, fns_dict, cache):
-         fn_name = msg["fn_name"]
-         message = self.process_message(fn_name, msg["message"], cache)
-         fn = fns_dict[fn_name]
-         result = fn.fn(message)
-         return_message_type = fn.return_message_type
-         return result, return_message_type
+                 print(f"Error in worker thread for {fn_name}: {e}", flush=True)
+
+     def _start_worker_for_fn(
+         self,
+         fn_name,
+         fn_workers,
+         fn_message_queues,
+         fns_dict,
+         cache,
+         lock,
+         send_lock,
+         child_conn,
+     ):
+         """Start a worker thread for a function if not already running"""
+         if fn_name not in fn_workers or not fn_workers[fn_name].is_alive():
+             worker = threading.Thread(
+                 target=self._worker_thread,
+                 args=(
+                     fn_name,
+                     fn_message_queues,
+                     fns_dict,
+                     cache,
+                     lock,
+                     send_lock,
+                     child_conn,
+                 ),
+                 daemon=True,
+                 name=f"worker-{fn_name}",
+             )
+             fn_workers[fn_name] = worker
+             worker.start()

      def close(self):
          try:
              logger.debug("Initiating shutdown sequence")
-             self.parent_conn.send({"type": "shutdown"})
-             self.process.join(timeout=2) # Add timeout to process join

+             # Set our stop event - this signals all tasks to stop
+             self.stop_event.set()
+
+             # Send shutdown message to the subprocess
+             try:
+                 self.parent_conn.send({"type": "shutdown"})
+             except (BrokenPipeError, EOFError):
+                 # Pipe might already be closed, that's okay
+                 pass
+
+             # Join the process with timeout
+             self.process.join(timeout=2)
+
+             # If still alive, escalate to terminate
              if self.process.is_alive():
-                 logger.debug("Process still alive after timeout, forcing termination")
+                 logger.debug(
+                     "Process still alive after graceful shutdown, forcing termination"
+                 )
                  self.process.terminate()
                  self.process.join(timeout=1)
-         except Exception as e:
-             logger.debug(f"Error during process shutdown: {e}")

-         # Set stop event before closing pipes
-         self.stop_event.set()
+             # Last resort: kill
+             if self.process.is_alive():
+                 logger.debug(
+                     "Process still alive after terminate, killing forcefully"
+                 )
+                 self.process.kill()
+                 self.process.join(timeout=1)

-         # Close all connections
-         for conn in [
-             self.parent_conn,
-             self.child_conn,
-             self.stdout_parent_conn,
-             self.stdout_child_conn,
-         ]:
-             conn.close()
+         except Exception as e:
+             logger.exception(f"Error during process shutdown: {e}")

-         self.stdout_thread.join(timeout=2) # Add timeout to thread join
+         # Now handle the async tasks and threads
+         try:
+             # Cancel the stdout processor task if it exists
+             if self.stdout_processor_task:
+                 self.stdout_processor_task.cancel()
+
+             # Close all connections - this will cause pending operations to fail fast
+             for conn in [
+                 self.parent_conn,
+                 self.child_conn,
+                 self.stdout_parent_conn,
+                 self.stdout_child_conn,
+             ]:
+                 try:
+                     conn.close()
+                 except:
+                     pass

-         if self.stdout_thread.is_alive():
-             logger.debug("Stdout thread failed to terminate within timeout")
+             # Join the stdout thread with timeout
+             if self.stdout_thread and self.stdout_thread.is_alive():
+                 self.stdout_thread.join(timeout=2)
+                 if self.stdout_thread.is_alive():
+                     logger.debug("Stdout thread failed to terminate within timeout")

-         if self.stdout_processor_task:
-             self.stdout_processor_task.cancel()
-
-     def process_message(self, fn_name, message, cache):
-         if fn_name in cache:
-             exporter_obj = cache[fn_name]
-             for k, v in message.content.items():
-                 nice_str = exporter_obj.var_name_mapping.get(k)
-                 if not nice_str:
-                     continue
-                 p_dict, key = nested_access(exporter_obj.output, nice_str)
-                 p_dict[key] = v
-             message.content = exporter_obj.output
-         return message.content
+         except Exception as e:
+             logger.exception(f"Error during resource cleanup: {e}")

      def start_stdout_processor_task(self):
          if self.stdout_processor_task is None or self.stdout_processor_task.done():
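
The reworked close() above escalates in stages: set the stop event, request a graceful shutdown over the pipe, join with a timeout, then terminate, and only then kill, before cleaning up connections and the stdout thread. A minimal sketch of that escalation using the standard multiprocessing API (the _loop and shutdown helpers are illustrative, not setta's actual close method):

import multiprocessing as mp
import time

def _loop(conn):
    # Child process: block on the pipe until a shutdown message arrives.
    while True:
        msg = conn.recv()
        if msg.get("type") == "shutdown":
            break

def shutdown(process, conn):
    try:
        conn.send({"type": "shutdown"})   # graceful request
    except (BrokenPipeError, EOFError):
        pass                              # pipe may already be gone
    process.join(timeout=2)
    if process.is_alive():
        process.terminate()               # escalate
        process.join(timeout=1)
    if process.is_alive():
        process.kill()                    # last resort
        process.join(timeout=1)

if __name__ == "__main__":
    parent_conn, child_conn = mp.Pipe()
    p = mp.Process(target=_loop, args=(child_conn,), daemon=True)
    p.start()
    time.sleep(0.2)
    shutdown(p, parent_conn)
    print("alive after shutdown:", p.is_alive())  # False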
@@ -290,3 +378,59 @@ def get_task_metadata(in_memory_fn, exporter_obj):
          exporter_obj.var_name_reverse_mapping[d] for d in in_memory_fn.dependencies
      )
      return dependencies
+
+
+ # Class for capturing and redirecting stdout/stderr
+ class OutputCapture:
+     def __init__(self, stdout_pipe):
+         self.stdout_pipe = stdout_pipe
+         self.lock = threading.Lock()
+
+     def write(self, text):
+         with self.lock:
+             self.stdout_pipe.send(text)
+
+     def flush(self):
+         pass
+
+
+ def process_message(message, exporter_obj):
+     """Process a message before passing it to a function"""
+     if exporter_obj:
+         for k, v in message.content.items():
+             nice_str = exporter_obj.var_name_mapping.get(k)
+             if not nice_str:
+                 continue
+             p_dict, key = nested_access(exporter_obj.output, nice_str)
+             p_dict[key] = v
+         return exporter_obj.output
+     return message.content
+
+
+ def import_code_from_string(code_string, module_name=None, add_to_sys_modules=True):
+     # Generate a unique module name if one isn't provided
+     if module_name is None:
+         module_name = f"setta_dynamic_module_{uuid.uuid4().hex}"
+
+     # Add current directory to sys.path if it's not already there
+     current_dir = str(CWD)
+     if current_dir not in sys.path:
+         sys.path.insert(0, current_dir)
+
+     spec = importlib.util.spec_from_loader(module_name, loader=None)
+
+     # Create a new module based on the spec
+     module = importlib.util.module_from_spec(spec)
+
+     # Optionally add the module to sys.modules
+     if add_to_sys_modules:
+         print(f"adding {module_name} to sys.modules", flush=True)
+         sys.modules[module_name] = module
+
+     # Compile the code string
+     code_object = compile(code_string, module_name, "exec")
+
+     # Execute the compiled code object in the module's namespace
+     exec(code_object, module.__dict__)
+
+     return module
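
Taken together, the utils.py changes replace the old one-call-at-a-time handling with per-function dispatch: the subprocess main loop now only enqueues each call message on a queue keyed by function name, and a dedicated daemon worker thread per function drains its queue, runs the function, and sends the result back over a lock-protected channel, so calls to different functions no longer block each other. Below is a minimal, self-contained sketch of that dispatch pattern under simplified assumptions (plain callables in a handlers dict and a results list stand in for setta's task objects and the pipe back to the parent):

import queue
import threading
from collections import defaultdict

# Minimal sketch of per-function worker threads fed by per-function queues.
# handlers, results, dispatch, and _worker are illustrative names; setta's real
# worker carries extra metadata (exporter objects, return message types, etc.).
handlers = {"double": lambda x: 2 * x}
fn_message_queues = defaultdict(queue.Queue)
fn_workers = {}
send_lock = threading.Lock()  # serializes writes to the shared output channel
results = []                  # stands in for the pipe back to the parent process


def _worker(fn_name):
    # Drain this function's queue until a None sentinel arrives.
    while True:
        msg = fn_message_queues[fn_name].get()
        if msg is None:
            break
        try:
            out = handlers[fn_name](msg)
            with send_lock:
                results.append({"status": "success", "content": out})
        except Exception as e:
            with send_lock:
                results.append({"status": "error", "error": str(e)})
        fn_message_queues[fn_name].task_done()


def dispatch(fn_name, msg):
    # Start a worker for this function if one isn't already running,
    # then enqueue the message; different functions run in parallel.
    if fn_name not in fn_workers or not fn_workers[fn_name].is_alive():
        worker = threading.Thread(
            target=_worker, args=(fn_name,), daemon=True, name=f"worker-{fn_name}"
        )
        fn_workers[fn_name] = worker
        worker.start()
    fn_message_queues[fn_name].put(msg)


dispatch("double", 21)
fn_message_queues["double"].join()  # block until the queued call is processed
print(results)  # [{'status': 'success', 'content': 42}]

Per-function queues keep calls to the same function ordered while letting different functions run concurrently; the send lock prevents workers from interleaving writes on the single shared result channel.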
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: setta
- Version: 0.0.14.dev2
+ Version: 0.0.14.dev3
  Summary: Python without the donkeywork.
  Home-page: https://setta.dev
  Author: Kevin Musgrave, Jeff Musgrave
@@ -1,4 +1,4 @@
- setta/__init__.py,sha256=v8sPkSKHEFL488Wxp-yVEGsW9I0nkkHNUUO3b_4XqSQ,28
+ setta/__init__.py,sha256=dRyAeVBboRTo_BMS4gP2ZewakrwbpbNaV3_dLZTmbgQ,28
  setta/server.py,sha256=q4w9WG7SuLxwYtgXUCQyLt7t_HLmQV4y5abqvm7-uEA,4861
  setta/start.py,sha256=5sMZ7WH3KV9Q0v186PsaYqsWOz7hebyrpXbBOp9wQww,3589
  setta/cli/__init__.py,sha256=UxZG_VOMuF6lEBT3teUgTS9ulsK3wt3Gu3BbAQiAmt8,47
@@ -230,8 +230,8 @@ setta/static/seed/.DS_Store,sha256=ENxJvDQd7Te_U8gExcXtHE-mAeBUYOHELRfDWgN1NmA,6
  setta/static/seed/examples/.DS_Store,sha256=1lFlJ5EFymdzGAUAaI30vcaaLHt3F1LwpG7xILf9jsM,6148
  setta/tasks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  setta/tasks/task_runner.py,sha256=gMXpfZWFMQbix2MfrHVCKB7BxQCjO8JH2P8cxUmt1ms,849
- setta/tasks/tasks.py,sha256=kgV1Z0xzM9dBS5nreO8QYGPLG-njR-zZ5-viT2rYpwg,10141
- setta/tasks/utils.py,sha256=iqbsLYBcu4Qd-MAHd0SWK9wPaJezgEh1Yg5YC9goOLU,10631
+ setta/tasks/tasks.py,sha256=uG-S3jN9qZEolHtXkJZ4LMi-8LrNPctQR6TVWPlyeTs,10206
+ setta/tasks/utils.py,sha256=cTmHJGPk6HHbl7nNTPV2KaTugkdSRJTow0z4A05b0lg,15901
  setta/tasks/fns/__init__.py,sha256=JhGzzQGaT9BWtF3pOmguh6pzIF9kdG3jdDNLyYZ2w7g,461
  setta/tasks/fns/codeAreaAutocomplete.py,sha256=gJ5JbjkWDyTothr-UF-YlOxrbVzj2iyOVK7XD3lfhSQ,6416
  setta/tasks/fns/codeAreaFindTemplateVars.py,sha256=vD9rY8VNPavv6VKa1bnxRPPRDNvFQy6mPIZRl-_3GnY,3708
@@ -252,9 +252,9 @@ setta/utils/generate_new_filename.py,sha256=KBLX6paDmTvXR-027TpqQkfijIXc7mCfhen-
  setta/utils/section_contents.py,sha256=V2HQPik6DfSXw4j7IalbP5AZ3OEGCbtL5ub3xL-Q_Qo,4141
  setta/utils/utils.py,sha256=KjzcvgM3Ab3IcE8vaWYtgBpwzPLKg0LmblnHLoYZJHM,9164
  setta/utils/websocket_manager.py,sha256=MBIMI8xxOFQF4lT3on4pupi1ttEWXdWPV4fI2YP_UJU,3925
- setta-0.0.14.dev2.dist-info/LICENSE,sha256=us9fuCq9wmiZVzayjKxNZ2iJYF6dROe0Qp57ToCO7XU,11361
- setta-0.0.14.dev2.dist-info/METADATA,sha256=ySjZVaCLgQKmWfS0edY3UcYcrPWaAqJeubY07dtRsNc,7517
- setta-0.0.14.dev2.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
- setta-0.0.14.dev2.dist-info/entry_points.txt,sha256=P0qCESy9fWF2q1EQ9JufGldCSnPHplDPn8J6Bgk5hB0,42
- setta-0.0.14.dev2.dist-info/top_level.txt,sha256=8G4lmRzVOnJ11_DescPVHE6MQZH-o06A0nGsDDV2ngY,6
- setta-0.0.14.dev2.dist-info/RECORD,,
+ setta-0.0.14.dev3.dist-info/LICENSE,sha256=us9fuCq9wmiZVzayjKxNZ2iJYF6dROe0Qp57ToCO7XU,11361
+ setta-0.0.14.dev3.dist-info/METADATA,sha256=_B3NlxJtJjD-3RX-EifIfhK5bIgRfFbzFnCgNsOTL04,7517
+ setta-0.0.14.dev3.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+ setta-0.0.14.dev3.dist-info/entry_points.txt,sha256=P0qCESy9fWF2q1EQ9JufGldCSnPHplDPn8J6Bgk5hB0,42
+ setta-0.0.14.dev3.dist-info/top_level.txt,sha256=8G4lmRzVOnJ11_DescPVHE6MQZH-o06A0nGsDDV2ngY,6
+ setta-0.0.14.dev3.dist-info/RECORD,,