pyoco 0.1.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyoco/core/engine.py CHANGED
@@ -1,180 +1,431 @@
  import time
- from typing import Dict, Any, List, Set
- from .models import Flow, Task
- from .context import Context
+ import io
+ import sys
+ import traceback
+ from typing import Dict, Any, List, Set, Optional
+ import contextlib
+ from .models import Flow, Task, RunContext, TaskState, RunStatus
+ from .context import Context, LoopFrame
+ from .exceptions import UntilMaxIterationsExceeded
  from ..trace.backend import TraceBackend
  from ..trace.console import ConsoleTraceBackend
+ from ..dsl.nodes import TaskNode, RepeatNode, ForEachNode, UntilNode, SwitchNode, DEFAULT_CASE_VALUE
+ from ..dsl.expressions import Expression
+
+ class TeeStream:
+ def __init__(self, original):
+ self.original = original
+ self.buffer = io.StringIO()
+
+ def write(self, data):
+ self.original.write(data)
+ self.buffer.write(data)
+ return len(data)
+
+ def flush(self):
+ self.original.flush()
+
+ def getvalue(self):
+ return self.buffer.getvalue()

  class Engine:
+ """
+ The core execution engine for Pyoco flows.
+
+ Responsible for:
+ - Resolving task dependencies
+ - Managing parallel execution (using ThreadPoolExecutor)
+ - Handling input injection and artifact storage
+ - Delegating logging to the TraceBackend
+
+ Intentionally keeps scheduling logic simple (no distributed queue, no external DB).
+ """
  def __init__(self, trace_backend: TraceBackend = None):
  self.trace = trace_backend or ConsoleTraceBackend()
+ # Track active runs: run_id -> RunContext
+ from .models import RunContext
+ self.active_runs: Dict[str, RunContext] = {}
+
+ def get_run(self, run_id: str) -> Any:
+ # Return RunContext if active, else None (for now)
+ return self.active_runs.get(run_id)

- def run(self, flow: Flow, params: Dict[str, Any] = None) -> Context:
- ctx = Context(params=params or {})
- self.trace.on_flow_start(flow.name)
+ def cancel(self, run_id: str):
+ """
+ Cancel an active run.
+ """
+ from .models import RunStatus
+ run_ctx = self.active_runs.get(run_id)
+ if run_ctx:
+ if run_ctx.status == RunStatus.RUNNING:
+ run_ctx.status = RunStatus.CANCELLING
+ # We don't force kill threads here, the loop will handle it.
+
+ def run(self, flow: Flow, params: Dict[str, Any] = None, run_context: Optional[RunContext] = None) -> Context:
+ # Initialize RunContext (v0.2.0)
+ if run_context is None:
+ run_context = RunContext()

- executed: Set[Task] = set()
- running: Set[Any] = set() # Set of Futures
+ run_ctx = run_context
+ run_ctx.flow_name = flow.name
+ run_ctx.params = params or {}

- import concurrent.futures
+ # Initialize all tasks as PENDING
+ for task in flow.tasks:
+ run_ctx.tasks[task.name] = TaskState.PENDING
+ run_ctx.ensure_task_record(task.name)
+
+ ctx = Context(params=params or {}, run_context=run_ctx)
+ self.trace.on_flow_start(flow.name, run_id=run_ctx.run_id)

- # Use ThreadPoolExecutor for parallel execution
- # Max workers could be configurable, default to something reasonable
- with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
- future_to_task = {}
- task_deadlines: Dict[Task, float] = {}
+ # Register active run
+ self.active_runs[run_ctx.run_id] = run_ctx
+
+ if flow.has_control_flow():
+ try:
+ program = flow.build_program()
+ self._execute_subflow(program, ctx)
+ run_ctx.status = RunStatus.COMPLETED
+ except Exception:
+ run_ctx.status = RunStatus.FAILED
+ run_ctx.end_time = time.time()
+ raise
+ run_ctx.end_time = time.time()
+ return ctx
+
+ try:
+ executed: Set[Task] = set()
+ running: Set[Any] = set() # Set of Futures

- failed: Set[Task] = set()
+ import concurrent.futures
+
+ # Use ThreadPoolExecutor for parallel execution
+ # Max workers could be configurable, default to something reasonable
+ with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
+ future_to_task = {}
+ task_deadlines: Dict[Task, float] = {}
+
+ failed: Set[Task] = set()

- while len(executed) + len(failed) < len(flow.tasks):
- # Identify runnable tasks
- runnable = []
- for task in flow.tasks:
- if task in executed or task in failed or task in [future_to_task[f] for f in running]:
- continue
-
- # Check dependencies
- deps_met = True
-
- if task.trigger_policy == "ANY":
- # OR-join: Run if ANY dependency is executed (and successful)
- # But what if all failed? Then we can't run.
- # If at least one succeeded, we run.
- # If none succeeded yet, we wait.
- # If all failed, we fail (or skip).
+ while len(executed) + len(failed) < len(flow.tasks):
+ # Check for cancellation
+ if run_ctx.status in [RunStatus.CANCELLING, RunStatus.CANCELLED]:
+ # Stop submitting new tasks
+ # Mark all PENDING tasks as CANCELLED
+ for t_name, t_state in run_ctx.tasks.items():
+ if t_state == TaskState.PENDING:
+ run_ctx.tasks[t_name] = TaskState.CANCELLED

- any_success = False
- all_failed = True
-
- if not task.dependencies:
- # No deps = ready
- any_success = True
- all_failed = False
- else:
- for dep in task.dependencies:
- if dep in executed:
+ # If no running tasks, we are done
+ if not running:
+ run_ctx.status = RunStatus.CANCELLED
+ break
+ # Else continue loop to wait for running tasks (graceful shutdown)
+ # We still need to wait, so we fall through to the wait logic,
+ # but 'runnable' will be empty because we won't add anything.
+
+ # Identify runnable tasks
+ runnable = []
+ if run_ctx.status == RunStatus.RUNNING:
+ for task in flow.tasks:
+ if task in executed or task in failed or task in [future_to_task[f] for f in running]:
+ continue
+
+ # Check dependencies
+ deps_met = True
+
+ if task.trigger_policy == "ANY":
+ # OR-join: Run if ANY dependency is executed (and successful)
+ # But what if all failed? Then we can't run.
+ # If at least one succeeded, we run.
+ # If none succeeded yet, we wait.
+ # If all failed, we fail (or skip).
+
+ any_success = False
+ all_failed = True
+
+ if not task.dependencies:
+ # No deps = ready
  any_success = True
  all_failed = False
- break # Found one success
- if dep not in failed:
- all_failed = False # At least one is still running/pending
-
- if any_success:
- deps_met = True
- elif all_failed:
- # All deps failed, so we fail/skip
- failed.add(task)
- deps_met = False
- # Continue to next task loop to avoid adding to runnable
- continue
- else:
- # Still waiting
- deps_met = False
-
- else:
- # ALL (AND-join) - Default
- for dep in task.dependencies:
- if dep in failed:
- # Dependency failed
- if task.fail_policy == "isolate" or dep.fail_policy == "isolate":
+ else:
+ for dep in task.dependencies:
+ if dep in executed:
+ any_success = True
+ all_failed = False
+ break # Found one success
+ if dep not in failed:
+ all_failed = False # At least one is still running/pending
+
+ if any_success:
+ deps_met = True
+ elif all_failed:
+ # All deps failed, so we fail/skip
  failed.add(task)
+ run_ctx.tasks[task.name] = TaskState.FAILED
  deps_met = False
- break
+ # Continue to next task loop to avoid adding to runnable
+ continue
  else:
- pass # fail=stop handled elsewhere
+ # Still waiting
+ deps_met = False
+
+ else:
+ # ALL (AND-join) - Default
+ for dep in task.dependencies:
+ if dep in failed:
+ # Dependency failed
+ if task.fail_policy == "isolate" or dep.fail_policy == "isolate":
+ failed.add(task)
+ run_ctx.tasks[task.name] = TaskState.FAILED # Mark as FAILED (or SKIPPED if we had it)
+ deps_met = False
+ break
+ else:
+ pass # fail=stop handled elsewhere
+
+ if dep not in executed:
+ deps_met = False
+ break

- if dep not in executed:
- deps_met = False
- break
+ if deps_met and task not in failed:
+ runnable.append(task)

- if deps_met and task not in failed:
- runnable.append(task)
-
- # If no runnable tasks and no running tasks, we are stuck
- # But if we have failed tasks, maybe that's why?
- if not runnable and not running:
- if len(executed) + len(failed) == len(flow.tasks):
- # All done (some failed)
- break
- raise RuntimeError("Deadlock or cycle detected in workflow")
-
- # Submit runnable tasks
- for task in runnable:
- future = executor.submit(self._execute_task, task, ctx)
- running.add(future)
- future_to_task[future] = task
- # Record start time for timeout tracking
- # We need to track start times or deadlines.
- # Let's store deadline in a separate dict or attach to task?
- # Task is immutable-ish (dataclass).
- # Let's use a dict.
- if task.timeout_sec:
- task_deadlines[task] = time.time() + task.timeout_sec
-
- # Calculate wait timeout
- wait_timeout = None
- if task_deadlines:
- now = time.time()
- min_deadline = min(task_deadlines.values())
- wait_timeout = max(0, min_deadline - now)
-
- # Wait for at least one task to complete or timeout
- if running:
- done, _ = concurrent.futures.wait(
- running,
- timeout=wait_timeout,
- return_when=concurrent.futures.FIRST_COMPLETED
- )
+ # If no runnable tasks and no running tasks, we are stuck
+ # But if we have failed tasks, maybe that's why?
+ if not runnable and not running:
+ if len(executed) + len(failed) == len(flow.tasks):
+ # All done (some failed)
+ break
+
+ run_ctx.status = RunStatus.FAILED
+ run_ctx.end_time = time.time()
+ raise RuntimeError("Deadlock or cycle detected in workflow")

- # Check for timeouts first
- now = time.time()
- timed_out_tasks = []
- for task, deadline in list(task_deadlines.items()):
- if now >= deadline:
- # Task timed out
- # Find the future for this task
- # This is inefficient, but running set is small
- found_future = None
- for f, t in future_to_task.items():
- if t == task and f in running:
- found_future = f
- break
-
- if found_future:
- timed_out_tasks.append(found_future)
- # Remove from tracking
- running.remove(found_future)
- del task_deadlines[task]
-
- # Handle failure
- if task.fail_policy == "isolate":
- failed.add(task)
- self.trace.on_node_error(task.name, TimeoutError(f"Task exceeded timeout of {task.timeout_sec}s"))
- else:
- raise TimeoutError(f"Task '{task.name}' exceeded timeout of {task.timeout_sec}s")
-
- for future in done:
- if future in running: # Might have been removed by timeout check above (unlikely if wait returned due to completion, but possible race)
- running.remove(future)
- task = future_to_task[future]
- if task in task_deadlines:
- del task_deadlines[task]
+ # Submit runnable tasks
+ for task in runnable:
+ future = executor.submit(self._execute_task, task, ctx)
+ running.add(future)
+ future_to_task[future] = task
+ # Record start time for timeout tracking
+ if task.timeout_sec:
+ task_deadlines[task] = time.time() + task.timeout_sec
+
+ # Calculate wait timeout
+ wait_timeout = None
+ if task_deadlines:
+ now = time.time()
+ min_deadline = min(task_deadlines.values())
+ wait_timeout = max(0, min_deadline - now)
+
+ # Wait for at least one task to complete or timeout
+ if running:
+ done, _ = concurrent.futures.wait(
+ running,
+ timeout=wait_timeout,
+ return_when=concurrent.futures.FIRST_COMPLETED
+ )
+
+ # Check for timeouts first
+ now = time.time()
+ timed_out_tasks = []
+ for task, deadline in list(task_deadlines.items()):
+ if now >= deadline:
+ # Task timed out
+ # Find the future for this task
+ found_future = None
+ for f, t in future_to_task.items():
+ if t == task and f in running:
+ found_future = f
+ break

- try:
- future.result() # Re-raise exception if any
- executed.add(task)
- except Exception as e:
- if task.fail_policy == "isolate":
- failed.add(task)
- self.trace.on_node_error(task.name, e) # Log it
- else:
- # fail=stop (default)
- raise e
+ if found_future:
+ timed_out_tasks.append(found_future)
+ # Remove from tracking
+ running.remove(found_future)
+ del task_deadlines[task]
+
+ # Handle failure
+ if task.fail_policy == "isolate":
+ failed.add(task)
+ run_ctx.tasks[task.name] = TaskState.FAILED
+ self.trace.on_node_error(task.name, TimeoutError(f"Task exceeded timeout of {task.timeout_sec}s"))
+ else:
+ run_ctx.status = RunStatus.FAILED
+ run_ctx.end_time = time.time()
+ raise TimeoutError(f"Task '{task.name}' exceeded timeout of {task.timeout_sec}s")
+
+ for future in done:
+ if future in running: # Might have been removed by timeout check above
+ running.remove(future)
+ task = future_to_task[future]
+ if task in task_deadlines:
+ del task_deadlines[task]
+
+ try:
+ future.result() # Re-raise exception if any
+ executed.add(task)
+ except Exception as e:
+ if task.fail_policy == "isolate":
+ failed.add(task)
+ # TaskState update is handled in _execute_task on exception?
+ # No, _execute_task raises. So we need to update here if it failed.
+ # Actually _execute_task updates to FAILED before raising?
+ # Let's check _execute_task implementation below.
+ # If _execute_task raises, we catch it here.
+ # We should ensure FAILED state.
+ run_ctx.tasks[task.name] = TaskState.FAILED
+ self.trace.on_node_error(task.name, e) # Log it
+ else:
+ # fail=stop (default)
+ run_ctx.status = RunStatus.FAILED
+ run_ctx.end_time = time.time()
+ raise e
+
+ finally:
+ # Cleanup active run
+ if run_ctx.run_id in self.active_runs:
+ del self.active_runs[run_ctx.run_id]

  self.trace.on_flow_end(flow.name)
+
+ # Update final run status
+ if run_ctx.status == RunStatus.RUNNING:
+ if failed:
+ # Some tasks failed but were isolated
+ # Should run be COMPLETED or FAILED?
+ # Usually if flow finished (even with partial failures), it's COMPLETED (or PARTIAL_SUCCESS?)
+ # For now let's say COMPLETED if it didn't crash.
+ run_ctx.status = RunStatus.COMPLETED # Or maybe FAILED if strict?
+ else:
+ run_ctx.status = RunStatus.COMPLETED
+
+ run_ctx.end_time = time.time()
  return ctx

+ def _execute_subflow(self, subflow, ctx: Context):
+ for node in subflow.steps:
+ self._execute_node(node, ctx)
+
+ def _execute_node(self, node, ctx: Context):
+ if isinstance(node, TaskNode):
+ self._execute_task(node.task, ctx)
+ elif isinstance(node, RepeatNode):
+ self._execute_repeat(node, ctx)
+ elif isinstance(node, ForEachNode):
+ self._execute_foreach(node, ctx)
+ elif isinstance(node, UntilNode):
+ self._execute_until(node, ctx)
+ elif isinstance(node, SwitchNode):
+ self._execute_switch(node, ctx)
+ else:
+ raise TypeError(f"Unknown node type: {type(node)}")
+
+ def _execute_repeat(self, node: RepeatNode, ctx: Context):
+ count_value = self._resolve_repeat_count(node.count, ctx)
+ for index in range(count_value):
+ frame = LoopFrame(name="repeat", type="repeat", index=index, iteration=index + 1, count=count_value)
+ ctx.push_loop(frame)
+ try:
+ self._execute_subflow(node.body, ctx)
+ finally:
+ ctx.pop_loop()
+
+ def _execute_foreach(self, node: ForEachNode, ctx: Context):
+ sequence = self._eval_expression(node.source, ctx)
+ if not isinstance(sequence, (list, tuple)):
+ raise TypeError("ForEach source must evaluate to a list or tuple.")
+
+ total = len(sequence)
+ label = node.alias or node.source.source
+ for index, item in enumerate(sequence):
+ frame = LoopFrame(
+ name=f"foreach:{label}",
+ type="foreach",
+ index=index,
+ iteration=index + 1,
+ count=total,
+ item=item,
+ )
+ ctx.push_loop(frame)
+ if node.alias:
+ ctx.set_var(node.alias, item)
+ try:
+ self._execute_subflow(node.body, ctx)
+ finally:
+ if node.alias:
+ ctx.clear_var(node.alias)
+ ctx.pop_loop()
+
+ def _execute_until(self, node: UntilNode, ctx: Context):
+ max_iter = node.max_iter or 1000
+ iteration = 0
+ last_condition = None
+ while True:
+ iteration += 1
+ frame = LoopFrame(
+ name="until",
+ type="until",
+ index=iteration - 1,
+ iteration=iteration,
+ condition=last_condition,
+ count=max_iter,
+ )
+ ctx.push_loop(frame)
+ try:
+ self._execute_subflow(node.body, ctx)
+ condition_result = bool(self._eval_expression(node.condition, ctx))
+ finally:
+ ctx.pop_loop()
+
+ last_condition = condition_result
+ if condition_result:
+ break
+ if iteration >= max_iter:
+ raise UntilMaxIterationsExceeded(node.condition.source, max_iter)
+
+ def _execute_switch(self, node: SwitchNode, ctx: Context):
+ value = self._eval_expression(node.expression, ctx)
+ default_case = None
+ for case in node.cases:
+ if case.value == DEFAULT_CASE_VALUE:
+ if default_case is None:
+ default_case = case
+ continue
+ if case.value == value:
+ self._execute_subflow(case.target, ctx)
+ return
+ if default_case:
+ self._execute_subflow(default_case.target, ctx)
+ def _resolve_repeat_count(self, count_value, ctx: Context) -> int:
+ if isinstance(count_value, Expression):
+ resolved = self._eval_expression(count_value, ctx)
+ else:
+ resolved = count_value
+ if not isinstance(resolved, int):
+ raise TypeError("Repeat count must evaluate to an integer.")
+ if resolved < 0:
+ raise ValueError("Repeat count cannot be negative.")
+ return resolved
+
+ def _eval_expression(self, expression, ctx: Context):
+ if isinstance(expression, Expression):
+ return expression.evaluate(ctx=ctx.expression_data(), env=ctx.env_data())
+ return expression
+
  def _execute_task(self, task: Task, ctx: Context):
+ # Update state to RUNNING
+ from .models import TaskState
+ run_ctx = ctx.run_context
+ if ctx.run_context:
+ ctx.run_context.tasks[task.name] = TaskState.RUNNING
+ record = ctx.run_context.ensure_task_record(task.name)
+ record.state = TaskState.RUNNING
+ record.started_at = time.time()
+ record.error = None
+ record.traceback = None
+ else:
+ record = None
+
  self.trace.on_node_start(task.name)
  start_time = time.time()
  # Retry loop
@@ -206,8 +457,17 @@ class Engine:
  elif param_name in ctx.results:
  kwargs[param_name] = ctx.results[param_name]

- result = task.func(**kwargs)
+ if record:
+ record.inputs = {k: v for k, v in kwargs.items() if k != "ctx"}
+
+ stdout_capture = TeeStream(sys.stdout)
+ stderr_capture = TeeStream(sys.stderr)
+ with contextlib.redirect_stdout(stdout_capture), contextlib.redirect_stderr(stderr_capture):
+ result = task.func(**kwargs)
  ctx.set_result(task.name, result)
+ if run_ctx:
+ run_ctx.append_log(task.name, "stdout", stdout_capture.getvalue())
+ run_ctx.append_log(task.name, "stderr", stderr_capture.getvalue())

  # Handle outputs saving
  for target_path in task.outputs:
@@ -234,9 +494,28 @@ class Engine:

  duration = (time.time() - start_time) * 1000
  self.trace.on_node_end(task.name, duration)
+
+ # Update state to SUCCEEDED
+ if ctx.run_context:
+ ctx.run_context.tasks[task.name] = TaskState.SUCCEEDED
+ if record:
+ record.state = TaskState.SUCCEEDED
+ record.ended_at = time.time()
+ record.duration_ms = (record.ended_at - record.started_at) * 1000
+ record.output = result
+
  return # Success

  except Exception as e:
+ if run_ctx:
+ run_ctx.append_log(task.name, "stdout", stdout_capture.getvalue() if 'stdout_capture' in locals() else "")
+ run_ctx.append_log(task.name, "stderr", stderr_capture.getvalue() if 'stderr_capture' in locals() else "")
+ if record:
+ record.state = TaskState.FAILED
+ record.ended_at = time.time()
+ record.duration_ms = (record.ended_at - record.started_at) * 1000
+ record.error = str(e)
+ record.traceback = traceback.format_exc()
  if retries_left > 0:
  retries_left -= 1
  # Log retry?
@@ -246,4 +525,7 @@ class Engine:
  continue
  else:
  self.trace.on_node_error(task.name, e)
+ # Update state to FAILED
+ if ctx.run_context:
+ ctx.run_context.tasks[task.name] = TaskState.FAILED
  raise e
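For readers following the `_execute_task` changes above: 0.5.0 routes each task's stdout/stderr through the new `TeeStream` class plus `contextlib.redirect_stdout`/`redirect_stderr`, so output is still echoed live while a copy is kept for the run's logs. A minimal standalone sketch of that capture pattern follows; `TeeStream` is the class added in this diff, while `run_task` and its print call are hypothetical stand-ins for `task.func(**kwargs)`.

import contextlib
import sys
from pyoco.core.engine import TeeStream  # added in 0.5.0 (see diff above)

def run_task():
    # Hypothetical task body; in the engine this is task.func(**kwargs).
    print("hello from a task")

stdout_capture = TeeStream(sys.stdout)            # echoes to the console and buffers a copy
with contextlib.redirect_stdout(stdout_capture):  # route prints through the tee
    run_task()

print("captured:", repr(stdout_capture.getvalue()))  # buffered copy, e.g. 'hello from a task\n'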
pyoco/core/exceptions.py ADDED
@@ -0,0 +1,15 @@
+ class ControlFlowError(Exception):
+ """Base error for control flow execution issues."""
+
+
+ class UntilMaxIterationsExceeded(ControlFlowError):
+ def __init__(self, expression: str, max_iter: int):
+ super().__init__(f"Until condition '{expression}' exceeded max_iter={max_iter}")
+ self.expression = expression
+ self.max_iter = max_iter
+
+
+ class SwitchNoMatch(ControlFlowError):
+ def __init__(self, expression: str):
+ super().__init__(f"Switch expression '{expression}' did not match any case.")
+ self.expression = expression
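The new exceptions module backs the `until` guard in `Engine._execute_until` above: the loop body runs at least once, the condition is checked after each pass, and if it never becomes true within `max_iter` passes the engine raises `UntilMaxIterationsExceeded` instead of spinning forever. A stripped-down sketch of that guard is below; it assumes the module lands at `pyoco/core/exceptions.py` as the engine's relative import suggests, and the `body`/`condition` callables and the `run_until` helper are hypothetical placeholders, not package API.

from pyoco.core.exceptions import UntilMaxIterationsExceeded  # added in 0.5.0

def run_until(body, condition, expression_source, max_iter=1000):
    # Mirrors the shape of Engine._execute_until: execute the body, then test the condition.
    iteration = 0
    while True:
        iteration += 1
        body()
        if condition():
            return iteration
        if iteration >= max_iter:
            raise UntilMaxIterationsExceeded(expression_source, max_iter)

# Placeholder body/condition: stops once the counter reaches 3.
state = {"n": 0}
run_until(lambda: state.update(n=state["n"] + 1),
          lambda: state["n"] >= 3,
          "ctx.n >= 3")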