flatmachines 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. flatmachines/__init__.py +136 -0
  2. flatmachines/actions.py +408 -0
  3. flatmachines/adapters/__init__.py +38 -0
  4. flatmachines/adapters/flatagent.py +86 -0
  5. flatmachines/adapters/pi_agent_bridge.py +127 -0
  6. flatmachines/adapters/pi_agent_runner.mjs +99 -0
  7. flatmachines/adapters/smolagents.py +125 -0
  8. flatmachines/agents.py +144 -0
  9. flatmachines/assets/MACHINES.md +141 -0
  10. flatmachines/assets/README.md +11 -0
  11. flatmachines/assets/__init__.py +0 -0
  12. flatmachines/assets/flatagent.d.ts +219 -0
  13. flatmachines/assets/flatagent.schema.json +271 -0
  14. flatmachines/assets/flatagent.slim.d.ts +58 -0
  15. flatmachines/assets/flatagents-runtime.d.ts +523 -0
  16. flatmachines/assets/flatagents-runtime.schema.json +281 -0
  17. flatmachines/assets/flatagents-runtime.slim.d.ts +187 -0
  18. flatmachines/assets/flatmachine.d.ts +403 -0
  19. flatmachines/assets/flatmachine.schema.json +620 -0
  20. flatmachines/assets/flatmachine.slim.d.ts +106 -0
  21. flatmachines/assets/profiles.d.ts +140 -0
  22. flatmachines/assets/profiles.schema.json +93 -0
  23. flatmachines/assets/profiles.slim.d.ts +26 -0
  24. flatmachines/backends.py +222 -0
  25. flatmachines/distributed.py +835 -0
  26. flatmachines/distributed_hooks.py +351 -0
  27. flatmachines/execution.py +638 -0
  28. flatmachines/expressions/__init__.py +60 -0
  29. flatmachines/expressions/cel.py +101 -0
  30. flatmachines/expressions/simple.py +166 -0
  31. flatmachines/flatmachine.py +1263 -0
  32. flatmachines/hooks.py +381 -0
  33. flatmachines/locking.py +69 -0
  34. flatmachines/monitoring.py +505 -0
  35. flatmachines/persistence.py +213 -0
  36. flatmachines/run.py +117 -0
  37. flatmachines/utils.py +166 -0
  38. flatmachines/validation.py +79 -0
  39. flatmachines-1.0.0.dist-info/METADATA +390 -0
  40. flatmachines-1.0.0.dist-info/RECORD +41 -0
  41. flatmachines-1.0.0.dist-info/WHEEL +4 -0
@@ -0,0 +1,1263 @@
1
+ """
2
+ FlatMachine - State machine orchestration for agents.
3
+
4
+ A machine defines how agents are connected and executed:
5
+ states, transitions, conditions, and loops.
6
+
7
+ See local/flatmachines-plan.md for the full specification.
8
+ """
9
+
10
+ import asyncio
11
+ import importlib
12
+ import json
13
+ import os
14
+ import re
15
+ from typing import Any, Dict, Optional
16
+
17
+ try:
18
+ from jinja2 import Template
19
+ except ImportError:
20
+ Template = None
21
+
22
+ try:
23
+ import yaml
24
+ except ImportError:
25
+ yaml = None
26
+
27
+ from . import __version__
28
+ from .monitoring import get_logger
29
+ from .utils import check_spec_version
30
+ from .expressions import get_expression_engine, ExpressionEngine
31
+ from .execution import get_execution_type, ExecutionType
32
+ from .hooks import MachineHooks, LoggingHooks
33
+ from .agents import (
34
+ AgentAdapterContext,
35
+ AgentAdapterRegistry,
36
+ AgentExecutor,
37
+ AgentResult,
38
+ coerce_agent_result,
39
+ normalize_agent_ref,
40
+ )
41
+
42
+ import uuid
43
+ from .persistence import (
44
+ PersistenceBackend,
45
+ LocalFileBackend,
46
+ MemoryBackend,
47
+ CheckpointManager,
48
+ MachineSnapshot
49
+ )
50
+ from .backends import (
51
+ ResultBackend,
52
+ InMemoryResultBackend,
53
+ LaunchIntent,
54
+ make_uri,
55
+ get_default_result_backend,
56
+ )
57
+ from .locking import ExecutionLock, LocalFileLock, NoOpLock
58
+ from .actions import (
59
+ Action,
60
+ HookAction,
61
+ MachineInvoker,
62
+ InlineInvoker,
63
+ QueueInvoker,
64
+ )
65
+
66
+ logger = get_logger(__name__)
67
+
68
+
69
+ class FlatMachine:
70
+ """
71
+ State machine orchestration for agents.
72
+
73
+ Executes a sequence of states, evaluating transitions and
74
+ managing context flow between agents.
75
+
76
+ Supports:
77
+ - Persistence (checkpoint/resume)
78
+ - Concurrency control (locking)
79
+ - Machine launching (peer machine execution)
80
+ """
81
+
82
    def __init__(
        self,
        config_file: Optional[str] = None,
        config_dict: Optional[Dict] = None,
        hooks: Optional[MachineHooks] = None,
        persistence: Optional[PersistenceBackend] = None,
        lock: Optional[ExecutionLock] = None,
        invoker: Optional[MachineInvoker] = None,
        result_backend: Optional[ResultBackend] = None,
        agent_registry: Optional[AgentAdapterRegistry] = None,
        agent_adapters: Optional[list] = None,
        profiles_file: Optional[str] = None,
        **kwargs
    ):
        """
        Initialize the machine.

        Args:
            config_file: Path to YAML/JSON config file
            config_dict: Configuration dictionary
            hooks: Custom hooks for extensibility
            persistence: Storage backend (overrides config)
            lock: Concurrency lock (overrides config)
            invoker: Strategy for invoking other machines
            result_backend: Backend for inter-machine result storage
            agent_registry: Registry of agent adapters (optional)
            agent_adapters: List of agent adapters to register (optional)
            profiles_file: Optional profiles.yml path for adapters that use it
            **kwargs: Override config values. Underscore-prefixed keys
                (_execution_id, _parent_execution_id, _config_dir,
                _profiles_dict, _profiles_file) are internal plumbing set
                when this machine is launched by a parent machine.

        Raises:
            ImportError: jinja2 is not installed.
            ValueError: neither config source is given or the config is invalid.
            FileNotFoundError: config_file does not exist.
        """
        if Template is None:
            raise ImportError("jinja2 is required. Install with: pip install jinja2")

        # Extract execution_id if passed (for launched machines)
        self.execution_id = kwargs.pop('_execution_id', None) or str(uuid.uuid4())
        self.parent_execution_id = kwargs.pop('_parent_execution_id', None)

        # Extract _config_dir override (used for launched machines)
        config_dir_override = kwargs.pop('_config_dir', None)

        # Adapter-specific profile hints (optional, used by adapter implementations)
        self._profiles_dict = kwargs.pop('_profiles_dict', None)
        self._profiles_file = profiles_file or kwargs.pop('_profiles_file', None)

        # NOTE: all internal underscore kwargs must be popped BEFORE
        # _load_config so only user overrides remain for the merge below.
        self._load_config(config_file, config_dict)

        # Allow launcher to override config_dir for launched machines
        if config_dir_override:
            self._config_dir = config_dir_override

        # Merge kwargs into config data (shallow merge)
        if kwargs and 'data' in self.config:
            self.config['data'].update(kwargs)

        self._validate_spec()
        self._parse_machine_config()

        # Set up Jinja2 environment with custom filters
        from jinja2 import Environment
        import json

        def _json_finalize(value):
            """Auto-serialize lists and dicts to JSON in Jinja2 output.

            This ensures {{ output.items }} renders as ["a", "b"] (valid JSON)
            instead of ['a', 'b'] (Python repr), allowing json.loads() to work.
            """
            if isinstance(value, (list, dict)):
                return json.dumps(value)
            return value

        self._jinja_env = Environment(finalize=_json_finalize)
        # Add fromjson filter for parsing JSON strings in templates
        # Usage: {% for item in context.items | fromjson %}
        self._jinja_env.filters['fromjson'] = json.loads

        # Set up expression engine
        expression_mode = self.data.get("expression_engine", "simple")
        self._expression_engine = get_expression_engine(expression_mode)

        # Hooks - load from config or use provided/default
        self._hooks = self._load_hooks(hooks)

        # Agent adapter registry: explicit registry > adapter list > default
        if agent_registry is not None:
            self._agent_registry = agent_registry
        elif agent_adapters:
            self._agent_registry = AgentAdapterRegistry(agent_adapters)
        else:
            from .adapters import create_registry
            self._agent_registry = create_registry()

        # Agent executor cache (name -> executor, filled lazily by _get_executor)
        self._agents: Dict[str, AgentExecutor] = {}

        # Execution tracking
        self.total_api_calls = 0
        self.total_cost = 0.0

        # Persistence & Locking
        self._initialize_persistence(persistence, lock)

        # Result backend for inter-machine communication
        self.result_backend = result_backend or get_default_result_backend()

        # Pending launches (outbox pattern)
        self._pending_launches: list[LaunchIntent] = []

        # Background tasks for fire-and-forget launches
        self._background_tasks: set[asyncio.Task] = set()

        # Invoker (for launching peer machines)
        self.invoker = invoker or InlineInvoker()

        logger.info(f"Initialized FlatMachine: {self.machine_name} (ID: {self.execution_id})")
197
+
198
    @property
    def agent_registry(self) -> AgentAdapterRegistry:
        """The adapter registry used to turn agent references into executors."""
        return self._agent_registry
201
+
202
+ def _initialize_persistence(
203
+ self,
204
+ persistence: Optional[PersistenceBackend],
205
+ lock: Optional[ExecutionLock]
206
+ ) -> None:
207
+ """Initialize persistence and locking components."""
208
+ # Get config
209
+ p_config = self.data.get('persistence', {})
210
+ # Global features config override (simulated for now, would be in kwargs/settings)
211
+ # For now, rely on machine.yml or defaults
212
+
213
+ enabled = p_config.get('enabled', True) # Default enabled? Or disable?
214
+ # Plan says: "Global Defaults... backend: local".
215
+ # Let's default to enabled=False for backward compat if not configured?
216
+ # Or follow plan default? Plan implies explicit configure.
217
+ # Let's default to MemoryBackend if enabled but no backend specified
218
+
219
+ backend_type = p_config.get('backend', 'memory')
220
+
221
+ # Persistence Backend
222
+ if persistence:
223
+ self.persistence = persistence
224
+ elif not enabled:
225
+ self.persistence = MemoryBackend() # Fallback, unsaved
226
+ elif backend_type == 'local':
227
+ self.persistence = LocalFileBackend()
228
+ elif backend_type == 'memory':
229
+ self.persistence = MemoryBackend()
230
+ else:
231
+ logger.warning(f"Unknown backend '{backend_type}', using memory")
232
+ self.persistence = MemoryBackend()
233
+
234
+ # Lock
235
+ if lock:
236
+ self.lock = lock
237
+ elif not enabled:
238
+ self.lock = NoOpLock()
239
+ elif backend_type == 'local':
240
+ self.lock = LocalFileLock()
241
+ else:
242
+ self.lock = NoOpLock()
243
+
244
+ # Checkpoint events (default set)
245
+ default_events = ['machine_start', 'state_enter', 'execute', 'state_exit', 'machine_end']
246
+ self.checkpoint_events = set(p_config.get('checkpoint_on', default_events))
247
+
248
+
249
+ def _load_config(
250
+ self,
251
+ config_file: Optional[str],
252
+ config_dict: Optional[Dict]
253
+ ) -> None:
254
+ """Load configuration from file or dict."""
255
+ config = {}
256
+
257
+ if config_file is not None:
258
+ if not os.path.exists(config_file):
259
+ raise FileNotFoundError(f"Config file not found: {config_file}")
260
+
261
+ with open(config_file, 'r') as f:
262
+ if config_file.endswith('.json'):
263
+ config = json.load(f) or {}
264
+ else:
265
+ if yaml is None:
266
+ raise ImportError("pyyaml required for YAML files")
267
+ config = yaml.safe_load(f) or {}
268
+
269
+ # Store config file path for relative agent references
270
+ self._config_dir = os.path.dirname(os.path.abspath(config_file))
271
+ elif config_dict is not None:
272
+ config = config_dict
273
+ self._config_dir = os.getcwd()
274
+ else:
275
+ raise ValueError("Must provide config_file or config_dict")
276
+
277
+ self.config = config
278
+
279
+ def _validate_spec(self) -> None:
280
+ """Validate the spec envelope."""
281
+ spec = self.config.get('spec')
282
+ if spec != 'flatmachine':
283
+ raise ValueError(
284
+ f"Invalid spec: expected 'flatmachine', got '{spec}'"
285
+ )
286
+
287
+ if 'data' not in self.config:
288
+ raise ValueError("Config missing 'data' section")
289
+
290
+ # Version check with warning
291
+ self.spec_version = check_spec_version(self.config.get('spec_version'), __version__)
292
+
293
+ # Schema validation (warnings only, non-blocking)
294
+ try:
295
+ from .validation import validate_flatmachine_config
296
+ validate_flatmachine_config(self.config, warn=True, strict=False)
297
+ except ImportError:
298
+ pass # jsonschema not installed, skip validation
299
+
300
+ def _parse_machine_config(self) -> None:
301
+ """Parse the machine configuration."""
302
+ self.data = self.config['data']
303
+ self.metadata = self.config.get('metadata', {})
304
+
305
+ self.machine_name = self.data.get('name', 'unnamed-machine')
306
+ self.initial_context = self.data.get('context', {})
307
+ self.agent_refs = self.data.get('agents', {})
308
+ self.machine_refs = self.data.get('machines', {})
309
+ self.states = self.data.get('states', {})
310
+ self.settings = self.data.get('settings', {})
311
+
312
+ # Find initial and final states
313
+ self._initial_state = None
314
+ self._final_states = set()
315
+
316
+ for name, state in self.states.items():
317
+ if state.get('type') == 'initial':
318
+ if self._initial_state is not None:
319
+ raise ValueError("Multiple initial states defined")
320
+ self._initial_state = name
321
+ if state.get('type') == 'final':
322
+ self._final_states.add(name)
323
+
324
+ if self._initial_state is None:
325
+ # Default to 'start' if exists, otherwise first state
326
+ if 'start' in self.states:
327
+ self._initial_state = 'start'
328
+ elif self.states:
329
+ self._initial_state = next(iter(self.states))
330
+ else:
331
+ raise ValueError("No states defined")
332
+
333
+ def _load_hooks(self, hooks: Optional[MachineHooks]) -> MachineHooks:
334
+ """
335
+ Load hooks from config or use provided/default.
336
+
337
+ Invocation patterns:
338
+
339
+ LOCAL (development, single-process):
340
+ hooks:
341
+ file: "./hooks.py" # Relative to config dir
342
+ class: "MyHooks"
343
+ - Loads from file path with sys.path manipulation
344
+ - Supports sibling imports (from repl import X)
345
+ - Hooks CAN be stateful (same process)
346
+
347
+ DISTRIBUTED (Lambda, Cloud Run, multi-process):
348
+ hooks:
349
+ module: "mypackage.hooks" # Installed package
350
+ class: "MyHooks"
351
+ - Loads from installed package (pip install)
352
+ - No sys.path manipulation needed
353
+ - Hooks MUST be stateless (checkpoint/restore loses instance state)
354
+
355
+ Priority: explicit arg > file > module > default
356
+ """
357
+ if hooks is not None:
358
+ return hooks
359
+
360
+ hooks_config = self.data.get('hooks')
361
+ if not hooks_config:
362
+ return MachineHooks()
363
+
364
+ class_name = hooks_config.get('class')
365
+ if not class_name:
366
+ logger.warning(f"Hooks config missing 'class', using default")
367
+ return MachineHooks()
368
+
369
+ # Route to appropriate loader based on invocation pattern
370
+ if hooks_config.get('file'):
371
+ hooks_class = self._load_hooks_local(hooks_config)
372
+ elif hooks_config.get('module'):
373
+ hooks_class = self._load_hooks_distributed(hooks_config)
374
+ else:
375
+ logger.warning(f"Hooks config needs 'file' or 'module', using default")
376
+ return MachineHooks()
377
+
378
+ if hooks_class is None:
379
+ return MachineHooks()
380
+
381
+ # Instantiate
382
+ try:
383
+ return hooks_class(**hooks_config.get('args', {}))
384
+ except Exception as e:
385
+ logger.error(f"Failed to instantiate {class_name}: {e}")
386
+ return MachineHooks()
387
+
388
+ def _load_hooks_local(self, config: Dict[str, Any]) -> Optional[type]:
389
+ """
390
+ LOCAL: Load hooks from file path.
391
+
392
+ Use for development and single-process execution:
393
+ - File path relative to machine config
394
+ - sys.path manipulation for sibling imports
395
+ - Hooks can be stateful (same process)
396
+ """
397
+ import sys
398
+
399
+ file_path = config['file']
400
+ class_name = config['class']
401
+
402
+ if not os.path.isabs(file_path):
403
+ file_path = os.path.join(self._config_dir, file_path)
404
+
405
+ hooks_dir = os.path.dirname(os.path.abspath(file_path))
406
+ added = hooks_dir not in sys.path
407
+
408
+ try:
409
+ if added:
410
+ sys.path.insert(0, hooks_dir)
411
+ try:
412
+ spec = importlib.util.spec_from_file_location("hooks", file_path)
413
+ if spec and spec.loader:
414
+ module = importlib.util.module_from_spec(spec)
415
+ spec.loader.exec_module(module)
416
+ return getattr(module, class_name)
417
+ finally:
418
+ if added and hooks_dir in sys.path:
419
+ sys.path.remove(hooks_dir)
420
+ except Exception as e:
421
+ logger.error(f"LOCAL hooks failed ({file_path}): {e}")
422
+ return None
423
+
424
+ def _load_hooks_distributed(self, config: Dict[str, Any]) -> Optional[type]:
425
+ """
426
+ DISTRIBUTED: Load hooks from installed package.
427
+
428
+ Use for Lambda, Cloud Run, checkpoint/restore:
429
+ - Package installed via pip (requirements.txt)
430
+ - No sys.path manipulation
431
+ - Hooks MUST be stateless (state in context)
432
+ """
433
+ module_name = config['module']
434
+ class_name = config['class']
435
+
436
+ try:
437
+ module = importlib.import_module(module_name)
438
+ return getattr(module, class_name)
439
+ except (ImportError, AttributeError) as e:
440
+ logger.error(f"DISTRIBUTED hooks failed ({module_name}.{class_name}): {e}")
441
+ return None
442
+
443
+ def _get_executor(self, agent_name: str) -> AgentExecutor:
444
+ """Get or load an agent executor by name."""
445
+ if agent_name in self._agents:
446
+ return self._agents[agent_name]
447
+
448
+ if agent_name not in self.agent_refs:
449
+ raise ValueError(f"Unknown agent: {agent_name}")
450
+
451
+ raw_ref = self.agent_refs[agent_name]
452
+ agent_ref = normalize_agent_ref(raw_ref)
453
+
454
+ adapter_context = AgentAdapterContext(
455
+ config_dir=self._config_dir,
456
+ settings=self.settings,
457
+ machine_name=self.machine_name,
458
+ profiles_file=self._profiles_file,
459
+ profiles_dict=self._profiles_dict,
460
+ )
461
+
462
+ executor = self._agent_registry.create_executor(
463
+ agent_name=agent_name,
464
+ agent_ref=agent_ref,
465
+ context=adapter_context,
466
+ )
467
+
468
+ self._agents[agent_name] = executor
469
+ return executor
470
+
471
+ # Pattern for simple path references: output.foo, context.bar.baz, input.x
472
+ _PATH_PATTERN = re.compile(r'^(output|context|input)(\.[a-zA-Z_][a-zA-Z0-9_]*)+$')
473
+
474
+ def _resolve_path(self, path: str, variables: Dict[str, Any]) -> Any:
475
+ """Resolve a dotted path like 'output.chapters' to its value."""
476
+ parts = path.split('.')
477
+ value = variables
478
+ for part in parts:
479
+ if isinstance(value, dict):
480
+ value = value.get(part)
481
+ else:
482
+ return None
483
+ return value
484
+
485
+ def _render_template(self, template_str: str, variables: Dict[str, Any]) -> Any:
486
+ """Render a Jinja2 template string or resolve a simple path reference."""
487
+ if not isinstance(template_str, str):
488
+ return template_str
489
+
490
+ # Check if it's a template ({{ for expressions, {% for control flow)
491
+ if '{{' not in template_str and '{%' not in template_str:
492
+ # Check if it's a simple path reference like "output.chapters"
493
+ # This allows direct value passing without Jinja2 string conversion
494
+ if self._PATH_PATTERN.match(template_str.strip()):
495
+ return self._resolve_path(template_str.strip(), variables)
496
+ return template_str
497
+
498
+ template = self._jinja_env.from_string(template_str)
499
+ result = template.render(**variables)
500
+
501
+ # Try to parse as JSON for complex types
502
+ try:
503
+ return json.loads(result)
504
+ except (json.JSONDecodeError, TypeError):
505
+ return result
506
+
507
+ def _render_dict(self, data: Dict[str, Any], variables: Dict[str, Any]) -> Dict[str, Any]:
508
+ """Recursively render all template strings in a dict."""
509
+ result = {}
510
+ for key, value in data.items():
511
+ if isinstance(value, str):
512
+ result[key] = self._render_template(value, variables)
513
+ elif isinstance(value, dict):
514
+ result[key] = self._render_dict(value, variables)
515
+ elif isinstance(value, list):
516
+ result[key] = [
517
+ self._render_template(v, variables) if isinstance(v, str) else v
518
+ for v in value
519
+ ]
520
+ else:
521
+ result[key] = value
522
+ return result
523
+
524
+ def _evaluate_condition(self, condition: str, context: Dict[str, Any]) -> bool:
525
+ """Evaluate a transition condition."""
526
+ variables = {"context": context}
527
+ return bool(self._expression_engine.evaluate(condition, variables))
528
+
529
+ def _get_error_recovery_state(
530
+ self,
531
+ state_config: Dict[str, Any],
532
+ error: Exception
533
+ ) -> Optional[str]:
534
+ """
535
+ Get recovery state from on_error config.
536
+
537
+ Supports two formats:
538
+ - Simple: on_error: "error_state"
539
+ - Granular: on_error: {default: "error_state", RateLimitError: "retry_state"}
540
+ """
541
+ on_error = state_config.get('on_error')
542
+ if not on_error:
543
+ return None
544
+
545
+ # Simple format: on_error: "state_name"
546
+ if isinstance(on_error, str):
547
+ return on_error
548
+
549
+ # Granular format: on_error: {error_type: state_name, default: fallback}
550
+ error_type = type(error).__name__
551
+ return on_error.get(error_type) or on_error.get('default')
552
+
553
+ def _find_next_state(
554
+ self,
555
+ state_name: str,
556
+ context: Dict[str, Any]
557
+ ) -> Optional[str]:
558
+ """Find the next state based on transitions."""
559
+ state = self.states.get(state_name, {})
560
+ transitions = state.get('transitions', [])
561
+
562
+ for transition in transitions:
563
+ condition = transition.get('condition', '')
564
+ to_state = transition.get('to')
565
+
566
+ if not to_state:
567
+ continue
568
+
569
+ # No condition = default transition
570
+ if not condition:
571
+ return to_state
572
+
573
+ # Evaluate condition
574
+ if self._evaluate_condition(condition, context):
575
+ return to_state
576
+
577
+ return None
578
+
579
+ def _resolve_config(self, name: str) -> Dict[str, Any]:
580
+ """Resolve a component reference (agent/machine) to a config dict."""
581
+ ref = self.agent_refs.get(name)
582
+ if not ref:
583
+ raise ValueError(f"Unknown component reference: {name}")
584
+
585
+ if isinstance(ref, dict):
586
+ return ref
587
+
588
+ if isinstance(ref, str):
589
+ path = ref
590
+ if not os.path.isabs(path):
591
+ path = os.path.join(self._config_dir, path)
592
+
593
+ if not os.path.exists(path):
594
+ raise FileNotFoundError(f"Component file not found: {path}")
595
+
596
+ with open(path, 'r') as f:
597
+ if path.endswith('.json'):
598
+ return json.load(f) or {}
599
+ # Assume yaml
600
+ if yaml:
601
+ return yaml.safe_load(f) or {}
602
+ raise ImportError("pyyaml required for YAML files")
603
+
604
+ raise ValueError(f"Invalid reference type: {type(ref)}")
605
+
606
+ def _resolve_machine_config(self, name: str) -> tuple[Dict[str, Any], str]:
607
+ """
608
+ Resolve a machine reference to a config dict and its config directory.
609
+
610
+ Returns:
611
+ Tuple of (config_dict, config_dir) where config_dir is the directory
612
+ containing the machine config file (for resolving relative paths).
613
+ """
614
+ ref = self.machine_refs.get(name)
615
+ if not ref:
616
+ raise ValueError(f"Unknown machine reference: {name}. Check 'machines:' section in config.")
617
+
618
+ if isinstance(ref, dict):
619
+ # Inline config - use parent's config_dir
620
+ return ref, self._config_dir
621
+
622
+ if isinstance(ref, str):
623
+ path = ref
624
+ if not os.path.isabs(path):
625
+ path = os.path.join(self._config_dir, path)
626
+
627
+ if not os.path.exists(path):
628
+ raise FileNotFoundError(f"Machine config file not found: {path}")
629
+
630
+ # The peer's config_dir is the directory containing its config file
631
+ peer_config_dir = os.path.dirname(os.path.abspath(path))
632
+
633
+ with open(path, 'r') as f:
634
+ if path.endswith('.json'):
635
+ config = json.load(f) or {}
636
+ elif yaml:
637
+ config = yaml.safe_load(f) or {}
638
+ else:
639
+ raise ImportError("pyyaml required for YAML files")
640
+
641
+ return config, peer_config_dir
642
+
643
+ raise ValueError(f"Invalid machine reference type: {type(ref)}")
644
+
645
+ # =========================================================================
646
+ # Pending Launches (Outbox Pattern) - v0.4.0
647
+ # =========================================================================
648
+
649
    def _add_pending_launch(
        self,
        execution_id: str,
        machine: str,
        input_data: Dict[str, Any]
    ) -> LaunchIntent:
        """Add a launch intent to the pending list (outbox pattern).

        The intent is recorded with launched=False BEFORE any launch happens,
        so a checkpoint taken at this point lets _resume_pending_launches
        re-fire the child after a crash.

        Args:
            execution_id: Pre-generated ID for the child execution.
            machine: Name of the machine reference to launch.
            input_data: Input payload for the child.

        Returns:
            The LaunchIntent appended to self._pending_launches.
        """
        intent = LaunchIntent(
            execution_id=execution_id,
            machine=machine,
            input=input_data,
            launched=False
        )
        self._pending_launches.append(intent)
        return intent
664
+
665
+ def _mark_launched(self, execution_id: str) -> None:
666
+ """Mark a pending launch as launched."""
667
+ for intent in self._pending_launches:
668
+ if intent.execution_id == execution_id:
669
+ intent.launched = True
670
+ break
671
+
672
+ def _clear_pending_launch(self, execution_id: str) -> None:
673
+ """Remove a completed launch from pending list."""
674
+ self._pending_launches = [
675
+ i for i in self._pending_launches
676
+ if i.execution_id != execution_id
677
+ ]
678
+
679
+ def _get_pending_intents(self) -> list[Dict[str, Any]]:
680
+ """Get pending launches as dicts for snapshot."""
681
+ return [intent.to_dict() for intent in self._pending_launches]
682
+
683
+ async def _resume_pending_launches(self) -> None:
684
+ """Resume any pending launches that weren't completed."""
685
+ for intent in self._pending_launches:
686
+ if intent.launched:
687
+ continue
688
+ # Check if child already has a result
689
+ uri = make_uri(intent.execution_id, "result")
690
+ if await self.result_backend.exists(uri):
691
+ continue
692
+ # Re-launch
693
+ logger.info(f"Resuming launch: {intent.machine} (ID: {intent.execution_id})")
694
+ task = asyncio.create_task(
695
+ self._launch_and_write(intent.machine, intent.execution_id, intent.input)
696
+ )
697
+ self._background_tasks.add(task)
698
+ task.add_done_callback(self._background_tasks.discard)
699
+
700
+ # =========================================================================
701
+ # Machine Invocation - v0.4.0
702
+ # =========================================================================
703
+
704
    async def _launch_and_write(
        self,
        machine_name: str,
        child_id: str,
        input_data: Dict[str, Any]
    ) -> Any:
        """Launch a peer machine and write its result to the backend.

        Args:
            machine_name: Key into this machine's 'machines:' section.
            child_id: Execution ID assigned to the child run.
            input_data: Payload passed to the child's execute().

        Returns:
            The child machine's final result.

        Raises:
            Exception: re-raises whatever the child raised, but only after
                writing an {_error, _error_type} marker to the result backend
                so the parent side can observe the failure.
        """
        target_config, peer_config_dir = self._resolve_machine_config(machine_name)

        # Peer machines are independent - they load their own hooks from config
        # (via the hooks: section in their machine.yml)
        # Use peer's config_dir so relative paths (e.g., ./agents/judge.yml) resolve correctly
        peer = FlatMachine(
            config_dict=target_config,
            result_backend=self.result_backend,
            agent_registry=self.agent_registry,
            _config_dir=peer_config_dir,
            _execution_id=child_id,
            _parent_execution_id=self.execution_id,
            _profiles_dict=self._profiles_dict,
            _profiles_file=self._profiles_file,
        )

        try:
            result = await peer.execute(input=input_data)
            # Write result to backend under the child's well-known URI
            uri = make_uri(child_id, "result")
            await self.result_backend.write(uri, result)
            return result
        except Exception as e:
            # Write error to backend so parent knows, then propagate
            uri = make_uri(child_id, "result")
            await self.result_backend.write(uri, {"_error": str(e), "_error_type": type(e).__name__})
            raise
738
+
739
+ async def _invoke_machine_single(
740
+ self,
741
+ machine_name: str,
742
+ input_data: Dict[str, Any],
743
+ timeout: Optional[float] = None
744
+ ) -> Any:
745
+ """Invoke a single peer machine with blocking read."""
746
+ child_id = str(uuid.uuid4())
747
+
748
+ # Checkpoint intent (outbox pattern)
749
+ self._add_pending_launch(child_id, machine_name, input_data)
750
+
751
+ # Launch and execute
752
+ result = await self._launch_and_write(machine_name, child_id, input_data)
753
+
754
+ # Mark completed and clear
755
+ self._mark_launched(child_id)
756
+ self._clear_pending_launch(child_id)
757
+
758
+ return result
759
+
760
+ async def _invoke_machines_parallel(
761
+ self,
762
+ machines: list[str],
763
+ input_data: Dict[str, Any],
764
+ mode: str = "settled",
765
+ timeout: Optional[float] = None
766
+ ) -> Dict[str, Any]:
767
+ """Invoke multiple machines in parallel."""
768
+ child_ids = {m: str(uuid.uuid4()) for m in machines}
769
+
770
+ # Checkpoint all intents
771
+ for machine_name, child_id in child_ids.items():
772
+ self._add_pending_launch(child_id, machine_name, input_data)
773
+
774
+ # Launch all
775
+ tasks = {}
776
+ for machine_name, child_id in child_ids.items():
777
+ task = asyncio.create_task(
778
+ self._launch_and_write(machine_name, child_id, input_data)
779
+ )
780
+ tasks[machine_name] = task
781
+
782
+ results = {}
783
+ errors = {}
784
+
785
+ if mode == "settled":
786
+ # Wait for all to complete
787
+ gathered = await asyncio.gather(*tasks.values(), return_exceptions=True)
788
+ for machine_name, result in zip(tasks.keys(), gathered):
789
+ if isinstance(result, Exception):
790
+ errors[machine_name] = result
791
+ results[machine_name] = {"_error": str(result), "_error_type": type(result).__name__}
792
+ else:
793
+ results[machine_name] = result
794
+
795
+ elif mode == "any":
796
+ # Wait for first to complete
797
+ done, pending = await asyncio.wait(
798
+ tasks.values(),
799
+ return_when=asyncio.FIRST_COMPLETED,
800
+ timeout=timeout
801
+ )
802
+ # Find which machine finished
803
+ for machine_name, task in tasks.items():
804
+ if task in done:
805
+ try:
806
+ results[machine_name] = task.result()
807
+ except Exception as e:
808
+ results[machine_name] = {"_error": str(e), "_error_type": type(e).__name__}
809
+ break
810
+ # Let pending tasks continue in background
811
+ for task in pending:
812
+ self._background_tasks.add(task)
813
+ task.add_done_callback(self._background_tasks.discard)
814
+
815
+ # Clear pending launches
816
+ for child_id in child_ids.values():
817
+ self._mark_launched(child_id)
818
+ self._clear_pending_launch(child_id)
819
+
820
+ return results
821
+
822
async def _invoke_foreach(
    self,
    items: list,
    as_var: str,
    key_expr: Optional[str],
    machine_name: str,
    input_template: Dict[str, Any],
    mode: str = "settled",
    timeout: Optional[float] = None
) -> Any:
    """Invoke a machine for each item in a list.

    Fans out one child execution per item, recording each launch intent
    before launching (outbox pattern) and clearing it afterwards.

    Args:
        items: Items to fan out over; one child execution per item.
        as_var: Variable name under which each item is exposed to templates.
        key_expr: Optional template producing a result key per item; when
            omitted the item's list index is used as the key.
        machine_name: Name of the child machine invoked for every item.
        input_template: Template dict rendered per item to build child input.
        mode: "settled" waits for all children; "any" returns after the
            first completion and backgrounds the rest.
        timeout: Wait timeout in seconds; only consulted in "any" mode.

    Returns:
        A dict keyed by computed item keys when ``key_expr`` is given,
        otherwise a list ordered by item index. Child failures are encoded
        as ``{"_error": ..., "_error_type": ...}`` entries, not raised.
    """
    child_ids = {}    # item_key -> child execution id
    item_inputs = {}  # item_key -> rendered input for that child

    for i, item in enumerate(items):
        # Compute key
        # NOTE(review): duplicate keys produced by key_expr silently
        # overwrite earlier entries — confirm key_expr is unique per item.
        if key_expr:
            variables = {as_var: item, "context": {}, "input": {}}
            item_key = self._render_template(key_expr, variables)
        else:
            item_key = i

        child_id = str(uuid.uuid4())
        child_ids[item_key] = child_id

        # Render input for this item
        variables = {as_var: item, "context": {}, "input": {}}
        item_input = self._render_dict(input_template, variables)
        item_inputs[item_key] = item_input

        # Record intent before launch (outbox pattern).
        self._add_pending_launch(child_id, machine_name, item_input)

    # Launch all children concurrently.
    tasks = {}
    for item_key, child_id in child_ids.items():
        task = asyncio.create_task(
            self._launch_and_write(machine_name, child_id, item_inputs[item_key])
        )
        tasks[item_key] = task

    results = {}
    # NOTE(review): modes other than "settled"/"any" fall through with
    # empty results — verify mode is validated upstream.

    if mode == "settled":
        # Wait for every child; per-item exception capture means one
        # failing child cannot abort the others.
        gathered = await asyncio.gather(*tasks.values(), return_exceptions=True)
        for item_key, result in zip(tasks.keys(), gathered):
            if isinstance(result, Exception):
                results[item_key] = {"_error": str(result), "_error_type": type(result).__name__}
            else:
                results[item_key] = result

    elif mode == "any":
        # Return as soon as one child finishes. If the timeout elapses
        # before any completion, results stays empty.
        done, pending = await asyncio.wait(
            tasks.values(),
            return_when=asyncio.FIRST_COMPLETED,
            timeout=timeout
        )
        # Record exactly one completed result, then stop scanning.
        for item_key, task in tasks.items():
            if task in done:
                try:
                    results[item_key] = task.result()
                except Exception as e:
                    results[item_key] = {"_error": str(e), "_error_type": type(e).__name__}
                break
        # Unfinished children keep running in the background; the done
        # callback drops each from the tracking set when it completes.
        for task in pending:
            self._background_tasks.add(task)
            task.add_done_callback(self._background_tasks.discard)

    # Clear pending launches
    for child_id in child_ids.values():
        self._mark_launched(child_id)
        self._clear_pending_launch(child_id)

    # Return dict if key_expr provided, else list
    if key_expr:
        return results
    else:
        # Without key_expr the keys are list indices; rebuild an ordered list.
        return [results[i] for i in sorted(results.keys()) if isinstance(i, int)]
899
+
900
async def _launch_fire_and_forget(
    self,
    machines: list[str],
    input_data: Dict[str, Any]
) -> None:
    """Launch machines without waiting for their results.

    Each target gets a fresh execution id; the launch intent is persisted
    before the launch itself (outbox pattern) and marked launched after.
    The actual launch mechanism (inline task, queue, ...) is delegated to
    ``self.invoker.launch()`` so this stays cloud-agnostic.
    """
    for name in machines:
        execution_id = str(uuid.uuid4())
        config, _ = self._resolve_machine_config(name)

        # Outbox pattern: record what we are about to do before doing it.
        self._add_pending_launch(execution_id, name, input_data)

        # The invoker decides HOW the launch happens.
        await self.invoker.launch(
            caller_machine=self,
            target_config=config,
            input_data=input_data,
            execution_id=execution_id
        )

        self._mark_launched(execution_id)
926
+
927
async def _run_hook(self, method_name: str, *args) -> Any:
    """Invoke the named hook, transparently awaiting async hooks.

    Looks up ``method_name`` on the installed hooks object, calls it with
    ``*args``, and awaits the result only when the hook returned a
    coroutine — so sync and async hooks are interchangeable.
    """
    hook = getattr(self._hooks, method_name)
    outcome = hook(*args)
    return await outcome if asyncio.iscoroutine(outcome) else outcome
934
+
935
async def _execute_state(
    self,
    state_name: str,
    context: Dict[str, Any]
) -> tuple[Dict[str, Any], Optional[Dict[str, Any]]]:
    """
    Execute a single state.

    Processes the state's capabilities in a fixed order: (1) 'action'
    hooks, (2) fire-and-forget 'launch', (3) blocking 'machine'/'foreach'
    invocation, (4) 'agent' (LLM) execution, then the 'final' state
    output spec. A state may combine several; later sections overwrite
    the ``output`` produced by earlier ones.

    Returns:
        Tuple of (updated_context, agent_output)
    """
    state = self.states.get(state_name, {})
    output = None

    # 1. Handle 'action' (hooks/custom actions)
    action_name = state.get('action')
    if action_name:
        action_impl = HookAction(self._hooks)
        context = await action_impl.execute(action_name, context, config={})

    # 2. Handle 'launch' (fire-and-forget machine execution)
    launch_spec = state.get('launch')
    if launch_spec:
        launch_input_spec = state.get('launch_input', {})
        # Templates may reference either alias; both map to the context.
        variables = {"context": context, "input": context}
        launch_input = self._render_dict(launch_input_spec, variables)

        # Normalize to list
        machines_to_launch = [launch_spec] if isinstance(launch_spec, str) else launch_spec
        await self._launch_fire_and_forget(machines_to_launch, launch_input)

    # 3. Handle 'machine' (peer machine execution with blocking read)
    machine_spec = state.get('machine')
    foreach_expr = state.get('foreach')

    if machine_spec or foreach_expr:
        input_spec = state.get('input', {})
        variables = {"context": context, "input": context}
        mode = state.get('mode', 'settled')
        timeout = state.get('timeout')

        if foreach_expr:
            # Dynamic parallelism: foreach
            items = self._render_template(foreach_expr, variables)
            if not isinstance(items, list):
                raise ValueError(f"foreach expression must yield a list, got {type(items)}")

            as_var = state.get('as', 'item')
            key_expr = state.get('key')
            # A list-valued machine spec collapses to its first entry here.
            machine_name = machine_spec if isinstance(machine_spec, str) else machine_spec[0]

            output = await self._invoke_foreach(
                items=items,
                as_var=as_var,
                key_expr=key_expr,
                machine_name=machine_name,
                input_template=input_spec,
                mode=mode,
                timeout=timeout
            )

        elif isinstance(machine_spec, list):
            # Parallel execution: machine: [a, b, c]
            machine_input = self._render_dict(input_spec, variables)

            # Handle MachineInput objects (with per-machine inputs)
            if machine_spec and isinstance(machine_spec[0], dict):
                # machine: [{name: a, input: {...}}, ...]
                machine_names = [m['name'] for m in machine_spec]
                # TODO: Support per-machine inputs
                output = await self._invoke_machines_parallel(
                    machines=machine_names,
                    input_data=machine_input,
                    mode=mode,
                    timeout=timeout
                )
            else:
                # machine: [a, b, c]
                output = await self._invoke_machines_parallel(
                    machines=machine_spec,
                    input_data=machine_input,
                    mode=mode,
                    timeout=timeout
                )

        else:
            # Single machine: machine: child
            machine_input = self._render_dict(input_spec, variables)
            output = await self._invoke_machine_single(
                machine_name=machine_spec,
                input_data=machine_input,
                timeout=timeout
            )

        # Map child-machine output into the context via templates.
        output_mapping = state.get('output_to_context', {})
        if output_mapping:
            safe_output = output or {}
            variables = {"context": context, "output": safe_output, "input": context}
            for ctx_key, template in output_mapping.items():
                context[ctx_key] = self._render_template(template, variables)

    # 4. Handle 'agent' (LLM execution)
    agent_name = state.get('agent')
    if agent_name:
        executor = self._get_executor(agent_name)
        input_spec = state.get('input', {})
        variables = {"context": context, "input": context}
        agent_input = self._render_dict(input_spec, variables)

        execution_config = state.get('execution')
        execution_type = get_execution_type(execution_config)
        result = await execution_type.execute(executor, agent_input, context=context)
        agent_result = coerce_agent_result(result)
        # Agent output replaces any machine output captured above.
        output = agent_result.output_payload()

        # Track usage/cost so max_agent_calls can be enforced by the loop.
        self._accumulate_agent_metrics(agent_result)

        output_mapping = state.get('output_to_context', {})
        if output_mapping:
            variables = {"context": context, "output": output, "input": context}
            for ctx_key, template in output_mapping.items():
                context[ctx_key] = self._render_template(template, variables)

    # Handle final state output
    if state.get('type') == 'final':
        output_spec = state.get('output', {})
        if output_spec:
            # A final state's declared output overrides machine/agent output.
            variables = {"context": context}
            output = self._render_dict(output_spec, variables)

    return context, output
1066
+
1067
def _accumulate_agent_metrics(self, result: AgentResult) -> None:
    """Fold usage and cost figures from an AgentResult into the totals.

    An explicit ``result.cost`` always wins; a cost embedded in the usage
    dict (either a bare number or a ``{"total": ...}`` breakdown) is only
    consulted when no explicit cost was reported.
    """
    direct_cost = result.cost
    if direct_cost is not None:
        self.total_cost += direct_cost

    usage = result.usage or {}
    if not isinstance(usage, dict):
        return

    # "api_calls" takes precedence; alternates are consulted only when
    # it is absent entirely (an explicit 0 does not fall through).
    call_count = usage.get("api_calls")
    if call_count is None:
        call_count = usage.get("requests") or usage.get("calls")
    if call_count:
        self.total_api_calls += call_count

    if direct_cost is None:
        reported = usage.get("cost")
        if isinstance(reported, (int, float)):
            self.total_cost += reported
        elif isinstance(reported, dict):
            grand_total = reported.get("total")
            if isinstance(grand_total, (int, float)):
                self.total_cost += grand_total
1088
+
1089
async def _save_checkpoint(
    self,
    event: str,
    state_name: str,
    step: int,
    context: Dict[str, Any],
    output: Optional[Dict[str, Any]] = None
) -> None:
    """Persist a MachineSnapshot for *event*, if that event is enabled.

    Events not listed in ``self.checkpoint_events`` are skipped, so
    checkpoint granularity is entirely configuration-driven.
    """
    if event not in self.checkpoint_events:
        return

    # Pending launch intents (outbox pattern) travel with the snapshot so
    # a resumed execution can re-drive un-acknowledged launches.
    intents = self._get_pending_intents() if self._pending_launches else None

    snapshot = MachineSnapshot(
        execution_id=self.execution_id,
        machine_name=self.machine_name,
        spec_version=self.spec_version,
        current_state=state_name,
        context=context,
        step=step,
        event=event,
        output=output,
        total_api_calls=self.total_api_calls,
        total_cost=self.total_cost,
        parent_execution_id=self.parent_execution_id,
        pending_launches=intents,
    )

    await CheckpointManager(self.persistence, self.execution_id).save_checkpoint(snapshot)
1118
+
1119
async def execute(
    self,
    input: Optional[Dict[str, Any]] = None,
    max_steps: int = 1000,
    max_agent_calls: Optional[int] = None,
    resume_from: Optional[str] = None
) -> Dict[str, Any]:
    """Execute the machine.

    Runs the state loop from the initial state — or from a restored
    checkpoint when ``resume_from`` is given — until a final state is
    reached or a limit (``max_steps`` / ``max_agent_calls``) is hit.

    Args:
        input: Initial input used to render the initial context.
        max_steps: Hard cap on state executions.
        max_agent_calls: Optional cap on accumulated agent/API calls.
        resume_from: Execution id whose latest checkpoint is restored.

    Returns:
        The final output dict (empty if no final state produced output).

    Raises:
        RuntimeError: If the execution lock cannot be acquired.
    """
    if resume_from:
        self.execution_id = resume_from
        logger.info(f"Resuming execution: {self.execution_id}")

    # Exclusive lock per execution id; held until the finally block.
    if not await self.lock.acquire(self.execution_id):
        raise RuntimeError(f"Could not acquire lock for execution {self.execution_id}")

    try:
        context = {}
        current_state = None
        step = 0
        final_output = {}
        hit_agent_limit = False
        # Only used on the resume path below.
        manager = CheckpointManager(self.persistence, self.execution_id)

        if resume_from:
            snapshot = await manager.load_latest()
            if snapshot:
                context = snapshot.context
                step = snapshot.step
                current_state = snapshot.current_state
                # Restore execution metrics
                self.total_api_calls = snapshot.total_api_calls or 0
                self.total_cost = snapshot.total_cost or 0.0
                # Restore pending launches (outbox pattern)
                if snapshot.pending_launches:
                    self._pending_launches = [
                        LaunchIntent.from_dict(intent)
                        for intent in snapshot.pending_launches
                    ]
                    await self._resume_pending_launches()
                # A machine_end snapshot means there is nothing left to run.
                if snapshot.event == 'machine_end':
                    logger.info("Execution already completed.")
                    return snapshot.output or {}
                logger.info(f"Restored from snapshot: step={step}, state={current_state}")
            else:
                logger.warning(f"No snapshot found for {resume_from}, starting fresh.")

        # Fresh start (or resume found nothing): render initial context.
        if not current_state:
            current_state = self._initial_state
            input = input or {}
            variables = {"input": input}
            context = self._render_dict(self.initial_context, variables)


        # NOTE(review): machine_start checkpoint + hook also fire on a
        # mid-execution resume — confirm hooks tolerate being re-run.
        await self._save_checkpoint('machine_start', 'start', step, context)
        context = await self._run_hook('on_machine_start', context)

        logger.info(f"Starting execution loop at: {current_state}")

        while current_state and step < max_steps:
            # Agent-call budget is checked both before and after a state.
            if max_agent_calls is not None and self.total_api_calls >= max_agent_calls:
                hit_agent_limit = True
                break
            step += 1
            is_final = current_state in self._final_states

            await self._save_checkpoint('state_enter', current_state, step, context)
            context = await self._run_hook('on_state_enter', current_state, context)

            await self._save_checkpoint('execute', current_state, step, context)

            try:
                context, output = await self._execute_state(current_state, context)
                if output and is_final:
                    final_output = output
            except Exception as e:
                # Expose the failure to templates/guards via context.
                context['last_error'] = str(e)
                context['last_error_type'] = type(e).__name__

                # State-level recovery target first, then the on_error hook.
                state_config = self.states.get(current_state, {})
                recovery_state = self._get_error_recovery_state(state_config, e)

                if not recovery_state:
                    recovery_state = await self._run_hook('on_error', current_state, e, context)

                if recovery_state:
                    logger.warning(f"Error in {current_state}, transitioning to {recovery_state}: {e}")
                    current_state = recovery_state
                    continue
                # Unrecoverable: re-raise; the finally block releases the lock.
                raise

            await self._save_checkpoint(
                'state_exit',
                current_state,
                step,
                context,
                output=output if is_final else None
            )

            # The hook may transform (or replace) the state's output.
            output = await self._run_hook('on_state_exit', current_state, context, output)

            if max_agent_calls is not None and self.total_api_calls >= max_agent_calls:
                hit_agent_limit = True
                break

            if is_final:
                logger.info(f"Reached final state: {current_state}")
                break

            next_state = self._find_next_state(current_state, context)

            # on_transition may redirect; a None next_state ends the loop.
            if next_state:
                next_state = await self._run_hook('on_transition', current_state, next_state, context)

            logger.debug(f"Transition: {current_state} -> {next_state}")
            current_state = next_state

        if step >= max_steps:
            logger.warning(f"Machine hit max_steps limit ({max_steps})")
        if hit_agent_limit and max_agent_calls is not None:
            logger.warning(f"Machine hit max_agent_calls limit ({max_agent_calls})")

        await self._save_checkpoint('machine_end', 'end', step, context, output=final_output)
        final_output = await self._run_hook('on_machine_end', context, final_output)

        return final_output

    finally:
        # Wait for any launched peer machines to complete
        # This ensures peer equality - launched machines have equal right to finish
        if self._background_tasks:
            await asyncio.gather(*self._background_tasks, return_exceptions=True)
        await self.lock.release(self.execution_id)
1251
+
1252
def execute_sync(
    self,
    input: Optional[Dict[str, Any]] = None,
    max_steps: int = 1000,
    max_agent_calls: Optional[int] = None,
    resume_from: Optional[str] = None
) -> Dict[str, Any]:
    """Synchronous wrapper for execute().

    Uses asyncio.run(), which creates a fresh event loop — so this must
    not be called from inside an already-running loop.

    Args:
        input: Initial input passed through to execute().
        max_steps: Hard cap on state executions.
        max_agent_calls: Optional cap on accumulated agent/API calls.
        resume_from: Execution id to resume from a saved checkpoint.
            Previously execute() supported this but the sync wrapper did
            not expose it; defaulting to None keeps callers unchanged.

    Returns:
        The machine's final output dict.
    """
    # The module already imports asyncio at top level; the old
    # function-local re-import was redundant and has been removed.
    return asyncio.run(
        self.execute(
            input=input,
            max_steps=max_steps,
            max_agent_calls=max_agent_calls,
            resume_from=resume_from
        )
    )
1261
+
1262
+
1263
# Explicit public API: only the FlatMachine class is exported from this module.
__all__ = ["FlatMachine"]