pyoco 0.1.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyoco/cli/main.py CHANGED
@@ -1,11 +1,16 @@
  import argparse
+ import json
  import sys
  import os
+ import signal
+ import time
  from ..schemas.config import PyocoConfig
  from ..discovery.loader import TaskLoader
  from ..core.models import Flow
  from ..core.engine import Engine
  from ..trace.console import ConsoleTraceBackend
+ from ..client import Client
+ from ..discovery.plugins import list_available_plugins

  def main():
      parser = argparse.ArgumentParser(description="Pyoco Workflow Engine")
@@ -20,54 +25,220 @@ def main():
      run_parser.add_argument("--non-cute", action="store_false", dest="cute", help="Use plain trace style")
      # Allow overriding params via CLI
      run_parser.add_argument("--param", action="append", help="Override params (key=value)")
+     run_parser.add_argument("--server", help="Server URL for remote execution")

      # Check command
      check_parser = subparsers.add_parser("check", help="Verify a workflow")
      check_parser.add_argument("--config", required=True, help="Path to flow.yaml")
      check_parser.add_argument("--flow", default="main", help="Flow name to check")
+     check_parser.add_argument("--dry-run", action="store_true", help="Traverse flow without executing tasks")
+     check_parser.add_argument("--json", action="store_true", help="Output report as JSON")

      # List tasks command
      list_parser = subparsers.add_parser("list-tasks", help="List available tasks")
      list_parser.add_argument("--config", required=True, help="Path to flow.yaml")

+     # Server command
+     server_parser = subparsers.add_parser("server", help="Manage Kanban Server")
+     server_subparsers = server_parser.add_subparsers(dest="server_command")
+     server_start = server_subparsers.add_parser("start", help="Start the server")
+     server_start.add_argument("--host", default="0.0.0.0", help="Host to bind")
+     server_start.add_argument("--port", type=int, default=8000, help="Port to bind")
+
+     # Worker command
+     worker_parser = subparsers.add_parser("worker", help="Manage Worker")
+     worker_subparsers = worker_parser.add_subparsers(dest="worker_command")
+     worker_start = worker_subparsers.add_parser("start", help="Start a worker")
+     worker_start.add_argument("--server", required=True, help="Server URL")
+     worker_start.add_argument("--config", required=True, help="Path to flow.yaml")
+     worker_start.add_argument("--tags", help="Comma-separated tags")
+
+     # Runs command
+     runs_parser = subparsers.add_parser("runs", help="Manage runs")
+     runs_subparsers = runs_parser.add_subparsers(dest="runs_command")
+
+     runs_list = runs_subparsers.add_parser("list", help="List runs")
+     runs_list.add_argument("--server", default="http://localhost:8000", help="Server URL")
+     runs_list.add_argument("--status", help="Filter by status")
+     runs_list.add_argument("--flow", help="Filter by flow name")
+     runs_list.add_argument("--limit", type=int, help="Maximum number of runs to show")
+
+     runs_show = runs_subparsers.add_parser("show", help="Show run details")
+     runs_show.add_argument("run_id", help="Run ID")
+     runs_show.add_argument("--server", default="http://localhost:8000", help="Server URL")
+
+     runs_cancel = runs_subparsers.add_parser("cancel", help="Cancel a run")
+     runs_cancel.add_argument("run_id", help="Run ID")
+     runs_cancel.add_argument("--server", default="http://localhost:8000", help="Server URL")
+
+     runs_inspect = runs_subparsers.add_parser("inspect", help="Inspect run details")
+     runs_inspect.add_argument("run_id", help="Run ID")
+     runs_inspect.add_argument("--server", default="http://localhost:8000", help="Server URL")
+     runs_inspect.add_argument("--json", action="store_true", help="Output JSON payload")
+
+     runs_logs = runs_subparsers.add_parser("logs", help="Show run logs")
+     runs_logs.add_argument("run_id", help="Run ID")
+     runs_logs.add_argument("--server", default="http://localhost:8000", help="Server URL")
+     runs_logs.add_argument("--task", help="Filter logs by task")
+     runs_logs.add_argument("--tail", type=int, help="Show last N log entries")
+     runs_logs.add_argument("--follow", action="store_true", help="Stream logs until completion")
+     runs_logs.add_argument("--allow-failure", action="store_true", help="Don't exit non-zero when run failed")
+
+     plugins_parser = subparsers.add_parser("plugins", help="Inspect plug-in entry points")
+     plugins_sub = plugins_parser.add_subparsers(dest="plugins_command")
+     plugins_list = plugins_sub.add_parser("list", help="List discovered plug-ins")
+     plugins_list.add_argument("--json", action="store_true", help="Output JSON payload")
+
      args = parser.parse_args()

      if not args.command:
          parser.print_help()
          sys.exit(1)

-     # Load config
-     try:
-         config = PyocoConfig.from_yaml(args.config)
-     except Exception as e:
-         print(f"Error loading config: {e}")
-         sys.exit(1)
+     # Load config only if needed
+     config = None
+     if hasattr(args, 'config') and args.config:
+         try:
+             config = PyocoConfig.from_yaml(args.config)
+         except Exception as e:
+             print(f"Error loading config: {e}")
+             sys.exit(1)

-     # Discover tasks
-     loader = TaskLoader(config)
-     loader.load()
+     # Discover tasks only if config is loaded
+     loader = None
+     if config:
+         loader = TaskLoader(config)
+         loader.load()

      if args.command == "list-tasks":
+         if not loader:
+             print("Error: Config not loaded.")
+             sys.exit(1)
          print("Available tasks:")
          for name in loader.tasks:
              print(f" - {name}")
          return

+     if args.command == "plugins":
+         infos = list_available_plugins()
+         if args.plugins_command == "list":
+             if getattr(args, "json", False):
+                 print(json.dumps(infos, indent=2))
+             else:
+                 if not infos:
+                     print("No plug-ins registered under group 'pyoco.tasks'.")
+                 else:
+                     print("Discovered plug-ins:")
+                     for info in infos:
+                         mod = info.get("module") or info.get("value")
+                         print(f" - {info.get('name')} ({mod})")
+         return
+
+     if args.command == "server":
+         if args.server_command == "start":
+             import uvicorn
+             print(f"🐇 Starting Kanban Server on {args.host}:{args.port}")
+             uvicorn.run("pyoco.server.api:app", host=args.host, port=args.port, log_level="info")
+         return
+
+     if args.command == "worker":
+         if args.worker_command == "start":
+             from ..worker.runner import Worker
+             tags = args.tags.split(",") if args.tags else []
+             worker = Worker(args.server, config, tags)
+             worker.start()
+         return
+
+     if args.command == "runs":
+         client = Client(args.server)
+         try:
+             if args.runs_command == "list":
+                 runs = client.list_runs(status=args.status, flow=args.flow, limit=args.limit)
+                 print(f"🐇 Active Runs ({len(runs)}):")
+                 print(f"{'ID':<36} | {'Status':<12} | {'Flow':<15}")
+                 print("-" * 70)
+                 for r in runs:
+                     # RunContext doesn't have flow_name in core model, but store adds it.
+                     # We need to access it safely.
+                     flow_name = r.get("flow_name", "???")
+                     print(f"{r['run_id']:<36} | {r['status']:<12} | {flow_name:<15}")
+
+             elif args.runs_command == "show":
+                 run = client.get_run(args.run_id)
+                 print(f"🐇 Run: {run['run_id']}")
+                 print(f"Status: {run['status']}")
+                 print("Tasks:")
+                 for t_name, t_state in run.get("tasks", {}).items():
+                     print(f" [{t_state}] {t_name}")
+
+             elif args.runs_command == "cancel":
+                 client.cancel_run(args.run_id)
+                 print(f"🛑 Cancellation requested for run {args.run_id}")
+             elif args.runs_command == "inspect":
+                 run = client.get_run(args.run_id)
+                 if args.json:
+                     print(json.dumps(run, indent=2))
+                 else:
+                     print(f"🐇 Run: {run['run_id']} ({run.get('flow_name', 'n/a')})")
+                     print(f"Status: {run['status']}")
+                     if run.get("start_time"):
+                         print(f"Started: {run['start_time']}")
+                     if run.get("end_time"):
+                         print(f"Ended: {run['end_time']}")
+                     print("Tasks:")
+                     records = run.get("task_records", {})
+                     for name, info in records.items():
+                         state = info.get("state", run["tasks"].get(name))
+                         duration = info.get("duration_ms")
+                         duration_str = f"{duration:.2f} ms" if duration else "-"
+                         print(f" - {name}: {state} ({duration_str})")
+                         if info.get("error"):
+                             print(f"   error: {info['error']}")
+                     if not records:
+                         for t_name, t_state in run.get("tasks", {}).items():
+                             print(f" - {t_name}: {t_state}")
+             elif args.runs_command == "logs":
+                 _stream_logs(client, args)
+         except Exception as e:
+             print(f"Error: {e}")
+         return
+
      if args.command == "run":
          flow_conf = config.flows.get(args.flow)
          if not flow_conf:
              print(f"Flow '{args.flow}' not found in config.")
              sys.exit(1)

+         # Params
+         params = flow_conf.defaults.copy()
+         if args.param:
+             for p in args.param:
+                 if "=" in p:
+                     k, v = p.split("=", 1)
+                     params[k] = v  # Simple string parsing for now
+
+         if args.server:
+             # Remote execution
+             client = Client(args.server)
+             try:
+                 run_id = client.submit_run(args.flow, params)
+                 print(f"🚀 Flow submitted! Run ID: {run_id}")
+                 print(f"📋 View status: pyoco runs show {run_id} --server {args.server}")
+             except Exception as e:
+                 print(f"Error submitting flow: {e}")
+                 sys.exit(1)
+             return
          # Build Flow from graph string
-         from ..dsl.syntax import TaskWrapper
+         from ..dsl.syntax import TaskWrapper, switch
          eval_context = {name: TaskWrapper(task) for name, task in loader.tasks.items()}
+         eval_context["switch"] = switch

          try:
              # Create Flow and add all loaded tasks
              flow = Flow(name=args.flow)
              for t in loader.tasks.values():
                  flow.add_task(t)
+             eval_context["flow"] = flow

              # Evaluate graph to set up dependencies
              exec(flow_conf.graph, {}, eval_context)
@@ -76,13 +247,15 @@ def main():
          backend = ConsoleTraceBackend(style="cute" if args.cute else "plain")
          engine = Engine(trace_backend=backend)

-         # Params
-         params = flow_conf.defaults.copy()
-         if args.param:
-             for p in args.param:
-                 if "=" in p:
-                     k, v = p.split("=", 1)
-                     params[k] = v  # Simple string parsing for now
+         # Params (moved up)
+
+         # Signal handler for cancellation
+         def signal_handler(sig, frame):
+             print("\n🛑 Ctrl+C detected. Cancelling active runs...")
+             for rid in list(engine.active_runs.keys()):
+                 engine.cancel(rid)
+
+         signal.signal(signal.SIGINT, signal_handler)

          engine.run(flow, params)

@@ -104,36 +277,36 @@ def main():

          # 1. Check imports (already done by loader.load(), but we can check for missing tasks in graph)
          # 2. Build flow to check graph
-         from ..dsl.syntax import TaskWrapper
+         from ..dsl.syntax import TaskWrapper, switch
          eval_context = {name: TaskWrapper(task) for name, task in loader.tasks.items()}
+         eval_context["switch"] = switch

          try:
              flow = Flow(name=args.flow)
              for t in loader.tasks.values():
                  flow.add_task(t)
+             eval_context["flow"] = flow

              eval(flow_conf.graph, {}, eval_context)

              # 3. Reachability / Orphans
-             # Nodes with no deps and no dependents (except if single node flow)
              if len(flow.tasks) > 1:
                  for t in flow.tasks:
                      if not t.dependencies and not t.dependents:
                          warnings.append(f"Task '{t.name}' is orphaned (no dependencies or dependents).")

              # 4. Cycles
-             # Simple DFS for cycle detection
              visited = set()
              path = set()
+
              def visit(node):
                  if node in path:
-                     return True  # Cycle
+                     return True
                  if node in visited:
                      return False
-
                  visited.add(node)
                  path.add(node)
-                 for dep in node.dependencies:  # Check upstream
+                 for dep in node.dependencies:
                      if visit(dep):
                          return True
                  path.remove(node)
@@ -149,29 +322,75 @@ def main():
              for t in flow.tasks:
                  sig = inspect.signature(t.func)
                  for name, param in sig.parameters.items():
-                     if name == 'ctx': continue
-                     # Check if input provided in task config or defaults
-                     # This is hard because inputs are resolved at runtime.
-                     # But we can check if 'inputs' mapping exists for it.
+                     if name == 'ctx':
+                         continue
                      if name not in t.inputs and name not in flow_conf.defaults:
-                         # Warning: might be missing input
                          warnings.append(f"Task '{t.name}' argument '{name}' might be missing input (not in inputs or defaults).")

          except Exception as e:
              errors.append(f"Graph evaluation failed: {e}")

-         # Report
-         print("\n--- Check Report ---")
-         if not errors and not warnings:
-             print("✅ All checks passed!")
+         if args.dry_run:
+             from ..dsl.validator import FlowValidator
+             try:
+                 validator = FlowValidator(flow)
+                 dr_report = validator.validate()
+                 warnings.extend(dr_report.warnings)
+                 errors.extend(dr_report.errors)
+             except Exception as exc:
+                 print(f"❌ Dry run internal error: {exc}")
+                 import traceback
+                 traceback.print_exc()
+                 sys.exit(3)
+
+         status = "ok"
+         if errors:
+             status = "error"
+         elif warnings:
+             status = "warning"
+
+         report = {"status": status, "warnings": warnings, "errors": errors}
+
+         if args.json:
+             print(json.dumps(report, indent=2))
          else:
-             for w in warnings:
-                 print(f"⚠️ {w}")
-             for e in errors:
-                 print(f"❌ {e}")
-
-         if errors:
+             print("\n--- Check Report ---")
+             print(f"Status: {status}")
+             if not errors and not warnings:
+                 print("✅ All checks passed!")
+             else:
+                 for w in warnings:
+                     print(f"⚠️ {w}")
+                 for e in errors:
+                     print(f"❌ {e}")
+
+         if errors:
+             sys.exit(2 if args.dry_run else 1)
+         return
+
+ def _stream_logs(client, args):
+     seen_seq = -1
+     follow = args.follow
+     while True:
+         tail = args.tail if (args.tail and seen_seq == -1 and not follow) else None
+         data = client.get_run_logs(args.run_id, task=args.task, tail=tail)
+         logs = data.get("logs", [])
+         logs.sort(key=lambda entry: entry.get("seq", 0))
+         for entry in logs:
+             seq = entry.get("seq", 0)
+             if seq <= seen_seq:
+                 continue
+             line = entry.get("text", "")
+             line = line.rstrip("\n")
+             print(f"[{entry.get('task', 'unknown')}][{entry.get('stream', '')}] {line}")
+             seen_seq = seq
+         status = data.get("run_status", "UNKNOWN")
+         if not follow or status in ("COMPLETED", "FAILED", "CANCELLED"):
+             if status == "FAILED" and not args.allow_failure:
                  sys.exit(1)
+             break
+         time.sleep(1)
+

  if __name__ == "__main__":
      main()
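
The reworked `check` command now emits a machine-readable report (`{"status", "warnings", "errors"}`) and distinct exit codes: 1 for errors, 2 when `--dry-run` surfaces errors, 3 on a dry-run internal failure. A minimal sketch of consuming that report from a CI script; the file name `flow.yaml`, the flow name `main`, and the gating policy are illustrative assumptions, not part of the package:

    import json
    import subprocess

    # Hypothetical CI gate around `pyoco check --json` (new in 0.5.0).
    proc = subprocess.run(
        ["pyoco", "check", "--config", "flow.yaml", "--flow", "main", "--json"],
        capture_output=True,
        text=True,
    )
    report = json.loads(proc.stdout)  # {"status": ..., "warnings": [...], "errors": [...]}
    for w in report["warnings"]:
        print(f"warning: {w}")
    if report["status"] == "error":
        raise SystemExit(f"flow check failed: {report['errors']}")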
pyoco/client.py ADDED
@@ -0,0 +1,89 @@
+ import httpx
+ from typing import Dict, List, Optional, Any
+ from .core.models import RunStatus, TaskState, RunContext
+
+ class Client:
+     def __init__(self, server_url: str, client_id: str = "cli"):
+         self.server_url = server_url.rstrip("/")
+         self.client_id = client_id
+         self.client = httpx.Client(base_url=self.server_url)
+
+     def submit_run(self, flow_name: str, params: Dict[str, Any], tags: List[str] = []) -> str:
+         resp = self.client.post("/runs", json={
+             "flow_name": flow_name,
+             "params": params,
+             "tags": tags
+         })
+         resp.raise_for_status()
+         return resp.json()["run_id"]
+
+     def list_runs(
+         self,
+         status: Optional[str] = None,
+         flow: Optional[str] = None,
+         limit: Optional[int] = None,
+     ) -> List[Dict]:
+         params = {}
+         if status:
+             params["status"] = status
+         if flow:
+             params["flow"] = flow
+         if limit:
+             params["limit"] = limit
+         resp = self.client.get("/runs", params=params)
+         resp.raise_for_status()
+         return resp.json()
+
+     def get_run(self, run_id: str) -> Dict:
+         resp = self.client.get(f"/runs/{run_id}")
+         resp.raise_for_status()
+         return resp.json()
+
+     def cancel_run(self, run_id: str):
+         resp = self.client.post(f"/runs/{run_id}/cancel")
+         resp.raise_for_status()
+
+     def poll(self, tags: List[str] = []) -> Optional[Dict[str, Any]]:
+         try:
+             resp = self.client.post("/workers/poll", json={
+                 "worker_id": self.client_id,
+                 "tags": tags
+             })
+             resp.raise_for_status()
+             data = resp.json()
+             if data.get("run_id"):
+                 return data
+             return None
+         except Exception as e:
+             # print(f"Poll failed: {e}")
+             return None
+
+     def heartbeat(self, run_ctx: RunContext) -> bool:
+         """
+         Sends heartbeat. Returns True if cancellation is requested.
+         """
+         try:
+             states_json = {k: v.value if hasattr(v, 'value') else v for k, v in run_ctx.tasks.items()}
+             status_value = run_ctx.status.value if hasattr(run_ctx.status, 'value') else run_ctx.status
+             payload = {
+                 "task_states": states_json,
+                 "task_records": run_ctx.serialize_task_records(),
+                 "logs": run_ctx.drain_logs(),
+                 "run_status": status_value
+             }
+             resp = self.client.post(f"/runs/{run_ctx.run_id}/heartbeat", json=payload)
+             resp.raise_for_status()
+             return resp.json().get("cancel_requested", False)
+         except Exception as e:
+             print(f"Heartbeat failed: {e}")
+             return False
+
+     def get_run_logs(self, run_id: str, task: Optional[str] = None, tail: Optional[int] = None) -> Dict[str, Any]:
+         params = {}
+         if task:
+             params["task"] = task
+         if tail:
+             params["tail"] = tail
+         resp = self.client.get(f"/runs/{run_id}/logs", params=params)
+         resp.raise_for_status()
+         return resp.json()
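
The new `Client` is the programmatic counterpart to the `pyoco run --server` and `pyoco runs ...` commands above. A minimal submit-and-wait sketch, assuming a server started via `pyoco server start` on localhost:8000 and a registered flow named `main`; the flow name and param are placeholders, and the terminal status strings mirror those checked in `_stream_logs` but are assumptions for `get_run` here:

    import time

    from pyoco.client import Client

    client = Client("http://localhost:8000")
    run_id = client.submit_run("main", {"greeting": "hello"})
    print(f"submitted: {run_id}")

    # Poll run state until a worker drives it to a terminal status,
    # mirroring what `pyoco runs show` renders on each invocation.
    while True:
        run = client.get_run(run_id)
        if run["status"] in ("COMPLETED", "FAILED", "CANCELLED"):
            break
        time.sleep(1)

    # Dump whatever log entries the server buffered for this run.
    for entry in client.get_run_logs(run_id).get("logs", []):
        print(entry.get("text", "").rstrip("\n"))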
pyoco/core/context.py CHANGED
@@ -1,21 +1,100 @@
  import threading
- from typing import Any, Dict, Optional
+ from typing import Any, Dict, List, Optional, Sequence
  from dataclasses import dataclass, field
+ from .models import RunContext
+
+
+ @dataclass
+ class LoopFrame:
+     name: str
+     type: str
+     index: Optional[int] = None
+     iteration: Optional[int] = None
+     count: Optional[int] = None
+     item: Any = None
+     condition: Optional[bool] = None
+     path: Optional[str] = None
+
+
+ class LoopStack:
+     def __init__(self):
+         self._frames: List[LoopFrame] = []
+
+     def push(self, frame: LoopFrame) -> LoopFrame:
+         parent_path = self._frames[-1].path if self._frames else ""
+         segment = frame.name
+         if frame.index is not None:
+             segment = f"{segment}[{frame.index}]"
+         frame.path = f"{parent_path}.{segment}" if parent_path else segment
+         self._frames.append(frame)
+         return frame
+
+     def pop(self) -> LoopFrame:
+         if not self._frames:
+             raise RuntimeError("Loop stack underflow")
+         return self._frames.pop()
+
+     @property
+     def current(self) -> Optional[LoopFrame]:
+         return self._frames[-1] if self._frames else None
+
+     def snapshot(self) -> Sequence[LoopFrame]:
+         return tuple(self._frames)

  @dataclass
  class Context:
+     """
+     Execution context passed to tasks.
+     """
      params: Dict[str, Any] = field(default_factory=dict)
-     env: Dict[str, str] = field(default_factory=dict)
      results: Dict[str, Any] = field(default_factory=dict)
      scratch: Dict[str, Any] = field(default_factory=dict)
      artifacts: Dict[str, Any] = field(default_factory=dict)
-     run_id: Optional[str] = None
-     artifact_dir: str = field(default="./artifacts")
+     env: Dict[str, str] = field(default_factory=dict)
+     artifact_dir: Optional[str] = None
+     _vars: Dict[str, Any] = field(default_factory=dict, repr=False)
+
+     # Reference to the parent run context (v0.2.0+)
+     run_context: Optional[RunContext] = None

      _lock: threading.Lock = field(default_factory=threading.Lock, repr=False)
+     _loop_stack: LoopStack = field(default_factory=LoopStack, repr=False)
+
+     @property
+     def is_cancelled(self) -> bool:
+         if self.run_context:
+             from .models import RunStatus
+             return self.run_context.status in [RunStatus.CANCELLING, RunStatus.CANCELLED]
+         return False
+
+     @property
+     def loop(self) -> Optional[LoopFrame]:
+         return self._loop_stack.current
+
+     @property
+     def loops(self) -> Sequence[LoopFrame]:
+         return self._loop_stack.snapshot()
+
+     def push_loop(self, frame: LoopFrame) -> LoopFrame:
+         return self._loop_stack.push(frame)
+
+     def pop_loop(self) -> LoopFrame:
+         return self._loop_stack.pop()
+
+     def set_var(self, name: str, value: Any):
+         self._vars[name] = value
+
+     def get_var(self, name: str, default=None):
+         return self._vars.get(name, default)
+
+     def clear_var(self, name: str):
+         self._vars.pop(name, None)

      def __post_init__(self):
          # Ensure artifact directory exists
+         if self.artifact_dir is None:
+             self.artifact_dir = "./artifacts"
+
          import pathlib
          pathlib.Path(self.artifact_dir).mkdir(parents=True, exist_ok=True)

@@ -108,3 +187,20 @@ class Context:

          return value

+     def expression_data(self) -> Dict[str, Any]:
+         data: Dict[str, Any] = {}
+         data.update(self._vars)
+         data["params"] = self.params
+         data["results"] = self.results
+         data["scratch"] = self.scratch
+         data["artifacts"] = self.artifacts
+         data["loop"] = self.loop
+         data["loops"] = list(self.loops)
+         return data
+
+     def env_data(self) -> Dict[str, str]:
+         import os
+
+         env_data = dict(os.environ)
+         env_data.update(self.env)
+         return env_data
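
The `LoopFrame`/`LoopStack` pair added to `context.py` gives each nested iteration a dotted, indexed path, which `Context.expression_data()` then exposes to expressions as `loop`/`loops`. A small sketch of the path construction; the `name`, `index`, and `type` values are illustrative assumptions, since the diff does not constrain them:

    from pyoco.core.context import LoopFrame, LoopStack

    stack = LoopStack()
    outer = stack.push(LoopFrame(name="batch", type="for", index=0))
    inner = stack.push(LoopFrame(name="item", type="for", index=2))

    print(outer.path)  # batch[0]
    print(inner.path)  # batch[0].item[2]

    stack.pop()                # leaving the inner loop...
    print(stack.current.path)  # ...restores batch[0] as the current frame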