abstractruntime-0.0.0-py3-none-any.whl → abstractruntime-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. abstractruntime/__init__.py +104 -2
  2. abstractruntime/core/__init__.py +26 -0
  3. abstractruntime/core/config.py +101 -0
  4. abstractruntime/core/models.py +282 -0
  5. abstractruntime/core/policy.py +166 -0
  6. abstractruntime/core/runtime.py +736 -0
  7. abstractruntime/core/spec.py +53 -0
  8. abstractruntime/core/vars.py +94 -0
  9. abstractruntime/identity/__init__.py +7 -0
  10. abstractruntime/identity/fingerprint.py +57 -0
  11. abstractruntime/integrations/__init__.py +11 -0
  12. abstractruntime/integrations/abstractcore/__init__.py +47 -0
  13. abstractruntime/integrations/abstractcore/effect_handlers.py +119 -0
  14. abstractruntime/integrations/abstractcore/factory.py +187 -0
  15. abstractruntime/integrations/abstractcore/llm_client.py +397 -0
  16. abstractruntime/integrations/abstractcore/logging.py +27 -0
  17. abstractruntime/integrations/abstractcore/tool_executor.py +168 -0
  18. abstractruntime/scheduler/__init__.py +13 -0
  19. abstractruntime/scheduler/convenience.py +324 -0
  20. abstractruntime/scheduler/registry.py +101 -0
  21. abstractruntime/scheduler/scheduler.py +431 -0
  22. abstractruntime/storage/__init__.py +25 -0
  23. abstractruntime/storage/artifacts.py +519 -0
  24. abstractruntime/storage/base.py +107 -0
  25. abstractruntime/storage/in_memory.py +119 -0
  26. abstractruntime/storage/json_files.py +208 -0
  27. abstractruntime/storage/ledger_chain.py +153 -0
  28. abstractruntime/storage/snapshots.py +217 -0
  29. abstractruntime-0.2.0.dist-info/METADATA +163 -0
  30. abstractruntime-0.2.0.dist-info/RECORD +32 -0
  31. {abstractruntime-0.0.0.dist-info → abstractruntime-0.2.0.dist-info}/licenses/LICENSE +3 -1
  32. abstractruntime-0.0.0.dist-info/METADATA +0 -89
  33. abstractruntime-0.0.0.dist-info/RECORD +0 -5
  34. {abstractruntime-0.0.0.dist-info → abstractruntime-0.2.0.dist-info}/WHEEL +0 -0
abstractruntime/integrations/abstractcore/llm_client.py
@@ -0,0 +1,397 @@
+"""abstractruntime.integrations.abstractcore.llm_client
+
+AbstractCore-backed LLM clients for AbstractRuntime.
+
+Design intent:
+- Keep `RunState.vars` JSON-safe: normalize outputs into dicts.
+- Support both execution topologies:
+  - local/in-process: call AbstractCore's `create_llm(...).generate(...)`
+  - remote: call AbstractCore server `/v1/chat/completions`
+
+Remote mode is the preferred way to support per-request dynamic routing (e.g. `base_url`).
+"""
+
+from __future__ import annotations
+
+import json
+from dataclasses import asdict, dataclass, is_dataclass
+from typing import Any, Dict, List, Optional, Protocol, Tuple
+
+from .logging import get_logger
+
+logger = get_logger(__name__)
+
+
+@dataclass(frozen=True)
+class HttpResponse:
+    body: Dict[str, Any]
+    headers: Dict[str, str]
+
+
+class RequestSender(Protocol):
+    def post(
+        self,
+        url: str,
+        *,
+        headers: Dict[str, str],
+        json: Dict[str, Any],
+        timeout: float,
+    ) -> Any: ...
+
+
+class AbstractCoreLLMClient(Protocol):
+    def generate(
+        self,
+        *,
+        prompt: str,
+        messages: Optional[List[Dict[str, str]]] = None,
+        system_prompt: Optional[str] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        params: Optional[Dict[str, Any]] = None,
+    ) -> Dict[str, Any]:
+        """Return a JSON-safe dict with at least: content/tool_calls/usage/model."""
+
+
+def _jsonable(value: Any) -> Any:
+    """Best-effort conversion to JSON-safe objects.
+
+    This is intentionally conservative: if a value isn't naturally JSON-serializable,
+    we fall back to `str(value)`.
+    """
+
+    if value is None:
+        return None
+    if isinstance(value, (str, int, float, bool)):
+        return value
+    if isinstance(value, dict):
+        return {str(k): _jsonable(v) for k, v in value.items()}
+    if isinstance(value, list):
+        return [_jsonable(v) for v in value]
+    if is_dataclass(value):
+        return _jsonable(asdict(value))
+
+    # Pydantic v2
+    model_dump = getattr(value, "model_dump", None)
+    if callable(model_dump):
+        return _jsonable(model_dump())
+
+    # Pydantic v1
+    to_dict = getattr(value, "dict", None)
+    if callable(to_dict):
+        return _jsonable(to_dict())
+
+    return str(value)
+
+
+def _normalize_local_response(resp: Any) -> Dict[str, Any]:
+    """Normalize an AbstractCore local `generate()` result into JSON."""
+
+    # Dict-like already
+    if isinstance(resp, dict):
+        out = _jsonable(resp)
+        if isinstance(out, dict):
+            meta = out.get("metadata")
+            if isinstance(meta, dict) and "trace_id" in meta and "trace_id" not in out:
+                out["trace_id"] = meta["trace_id"]
+        return out
+
+    # Pydantic structured output
+    if hasattr(resp, "model_dump") or hasattr(resp, "dict"):
+        return {
+            "content": None,
+            "data": _jsonable(resp),
+            "tool_calls": None,
+            "usage": None,
+            "model": None,
+            "finish_reason": None,
+            "metadata": None,
+            "trace_id": None,
+        }
+
+    # AbstractCore GenerateResponse
+    content = getattr(resp, "content", None)
+    tool_calls = getattr(resp, "tool_calls", None)
+    usage = getattr(resp, "usage", None)
+    model = getattr(resp, "model", None)
+    finish_reason = getattr(resp, "finish_reason", None)
+    metadata = getattr(resp, "metadata", None)
+    trace_id: Optional[str] = None
+    if isinstance(metadata, dict):
+        raw = metadata.get("trace_id")
+        if raw is not None:
+            trace_id = str(raw)
+
+    return {
+        "content": content,
+        "data": None,
+        "tool_calls": _jsonable(tool_calls) if tool_calls is not None else None,
+        "usage": _jsonable(usage) if usage is not None else None,
+        "model": model,
+        "finish_reason": finish_reason,
+        "metadata": _jsonable(metadata) if metadata is not None else None,
+        "trace_id": trace_id,
+    }
+
+
+class LocalAbstractCoreLLMClient:
+    """In-process LLM client using AbstractCore's provider stack."""
+
+    def __init__(
+        self,
+        *,
+        provider: str,
+        model: str,
+        llm_kwargs: Optional[Dict[str, Any]] = None,
+    ):
+        from abstractcore import create_llm
+        from abstractcore.tools.handler import UniversalToolHandler
+
+        self._provider = provider
+        self._model = model
+        kwargs = dict(llm_kwargs or {})
+        kwargs.setdefault("enable_tracing", True)
+        if kwargs.get("enable_tracing"):
+            kwargs.setdefault("max_traces", 0)
+        self._llm = create_llm(provider, model=model, **kwargs)
+        self._tool_handler = UniversalToolHandler(model)
+
+    def generate(
+        self,
+        *,
+        prompt: str,
+        messages: Optional[List[Dict[str, str]]] = None,
+        system_prompt: Optional[str] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        params: Optional[Dict[str, Any]] = None,
+    ) -> Dict[str, Any]:
+        params = dict(params or {})
+
+        # `base_url` is a provider construction concern in local mode. We intentionally
+        # do not create new providers per call unless the host explicitly chooses to.
+        params.pop("base_url", None)
+
+        capabilities: List[str] = []
+        get_capabilities = getattr(self._llm, "get_capabilities", None)
+        if callable(get_capabilities):
+            try:
+                capabilities = list(get_capabilities())
+            except Exception:
+                capabilities = []
+        supports_tools = "tools" in set(c.lower() for c in capabilities)
+
+        if tools and not supports_tools:
+            # Fallback tool calling via prompting for providers/models without native tool support.
+            from abstractcore.tools import ToolDefinition
+
+            tool_defs = [
+                ToolDefinition(
+                    name=t.get("name", ""),
+                    description=t.get("description", ""),
+                    parameters=t.get("parameters", {}),
+                )
+                for t in tools
+            ]
+            tools_prompt = self._tool_handler.format_tools_prompt(tool_defs)
+            effective_prompt = f"{tools_prompt}\n\nUser request: {prompt}"
+
+            resp = self._llm.generate(
+                prompt=effective_prompt,
+                messages=messages,
+                system_prompt=system_prompt,
+                stream=False,
+                **params,
+            )
+            result = _normalize_local_response(resp)
+
+            # Parse tool calls from response content.
+            if result.get("content"):
+                parsed = self._tool_handler.parse_response(result["content"], mode="prompted")
+                if parsed.tool_calls:
+                    result["tool_calls"] = [
+                        {"name": tc.name, "arguments": tc.arguments, "call_id": tc.call_id}
+                        for tc in parsed.tool_calls
+                    ]
+            return result
+
+        resp = self._llm.generate(
+            prompt=str(prompt or ""),
+            messages=messages,
+            system_prompt=system_prompt,
+            tools=tools,
+            stream=False,
+            **params,
+        )
+        return _normalize_local_response(resp)
+
+    def get_model_capabilities(self) -> Dict[str, Any]:
+        """Get model capabilities including max_tokens, vision_support, etc.
+
+        Uses AbstractCore's architecture detection system to query model limits
+        and features. This allows the runtime to be aware of model constraints
+        for resource tracking and warnings.
+
+        Returns:
+            Dict with model capabilities. Always includes 'max_tokens' (default 32768).
+        """
+        try:
+            from abstractcore.architectures.detection import get_model_capabilities
+            return get_model_capabilities(self._model)
+        except Exception:
+            # Safe fallback if detection fails
+            return {"max_tokens": 32768}
+
+
+class HttpxRequestSender:
+    """Default request sender based on httpx (sync)."""
+
+    def __init__(self):
+        import httpx
+
+        self._httpx = httpx
+
+    def post(
+        self,
+        url: str,
+        *,
+        headers: Dict[str, str],
+        json: Dict[str, Any],
+        timeout: float,
+    ) -> HttpResponse:
+        resp = self._httpx.post(url, headers=headers, json=json, timeout=timeout)
+        resp.raise_for_status()
+        return HttpResponse(body=resp.json(), headers=dict(resp.headers))
+
+
+def _unwrap_http_response(value: Any) -> Tuple[Dict[str, Any], Dict[str, str]]:
+    if isinstance(value, dict):
+        return value, {}
+    body = getattr(value, "body", None)
+    headers = getattr(value, "headers", None)
+    if isinstance(body, dict) and isinstance(headers, dict):
+        return body, headers
+    json_fn = getattr(value, "json", None)
+    hdrs = getattr(value, "headers", None)
+    if callable(json_fn) and hdrs is not None:
+        try:
+            payload = json_fn()
+        except Exception:
+            payload = {}
+        return payload if isinstance(payload, dict) else {"data": _jsonable(payload)}, dict(hdrs)
+    return {"data": _jsonable(value)}, {}
+
+
+class RemoteAbstractCoreLLMClient:
+    """Remote LLM client calling an AbstractCore server endpoint."""
+
+    def __init__(
+        self,
+        *,
+        server_base_url: str,
+        model: str,
+        timeout_s: float = 60.0,
+        headers: Optional[Dict[str, str]] = None,
+        request_sender: Optional[RequestSender] = None,
+    ):
+        self._server_base_url = server_base_url.rstrip("/")
+        self._model = model
+        self._timeout_s = timeout_s
+        self._headers = dict(headers or {})
+        self._sender = request_sender or HttpxRequestSender()
+
+    def generate(
+        self,
+        *,
+        prompt: str,
+        messages: Optional[List[Dict[str, str]]] = None,
+        system_prompt: Optional[str] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        params: Optional[Dict[str, Any]] = None,
+    ) -> Dict[str, Any]:
+        params = dict(params or {})
+        req_headers = dict(self._headers)
+
+        trace_metadata = params.pop("trace_metadata", None)
+        if isinstance(trace_metadata, dict) and trace_metadata:
+            req_headers["X-AbstractCore-Trace-Metadata"] = json.dumps(
+                trace_metadata, ensure_ascii=False, separators=(",", ":")
+            )
+            header_map = {
+                "actor_id": "X-AbstractCore-Actor-Id",
+                "session_id": "X-AbstractCore-Session-Id",
+                "run_id": "X-AbstractCore-Run-Id",
+                "parent_run_id": "X-AbstractCore-Parent-Run-Id",
+            }
+            for key, header in header_map.items():
+                val = trace_metadata.get(key)
+                if val is not None and header not in req_headers:
+                    req_headers[header] = str(val)
+
+        # Build OpenAI-like messages for AbstractCore server.
+        out_messages: List[Dict[str, str]] = []
+        if system_prompt:
+            out_messages.append({"role": "system", "content": system_prompt})
+
+        if messages:
+            out_messages.extend(messages)
+        else:
+            out_messages.append({"role": "user", "content": prompt})
+
+        body: Dict[str, Any] = {
+            "model": self._model,
+            "messages": out_messages,
+            "stream": False,
+        }
+
+        # Dynamic routing support (AbstractCore server feature).
+        base_url = params.pop("base_url", None)
+        if base_url:
+            body["base_url"] = base_url
+
+        # Pass through common OpenAI-compatible parameters.
+        for key in (
+            "temperature",
+            "max_tokens",
+            "stop",
+            "seed",
+            "frequency_penalty",
+            "presence_penalty",
+        ):
+            if key in params and params[key] is not None:
+                body[key] = params[key]
+
+        if tools is not None:
+            body["tools"] = tools
+
+        url = f"{self._server_base_url}/v1/chat/completions"
+        raw = self._sender.post(url, headers=req_headers, json=body, timeout=self._timeout_s)
+        resp, resp_headers = _unwrap_http_response(raw)
+        lower_headers = {str(k).lower(): str(v) for k, v in resp_headers.items()}
+        trace_id = lower_headers.get("x-abstractcore-trace-id") or lower_headers.get("x-trace-id")
+
+        # Normalize OpenAI-like response.
+        try:
+            choice0 = (resp.get("choices") or [])[0]
+            msg = choice0.get("message") or {}
+            return {
+                "content": msg.get("content"),
+                "data": None,
+                "tool_calls": _jsonable(msg.get("tool_calls")) if msg.get("tool_calls") is not None else None,
+                "usage": _jsonable(resp.get("usage")) if resp.get("usage") is not None else None,
+                "model": resp.get("model"),
+                "finish_reason": choice0.get("finish_reason"),
+                "metadata": {"trace_id": trace_id} if trace_id else None,
+                "trace_id": trace_id,
+            }
+        except Exception:
+            # Fallback: return the raw response in JSON-safe form.
+            logger.warning("Remote LLM response normalization failed; returning raw JSON")
+            return {
+                "content": None,
+                "data": _jsonable(resp),
+                "tool_calls": None,
+                "usage": None,
+                "model": resp.get("model") if isinstance(resp, dict) else None,
+                "finish_reason": None,
+                "metadata": {"trace_id": trace_id} if trace_id else None,
+                "trace_id": trace_id,
+            }
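
Usage sketch for the two clients above (the constructor and `generate(...)` signatures come from this file; the provider, model, and server values are placeholder assumptions):

    # Hypothetical values; only the signatures are taken from llm_client.py.
    from abstractruntime.integrations.abstractcore.llm_client import (
        LocalAbstractCoreLLMClient,
        RemoteAbstractCoreLLMClient,
    )

    local = LocalAbstractCoreLLMClient(provider="ollama", model="qwen3:4b")
    out = local.generate(prompt="Say hello", params={"temperature": 0.0})
    print(out["content"], out["usage"])  # keys guaranteed by _normalize_local_response

    remote = RemoteAbstractCoreLLMClient(
        server_base_url="http://localhost:8000",  # assumed AbstractCore server address
        model="qwen3:4b",
    )
    out = remote.generate(
        prompt="Say hello",
        params={"trace_metadata": {"run_id": "run-123"}},  # forwarded as X-AbstractCore-* headers
    )
    print(out["trace_id"])  # populated from the response headers when the server sends one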
abstractruntime/integrations/abstractcore/logging.py
@@ -0,0 +1,27 @@
+"""abstractruntime.integrations.abstractcore.logging
+
+Logging adapter for the AbstractCore-integrated runtime.
+
+We prefer AbstractCore's structured logger for consistency across the stack.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+
+def get_logger(name: str) -> Any:
+    """Return a logger compatible with AbstractCore's structured logger.
+
+    This is intentionally a thin wrapper to keep the integration layer small.
+    """
+
+    try:
+        from abstractcore.utils.structured_logging import get_logger as _get_logger
+
+        return _get_logger(name)
+    except Exception:  # pragma: no cover
+        import logging
+
+        return logging.getLogger(name)
+
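
A brief usage sketch: because of the except branch above, host code can depend on this adapter unconditionally; it degrades to the stdlib logger when abstractcore is not importable. The app name below is a placeholder:

    from abstractruntime.integrations.abstractcore.logging import get_logger

    log = get_logger("my_host_app")  # structured logger if abstractcore imports, stdlib otherwise
    log.warning("tool registry empty")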
abstractruntime/integrations/abstractcore/tool_executor.py
@@ -0,0 +1,168 @@
+"""abstractruntime.integrations.abstractcore.tool_executor
+
+Tool execution adapters.
+
+- `AbstractCoreToolExecutor`: executes tool calls in-process using AbstractCore's
+  global tool registry.
+- `PassthroughToolExecutor`: does not execute; returns tool calls to the host.
+
+The runtime can use passthrough mode for untrusted environments (server/edge) and
+pause until the host resumes with the tool results.
+"""
+
+from __future__ import annotations
+
+from dataclasses import asdict, is_dataclass
+from typing import Any, Callable, Dict, List, Optional, Protocol, Sequence
+
+from .logging import get_logger
+
+logger = get_logger(__name__)
+
+
+class ToolExecutor(Protocol):
+    def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]: ...
+
+
+class MappingToolExecutor:
+    """Executes tool calls using an explicit {tool_name -> callable} mapping.
+
+    This is the recommended durable execution path: the mapping is held by the
+    host/runtime process and is never persisted inside RunState.
+    """
+
+    def __init__(self, tool_map: Dict[str, Callable[..., Any]]):
+        self._tool_map = dict(tool_map)
+
+    @classmethod
+    def from_tools(cls, tools: Sequence[Callable[..., Any]]) -> "MappingToolExecutor":
+        tool_map: Dict[str, Callable[..., Any]] = {}
+        for t in tools:
+            tool_def = getattr(t, "_tool_definition", None)
+            if tool_def is not None:
+                name = str(getattr(tool_def, "name", "") or "")
+                func = getattr(tool_def, "function", None) or t
+            else:
+                name = str(getattr(t, "__name__", "") or "")
+                func = t
+
+            if not name:
+                raise ValueError("Tool is missing a name")
+            if not callable(func):
+                raise ValueError(f"Tool '{name}' is not callable")
+            if name in tool_map:
+                raise ValueError(f"Duplicate tool name '{name}'")
+
+            tool_map[name] = func
+
+        return cls(tool_map)
+
+    def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
+        results: List[Dict[str, Any]] = []
+
+        for tc in tool_calls:
+            name = str(tc.get("name", "") or "")
+            arguments = dict(tc.get("arguments") or {})
+            call_id = str(tc.get("call_id") or "")
+
+            func = self._tool_map.get(name)
+            if func is None:
+                results.append(
+                    {
+                        "call_id": call_id,
+                        "name": name,
+                        "success": False,
+                        "output": None,
+                        "error": f"Tool '{name}' not found",
+                    }
+                )
+                continue
+
+            try:
+                output = func(**arguments)
+                results.append(
+                    {
+                        "call_id": call_id,
+                        "name": name,
+                        "success": True,
+                        "output": _jsonable(output),
+                        "error": None,
+                    }
+                )
+            except Exception as e:
+                results.append(
+                    {
+                        "call_id": call_id,
+                        "name": name,
+                        "success": False,
+                        "output": None,
+                        "error": str(e),
+                    }
+                )
+
+        return {"mode": "executed", "results": results}
+
+
+def _jsonable(value: Any) -> Any:
+    if value is None:
+        return None
+    if isinstance(value, (str, int, float, bool)):
+        return value
+    if isinstance(value, dict):
+        return {str(k): _jsonable(v) for k, v in value.items()}
+    if isinstance(value, list):
+        return [_jsonable(v) for v in value]
+    if is_dataclass(value):
+        return _jsonable(asdict(value))
+
+    model_dump = getattr(value, "model_dump", None)
+    if callable(model_dump):
+        return _jsonable(model_dump())
+
+    to_dict = getattr(value, "dict", None)
+    if callable(to_dict):
+        return _jsonable(to_dict())
+
+    return str(value)
+
+
+class AbstractCoreToolExecutor:
+    """Executes tool calls using AbstractCore's global tool registry."""
+
+    def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
+        from abstractcore.tools.core import ToolCall
+        from abstractcore.tools.registry import execute_tools
+
+        calls = [
+            ToolCall(
+                name=str(tc.get("name")),
+                arguments=dict(tc.get("arguments") or {}),
+                call_id=tc.get("call_id"),
+            )
+            for tc in tool_calls
+        ]
+
+        results = execute_tools(calls)
+        normalized = []
+        for call, r in zip(calls, results):
+            normalized.append(
+                {
+                    "call_id": getattr(r, "call_id", ""),
+                    "name": getattr(call, "name", ""),
+                    "success": bool(getattr(r, "success", False)),
+                    "output": _jsonable(getattr(r, "output", None)),
+                    "error": getattr(r, "error", None),
+                }
+            )
+
+        return {"mode": "executed", "results": normalized}
+
+
+class PassthroughToolExecutor:
+    """Returns tool calls unchanged without executing them."""
+
+    def __init__(self, *, mode: str = "passthrough"):
+        self._mode = mode
+
+    def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
+        return {"mode": self._mode, "tool_calls": _jsonable(tool_calls)}
abstractruntime/scheduler/__init__.py
@@ -0,0 +1,13 @@
+"""Scheduler for automatic run resumption."""
+
+from .registry import WorkflowRegistry
+from .scheduler import Scheduler, SchedulerStats
+from .convenience import create_scheduled_runtime, ScheduledRuntime
+
+__all__ = [
+    "WorkflowRegistry",
+    "Scheduler",
+    "SchedulerStats",
+    "create_scheduled_runtime",
+    "ScheduledRuntime",
+]