AbstractRuntime 0.0.0-py3-none-any.whl → 0.0.1-py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (32)
  1. abstractruntime/__init__.py +104 -2
  2. abstractruntime/core/__init__.py +19 -0
  3. abstractruntime/core/models.py +239 -0
  4. abstractruntime/core/policy.py +166 -0
  5. abstractruntime/core/runtime.py +581 -0
  6. abstractruntime/core/spec.py +53 -0
  7. abstractruntime/identity/__init__.py +7 -0
  8. abstractruntime/identity/fingerprint.py +57 -0
  9. abstractruntime/integrations/__init__.py +11 -0
  10. abstractruntime/integrations/abstractcore/__init__.py +43 -0
  11. abstractruntime/integrations/abstractcore/effect_handlers.py +89 -0
  12. abstractruntime/integrations/abstractcore/factory.py +150 -0
  13. abstractruntime/integrations/abstractcore/llm_client.py +296 -0
  14. abstractruntime/integrations/abstractcore/logging.py +27 -0
  15. abstractruntime/integrations/abstractcore/tool_executor.py +89 -0
  16. abstractruntime/scheduler/__init__.py +13 -0
  17. abstractruntime/scheduler/convenience.py +324 -0
  18. abstractruntime/scheduler/registry.py +101 -0
  19. abstractruntime/scheduler/scheduler.py +431 -0
  20. abstractruntime/storage/__init__.py +25 -0
  21. abstractruntime/storage/artifacts.py +488 -0
  22. abstractruntime/storage/base.py +107 -0
  23. abstractruntime/storage/in_memory.py +119 -0
  24. abstractruntime/storage/json_files.py +208 -0
  25. abstractruntime/storage/ledger_chain.py +153 -0
  26. abstractruntime/storage/snapshots.py +217 -0
  27. abstractruntime-0.0.1.dist-info/METADATA +163 -0
  28. abstractruntime-0.0.1.dist-info/RECORD +30 -0
  29. {abstractruntime-0.0.0.dist-info → abstractruntime-0.0.1.dist-info}/licenses/LICENSE +3 -1
  30. abstractruntime-0.0.0.dist-info/METADATA +0 -89
  31. abstractruntime-0.0.0.dist-info/RECORD +0 -5
  32. {abstractruntime-0.0.0.dist-info → abstractruntime-0.0.1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,11 @@
+ """abstractruntime.integrations
+
+ Integration modules live here.
+
+ Design rule (layered coupling):
+ - The **kernel** (`abstractruntime.core`, `abstractruntime.storage`, `abstractruntime.identity`) stays dependency-light.
+ - Optional integration packages may import heavier dependencies (e.g. AbstractCore) and provide effect handlers.
+
+ This package intentionally does not import any integration by default.
+ """
+
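
In practice, the opt-in is just an import: kernel-only hosts never pay for optional dependencies. A minimal sketch of the rule (the factory helper comes from the abstractcore integration shown later in this diff; the provider and model strings are placeholders):

    # Kernel-only usage: imports nothing beyond the dependency-light core.
    from abstractruntime.core.runtime import Runtime

    # Explicit opt-in: this import is what pulls in AbstractCore.
    from abstractruntime.integrations.abstractcore import create_local_runtime

    runtime = create_local_runtime(provider="ollama", model="qwen3:4b")
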
@@ -0,0 +1,43 @@
+ """abstractruntime.integrations.abstractcore
+
+ AbstractCore integration package.
+
+ Provides:
+ - LLM clients (local + remote)
+ - Tool executors (executed + passthrough)
+ - Effect handlers wiring
+ - Convenience runtime factories for local/remote/hybrid modes
+
+ Importing this module is the explicit opt-in to an AbstractCore dependency.
+ """
+
+ from .llm_client import (
+     AbstractCoreLLMClient,
+     LocalAbstractCoreLLMClient,
+     RemoteAbstractCoreLLMClient,
+ )
+ from .tool_executor import AbstractCoreToolExecutor, PassthroughToolExecutor, ToolExecutor
+ from .effect_handlers import build_effect_handlers
+ from .factory import (
+     create_hybrid_runtime,
+     create_local_file_runtime,
+     create_local_runtime,
+     create_remote_file_runtime,
+     create_remote_runtime,
+ )
+
+ __all__ = [
+     "AbstractCoreLLMClient",
+     "LocalAbstractCoreLLMClient",
+     "RemoteAbstractCoreLLMClient",
+     "ToolExecutor",
+     "AbstractCoreToolExecutor",
+     "PassthroughToolExecutor",
+     "build_effect_handlers",
+     "create_local_runtime",
+     "create_remote_runtime",
+     "create_hybrid_runtime",
+     "create_local_file_runtime",
+     "create_remote_file_runtime",
+ ]
+
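
Taken together, the re-exports map onto the three execution modes implemented in factory.py below. A minimal sketch of selecting a mode (server URL, provider, and model are placeholder values):

    from abstractruntime.integrations.abstractcore import (
        create_hybrid_runtime,
        create_local_runtime,
        create_remote_runtime,
    )

    # local: in-process LLM + local tool execution
    rt = create_local_runtime(provider="ollama", model="qwen3:4b")

    # remote: HTTP to an AbstractCore server + tool passthrough
    rt = create_remote_runtime(server_base_url="http://localhost:8000", model="my-model")

    # hybrid: remote LLM, local tool execution
    rt = create_hybrid_runtime(server_base_url="http://localhost:8000", model="my-model")
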
@@ -0,0 +1,89 @@
+ """abstractruntime.integrations.abstractcore.effect_handlers
+
+ Effect handlers wiring for AbstractRuntime.
+
+ These handlers implement:
+ - `EffectType.LLM_CALL`
+ - `EffectType.TOOL_CALLS`
+
+ They are designed to keep `RunState.vars` JSON-safe.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any, Dict, Optional
+
+ from ...core.models import Effect, EffectType, RunState, WaitReason, WaitState
+ from ...core.runtime import EffectOutcome, EffectHandler
+ from .llm_client import AbstractCoreLLMClient
+ from .tool_executor import ToolExecutor
+ from .logging import get_logger
+
+ logger = get_logger(__name__)
+
+
+ def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
+     def _handler(run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
+         payload = dict(effect.payload or {})
+         prompt = payload.get("prompt")
+         messages = payload.get("messages")
+         system_prompt = payload.get("system_prompt")
+         tools = payload.get("tools")
+         params = payload.get("params")
+
+         if not prompt and not messages:
+             return EffectOutcome.failed("llm_call requires payload.prompt or payload.messages")
+
+         try:
+             result = llm.generate(
+                 prompt=str(prompt or ""),
+                 messages=messages,
+                 system_prompt=system_prompt,
+                 tools=tools,
+                 params=params,
+             )
+             return EffectOutcome.completed(result=result)
+         except Exception as e:
+             logger.error("LLM_CALL failed", error=str(e))
+             return EffectOutcome.failed(str(e))
+
+     return _handler
+
+
+ def make_tool_calls_handler(*, tools: ToolExecutor) -> EffectHandler:
+     def _handler(run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
+         payload = dict(effect.payload or {})
+         tool_calls = payload.get("tool_calls")
+         if not isinstance(tool_calls, list):
+             return EffectOutcome.failed("tool_calls requires payload.tool_calls (list)")
+
+         try:
+             result = tools.execute(tool_calls=tool_calls)
+         except Exception as e:
+             logger.error("TOOL_CALLS execution failed", error=str(e))
+             return EffectOutcome.failed(str(e))
+
+         mode = result.get("mode")
+         if mode and mode != "executed":
+             # Passthrough/untrusted mode: pause until an external host resumes with tool results.
+             wait_key = payload.get("wait_key") or f"tool_calls:{run.run_id}:{run.current_node}"
+             wait = WaitState(
+                 reason=WaitReason.EVENT,
+                 wait_key=str(wait_key),
+                 resume_to_node=payload.get("resume_to_node") or default_next_node,
+                 result_key=effect.result_key,
+                 details={"mode": mode, "tool_calls": tool_calls},
+             )
+             return EffectOutcome.waiting(wait)
+
+         return EffectOutcome.completed(result=result)
+
+     return _handler
+
+
+ def build_effect_handlers(*, llm: AbstractCoreLLMClient, tools: ToolExecutor) -> Dict[EffectType, Any]:
+     return {
+         EffectType.LLM_CALL: make_llm_call_handler(llm=llm),
+         EffectType.TOOL_CALLS: make_tool_calls_handler(tools=tools),
+     }
+
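
Hosts that do not use the factories can wire the handlers into a Runtime directly. A sketch assuming the in-memory stores from abstractruntime.storage and placeholder provider/model values:

    from abstractruntime.core.runtime import Runtime
    from abstractruntime.integrations.abstractcore import (
        LocalAbstractCoreLLMClient,
        PassthroughToolExecutor,
        build_effect_handlers,
    )
    from abstractruntime.storage.in_memory import InMemoryLedgerStore, InMemoryRunStore

    llm = LocalAbstractCoreLLMClient(provider="ollama", model="qwen3:4b")
    handlers = build_effect_handlers(llm=llm, tools=PassthroughToolExecutor())

    rt = Runtime(
        run_store=InMemoryRunStore(),
        ledger_store=InMemoryLedgerStore(),
        effect_handlers=handlers,
    )

With a passthrough executor, a TOOL_CALLS effect takes the waiting branch above: the run parks on the recorded wait_key until the host resumes it with externally produced tool results.
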
@@ -0,0 +1,150 @@
+ """abstractruntime.integrations.abstractcore.factory
+
+ Convenience constructors for a Runtime wired to AbstractCore.
+
+ These helpers implement the three supported execution modes:
+ - local: in-process LLM + local tool execution
+ - remote: HTTP to AbstractCore server + tool passthrough
+ - hybrid: HTTP to AbstractCore server + local tool execution
+
+ The caller supplies storage backends (in-memory or file-based).
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Any, Dict, Optional
+
+ from ...core.runtime import Runtime
+ from ...storage.in_memory import InMemoryLedgerStore, InMemoryRunStore
+ from ...storage.json_files import JsonFileRunStore, JsonlLedgerStore
+ from ...storage.base import LedgerStore, RunStore
+
+ from .effect_handlers import build_effect_handlers
+ from .llm_client import LocalAbstractCoreLLMClient, RemoteAbstractCoreLLMClient
+ from .tool_executor import AbstractCoreToolExecutor, PassthroughToolExecutor, ToolExecutor
+
+
+ def _default_in_memory_stores() -> tuple[RunStore, LedgerStore]:
+     return InMemoryRunStore(), InMemoryLedgerStore()
+
+
+ def _default_file_stores(*, base_dir: str | Path) -> tuple[RunStore, LedgerStore]:
+     base = Path(base_dir)
+     base.mkdir(parents=True, exist_ok=True)
+     return JsonFileRunStore(base), JsonlLedgerStore(base)
+
+
+ def create_local_runtime(
+     *,
+     provider: str,
+     model: str,
+     llm_kwargs: Optional[Dict[str, Any]] = None,
+     run_store: Optional[RunStore] = None,
+     ledger_store: Optional[LedgerStore] = None,
+     tool_executor: Optional[ToolExecutor] = None,
+     context: Optional[Any] = None,
+     effect_policy: Optional[Any] = None,
+ ) -> Runtime:
+     if run_store is None or ledger_store is None:
+         run_store, ledger_store = _default_in_memory_stores()
+
+     llm_client = LocalAbstractCoreLLMClient(provider=provider, model=model, llm_kwargs=llm_kwargs)
+     tools = tool_executor or AbstractCoreToolExecutor()
+     handlers = build_effect_handlers(llm=llm_client, tools=tools)
+
+     return Runtime(run_store=run_store, ledger_store=ledger_store, effect_handlers=handlers, context=context, effect_policy=effect_policy)
+
+
+ def create_remote_runtime(
+     *,
+     server_base_url: str,
+     model: str,
+     headers: Optional[Dict[str, str]] = None,
+     timeout_s: float = 60.0,
+     run_store: Optional[RunStore] = None,
+     ledger_store: Optional[LedgerStore] = None,
+     tool_executor: Optional[ToolExecutor] = None,
+     context: Optional[Any] = None,
+ ) -> Runtime:
+     if run_store is None or ledger_store is None:
+         run_store, ledger_store = _default_in_memory_stores()
+
+     llm_client = RemoteAbstractCoreLLMClient(
+         server_base_url=server_base_url,
+         model=model,
+         headers=headers,
+         timeout_s=timeout_s,
+     )
+     tools = tool_executor or PassthroughToolExecutor()
+     handlers = build_effect_handlers(llm=llm_client, tools=tools)
+
+     return Runtime(run_store=run_store, ledger_store=ledger_store, effect_handlers=handlers, context=context)
+
+
+ def create_hybrid_runtime(
+     *,
+     server_base_url: str,
+     model: str,
+     headers: Optional[Dict[str, str]] = None,
+     timeout_s: float = 60.0,
+     run_store: Optional[RunStore] = None,
+     ledger_store: Optional[LedgerStore] = None,
+     context: Optional[Any] = None,
+ ) -> Runtime:
+     """Remote LLM via AbstractCore server, local tool execution."""
+
+     if run_store is None or ledger_store is None:
+         run_store, ledger_store = _default_in_memory_stores()
+
+     llm_client = RemoteAbstractCoreLLMClient(
+         server_base_url=server_base_url,
+         model=model,
+         headers=headers,
+         timeout_s=timeout_s,
+     )
+     tools = AbstractCoreToolExecutor()
+     handlers = build_effect_handlers(llm=llm_client, tools=tools)
+
+     return Runtime(run_store=run_store, ledger_store=ledger_store, effect_handlers=handlers, context=context)
+
+
+ def create_local_file_runtime(
+     *,
+     base_dir: str | Path,
+     provider: str,
+     model: str,
+     llm_kwargs: Optional[Dict[str, Any]] = None,
+     context: Optional[Any] = None,
+ ) -> Runtime:
+     run_store, ledger_store = _default_file_stores(base_dir=base_dir)
+     return create_local_runtime(
+         provider=provider,
+         model=model,
+         llm_kwargs=llm_kwargs,
+         run_store=run_store,
+         ledger_store=ledger_store,
+         context=context,
+     )
+
+
+ def create_remote_file_runtime(
+     *,
+     base_dir: str | Path,
+     server_base_url: str,
+     model: str,
+     headers: Optional[Dict[str, str]] = None,
+     timeout_s: float = 60.0,
+     context: Optional[Any] = None,
+ ) -> Runtime:
+     run_store, ledger_store = _default_file_stores(base_dir=base_dir)
+     return create_remote_runtime(
+         server_base_url=server_base_url,
+         model=model,
+         headers=headers,
+         timeout_s=timeout_s,
+         run_store=run_store,
+         ledger_store=ledger_store,
+         context=context,
+     )
+
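
The file-backed variants persist run state and the ledger as JSON/JSONL under base_dir (created if missing). A sketch with placeholder path, provider, and model:

    from abstractruntime.integrations.abstractcore import create_local_file_runtime

    rt = create_local_file_runtime(
        base_dir="./runs",   # JsonFileRunStore + JsonlLedgerStore live here
        provider="lmstudio",
        model="qwen3:4b",
    )

Note that in the non-file constructors, omitting either store falls back to the in-memory pair, so run state does not survive the process.
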
@@ -0,0 +1,296 @@
+ """abstractruntime.integrations.abstractcore.llm_client
+
+ AbstractCore-backed LLM clients for AbstractRuntime.
+
+ Design intent:
+ - Keep `RunState.vars` JSON-safe: normalize outputs into dicts.
+ - Support both execution topologies:
+   - local/in-process: call AbstractCore's `create_llm(...).generate(...)`
+   - remote: call AbstractCore server `/v1/chat/completions`
+
+ Remote mode is the preferred way to support per-request dynamic routing (e.g. `base_url`).
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import asdict, is_dataclass
+ from typing import Any, Dict, List, Optional, Protocol
+
+ from .logging import get_logger
+
+ logger = get_logger(__name__)
+
+
+ class RequestSender(Protocol):
+     def post(
+         self,
+         url: str,
+         *,
+         headers: Dict[str, str],
+         json: Dict[str, Any],
+         timeout: float,
+     ) -> Dict[str, Any]: ...
+
+
+ class AbstractCoreLLMClient(Protocol):
+     def generate(
+         self,
+         *,
+         prompt: str,
+         messages: Optional[List[Dict[str, str]]] = None,
+         system_prompt: Optional[str] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+         params: Optional[Dict[str, Any]] = None,
+     ) -> Dict[str, Any]:
+         """Return a JSON-safe dict with at least: content/tool_calls/usage/model."""
+
+
+ def _jsonable(value: Any) -> Any:
+     """Best-effort conversion to JSON-safe objects.
+
+     This is intentionally conservative: if a value isn't naturally JSON-serializable,
+     we fall back to `str(value)`.
+     """
+
+     if value is None:
+         return None
+     if isinstance(value, (str, int, float, bool)):
+         return value
+     if isinstance(value, dict):
+         return {str(k): _jsonable(v) for k, v in value.items()}
+     if isinstance(value, list):
+         return [_jsonable(v) for v in value]
+     if is_dataclass(value):
+         return _jsonable(asdict(value))
+
+     # Pydantic v2
+     model_dump = getattr(value, "model_dump", None)
+     if callable(model_dump):
+         return _jsonable(model_dump())
+
+     # Pydantic v1
+     to_dict = getattr(value, "dict", None)
+     if callable(to_dict):
+         return _jsonable(to_dict())
+
+     return str(value)
+
+
+ def _normalize_local_response(resp: Any) -> Dict[str, Any]:
+     """Normalize an AbstractCore local `generate()` result into JSON."""
+
+     # Dict-like already
+     if isinstance(resp, dict):
+         return _jsonable(resp)
+
+     # Pydantic structured output
+     if hasattr(resp, "model_dump") or hasattr(resp, "dict"):
+         return {
+             "content": None,
+             "data": _jsonable(resp),
+             "tool_calls": None,
+             "usage": None,
+             "model": None,
+             "finish_reason": None,
+         }
+
+     # AbstractCore GenerateResponse
+     content = getattr(resp, "content", None)
+     tool_calls = getattr(resp, "tool_calls", None)
+     usage = getattr(resp, "usage", None)
+     model = getattr(resp, "model", None)
+     finish_reason = getattr(resp, "finish_reason", None)
+
+     return {
+         "content": content,
+         "data": None,
+         "tool_calls": _jsonable(tool_calls) if tool_calls is not None else None,
+         "usage": _jsonable(usage) if usage is not None else None,
+         "model": model,
+         "finish_reason": finish_reason,
+     }
+
+
+ class LocalAbstractCoreLLMClient:
+     """In-process LLM client using AbstractCore's provider stack."""
+
+     def __init__(
+         self,
+         *,
+         provider: str,
+         model: str,
+         llm_kwargs: Optional[Dict[str, Any]] = None,
+     ):
+         from abstractcore import create_llm
+         from abstractcore.tools.handler import UniversalToolHandler
+
+         self._provider = provider
+         self._model = model
+         self._llm = create_llm(provider, model=model, **(llm_kwargs or {}))
+         self._tool_handler = UniversalToolHandler(model)
+
+     def generate(
+         self,
+         *,
+         prompt: str,
+         messages: Optional[List[Dict[str, str]]] = None,
+         system_prompt: Optional[str] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+         params: Optional[Dict[str, Any]] = None,
+     ) -> Dict[str, Any]:
+         params = dict(params or {})
+
+         # `base_url` is a provider construction concern in local mode. We intentionally
+         # do not create new providers per call unless the host explicitly chooses to.
+         params.pop("base_url", None)
+
+         # If tools are provided, use UniversalToolHandler to format them into the prompt.
+         # This works for models without native tool support.
+         effective_prompt = prompt
+         if tools:
+             from abstractcore.tools import ToolDefinition
+
+             tool_defs = []
+             for t in tools:
+                 tool_defs.append(ToolDefinition(
+                     name=t.get("name", ""),
+                     description=t.get("description", ""),
+                     parameters=t.get("parameters", {}),
+                 ))
+             tools_prompt = self._tool_handler.format_tools_prompt(tool_defs)
+             effective_prompt = f"{tools_prompt}\n\nUser request: {prompt}"
+
+         resp = self._llm.generate(
+             prompt=effective_prompt,
+             messages=messages,
+             system_prompt=system_prompt,
+             stream=False,
+             **params,
+         )
+
+         result = _normalize_local_response(resp)
+
+         # Parse tool calls from the response if tools were provided.
+         if tools and result.get("content"):
+             parsed = self._tool_handler.parse_response(result["content"], mode="prompted")
+             if parsed.tool_calls:
+                 result["tool_calls"] = [
+                     {"name": tc.name, "arguments": tc.arguments, "call_id": tc.call_id}
+                     for tc in parsed.tool_calls
+                 ]
+
+         return result
+
+
+ class HttpxRequestSender:
+     """Default request sender based on httpx (sync)."""
+
+     def __init__(self):
+         import httpx
+
+         self._httpx = httpx
+
+     def post(
+         self,
+         url: str,
+         *,
+         headers: Dict[str, str],
+         json: Dict[str, Any],
+         timeout: float,
+     ) -> Dict[str, Any]:
+         resp = self._httpx.post(url, headers=headers, json=json, timeout=timeout)
+         resp.raise_for_status()
+         return resp.json()
+
+
+ class RemoteAbstractCoreLLMClient:
+     """Remote LLM client calling an AbstractCore server endpoint."""
+
+     def __init__(
+         self,
+         *,
+         server_base_url: str,
+         model: str,
+         timeout_s: float = 60.0,
+         headers: Optional[Dict[str, str]] = None,
+         request_sender: Optional[RequestSender] = None,
+     ):
+         self._server_base_url = server_base_url.rstrip("/")
+         self._model = model
+         self._timeout_s = timeout_s
+         self._headers = dict(headers or {})
+         self._sender = request_sender or HttpxRequestSender()
+
+     def generate(
+         self,
+         *,
+         prompt: str,
+         messages: Optional[List[Dict[str, str]]] = None,
+         system_prompt: Optional[str] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+         params: Optional[Dict[str, Any]] = None,
+     ) -> Dict[str, Any]:
+         params = dict(params or {})
+
+         # Build OpenAI-like messages for the AbstractCore server.
+         out_messages: List[Dict[str, str]] = []
+         if system_prompt:
+             out_messages.append({"role": "system", "content": system_prompt})
+
+         if messages:
+             out_messages.extend(messages)
+         else:
+             out_messages.append({"role": "user", "content": prompt})
+
+         body: Dict[str, Any] = {
+             "model": self._model,
+             "messages": out_messages,
+             "stream": False,
+         }
+
+         # Dynamic routing support (AbstractCore server feature).
+         base_url = params.pop("base_url", None)
+         if base_url:
+             body["base_url"] = base_url
+
+         # Pass through common OpenAI-compatible parameters.
+         for key in (
+             "temperature",
+             "max_tokens",
+             "stop",
+             "seed",
+             "frequency_penalty",
+             "presence_penalty",
+         ):
+             if key in params and params[key] is not None:
+                 body[key] = params[key]
+
+         if tools is not None:
+             body["tools"] = tools
+
+         url = f"{self._server_base_url}/v1/chat/completions"
+         resp = self._sender.post(url, headers=self._headers, json=body, timeout=self._timeout_s)
+
+         # Normalize the OpenAI-like response.
+         try:
+             choice0 = (resp.get("choices") or [])[0]
+             msg = choice0.get("message") or {}
+             return {
+                 "content": msg.get("content"),
+                 "data": None,
+                 "tool_calls": _jsonable(msg.get("tool_calls")) if msg.get("tool_calls") is not None else None,
+                 "usage": _jsonable(resp.get("usage")) if resp.get("usage") is not None else None,
+                 "model": resp.get("model"),
+                 "finish_reason": choice0.get("finish_reason"),
+             }
+         except Exception:
+             # Fallback: return the raw response in JSON-safe form.
+             logger.warning("Remote LLM response normalization failed; returning raw JSON")
+             return {
+                 "content": None,
+                 "data": _jsonable(resp),
+                 "tool_calls": None,
+                 "usage": None,
+                 "model": resp.get("model") if isinstance(resp, dict) else None,
+                 "finish_reason": None,
+             }
+
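
Because the request sender is injectable, the remote client can be exercised without a live server. A sketch with a hypothetical stub that returns the minimal OpenAI-style shape the normalization path expects:

    from abstractruntime.integrations.abstractcore.llm_client import RemoteAbstractCoreLLMClient

    class StubSender:
        def post(self, url, *, headers, json, timeout):
            # Only the fields generate() actually reads.
            return {
                "model": json["model"],
                "choices": [{"message": {"content": "pong"}, "finish_reason": "stop"}],
                "usage": {"prompt_tokens": 1, "completion_tokens": 1},
            }

    client = RemoteAbstractCoreLLMClient(
        server_base_url="http://example.invalid",
        model="test-model",
        request_sender=StubSender(),
    )
    assert client.generate(prompt="ping")["content"] == "pong"
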
@@ -0,0 +1,27 @@
+ """abstractruntime.integrations.abstractcore.logging
+
+ Logging adapter for the AbstractCore-integrated runtime.
+
+ We prefer AbstractCore's structured logger for consistency across the stack.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any
+
+
+ def get_logger(name: str) -> Any:
+     """Return a logger compatible with AbstractCore's structured logger.
+
+     This is intentionally a thin wrapper to keep the integration layer small.
+     """
+
+     try:
+         from abstractcore.utils.structured_logging import get_logger as _get_logger
+
+         return _get_logger(name)
+     except Exception:  # pragma: no cover
+         import logging
+
+         return logging.getLogger(name)
+
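
Usage is identical either way, but capabilities differ: the structured keyword style used by the effect handlers (e.g. logger.error("LLM_CALL failed", error=...)) assumes AbstractCore's logger is importable, while the stdlib fallback accepts only standard logging arguments. A small sketch:

    from abstractruntime.integrations.abstractcore.logging import get_logger

    logger = get_logger(__name__)

    # Safe with either backend:
    logger.warning("tool execution disabled")

    # Structured kwargs such as error="timeout" require the AbstractCore backend.
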