AbstractRuntime 0.2.0__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractruntime/__init__.py +83 -3
- abstractruntime/core/config.py +82 -2
- abstractruntime/core/event_keys.py +62 -0
- abstractruntime/core/models.py +17 -1
- abstractruntime/core/policy.py +74 -3
- abstractruntime/core/runtime.py +3334 -28
- abstractruntime/core/vars.py +103 -2
- abstractruntime/evidence/__init__.py +10 -0
- abstractruntime/evidence/recorder.py +325 -0
- abstractruntime/history_bundle.py +772 -0
- abstractruntime/integrations/abstractcore/__init__.py +6 -0
- abstractruntime/integrations/abstractcore/constants.py +19 -0
- abstractruntime/integrations/abstractcore/default_tools.py +258 -0
- abstractruntime/integrations/abstractcore/effect_handlers.py +2622 -32
- abstractruntime/integrations/abstractcore/embeddings_client.py +69 -0
- abstractruntime/integrations/abstractcore/factory.py +149 -16
- abstractruntime/integrations/abstractcore/llm_client.py +891 -55
- abstractruntime/integrations/abstractcore/mcp_worker.py +587 -0
- abstractruntime/integrations/abstractcore/observability.py +80 -0
- abstractruntime/integrations/abstractcore/session_attachments.py +946 -0
- abstractruntime/integrations/abstractcore/summarizer.py +154 -0
- abstractruntime/integrations/abstractcore/tool_executor.py +509 -31
- abstractruntime/integrations/abstractcore/workspace_scoped_tools.py +561 -0
- abstractruntime/integrations/abstractmemory/__init__.py +3 -0
- abstractruntime/integrations/abstractmemory/effect_handlers.py +946 -0
- abstractruntime/memory/__init__.py +21 -0
- abstractruntime/memory/active_context.py +751 -0
- abstractruntime/memory/active_memory.py +452 -0
- abstractruntime/memory/compaction.py +105 -0
- abstractruntime/memory/kg_packets.py +164 -0
- abstractruntime/memory/memact_composer.py +175 -0
- abstractruntime/memory/recall_levels.py +163 -0
- abstractruntime/memory/token_budget.py +86 -0
- abstractruntime/rendering/__init__.py +17 -0
- abstractruntime/rendering/agent_trace_report.py +256 -0
- abstractruntime/rendering/json_stringify.py +136 -0
- abstractruntime/scheduler/scheduler.py +93 -2
- abstractruntime/storage/__init__.py +7 -2
- abstractruntime/storage/artifacts.py +175 -32
- abstractruntime/storage/base.py +17 -1
- abstractruntime/storage/commands.py +339 -0
- abstractruntime/storage/in_memory.py +41 -1
- abstractruntime/storage/json_files.py +210 -14
- abstractruntime/storage/observable.py +136 -0
- abstractruntime/storage/offloading.py +433 -0
- abstractruntime/storage/sqlite.py +836 -0
- abstractruntime/visualflow_compiler/__init__.py +29 -0
- abstractruntime/visualflow_compiler/adapters/__init__.py +11 -0
- abstractruntime/visualflow_compiler/adapters/agent_adapter.py +126 -0
- abstractruntime/visualflow_compiler/adapters/context_adapter.py +109 -0
- abstractruntime/visualflow_compiler/adapters/control_adapter.py +615 -0
- abstractruntime/visualflow_compiler/adapters/effect_adapter.py +1051 -0
- abstractruntime/visualflow_compiler/adapters/event_adapter.py +307 -0
- abstractruntime/visualflow_compiler/adapters/function_adapter.py +97 -0
- abstractruntime/visualflow_compiler/adapters/memact_adapter.py +114 -0
- abstractruntime/visualflow_compiler/adapters/subflow_adapter.py +74 -0
- abstractruntime/visualflow_compiler/adapters/variable_adapter.py +316 -0
- abstractruntime/visualflow_compiler/compiler.py +3832 -0
- abstractruntime/visualflow_compiler/flow.py +247 -0
- abstractruntime/visualflow_compiler/visual/__init__.py +13 -0
- abstractruntime/visualflow_compiler/visual/agent_ids.py +29 -0
- abstractruntime/visualflow_compiler/visual/builtins.py +1376 -0
- abstractruntime/visualflow_compiler/visual/code_executor.py +214 -0
- abstractruntime/visualflow_compiler/visual/executor.py +2804 -0
- abstractruntime/visualflow_compiler/visual/models.py +211 -0
- abstractruntime/workflow_bundle/__init__.py +52 -0
- abstractruntime/workflow_bundle/models.py +236 -0
- abstractruntime/workflow_bundle/packer.py +317 -0
- abstractruntime/workflow_bundle/reader.py +87 -0
- abstractruntime/workflow_bundle/registry.py +587 -0
- abstractruntime-0.4.1.dist-info/METADATA +177 -0
- abstractruntime-0.4.1.dist-info/RECORD +86 -0
- abstractruntime-0.4.1.dist-info/entry_points.txt +2 -0
- abstractruntime-0.2.0.dist-info/METADATA +0 -163
- abstractruntime-0.2.0.dist-info/RECORD +0 -32
- {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/WHEEL +0 -0
- {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/licenses/LICENSE +0 -0
abstractruntime/visualflow_compiler/adapters/effect_adapter.py
@@ -0,0 +1,1051 @@
"""Adapter for creating effect nodes in visual flows.

This adapter creates node handlers that produce AbstractRuntime Effects,
enabling visual flows to pause and wait for external input (user prompts,
events, delays, etc.).
"""

from __future__ import annotations

from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING

if TYPE_CHECKING:
    from abstractruntime.core.models import RunState, StepPlan


def create_ask_user_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
    allow_free_text: bool = True,
) -> Callable:
    """Create a node handler that asks the user for input.

    This handler produces an ASK_USER effect that pauses the flow
    until the user provides a response.

    Args:
        node_id: Unique identifier for this node
        next_node: ID of the next node to transition to after response
        input_key: Key in run.vars to read prompt/choices from
        output_key: Key in run.vars to write the response to
        allow_free_text: Whether to allow free text response

    Returns:
        A node handler that produces ASK_USER effect
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        """Ask user and wait for response."""
        # Get input from vars
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        # Extract prompt and choices
        if isinstance(input_data, dict):
            prompt = input_data.get("prompt", "Please respond:")
            choices = input_data.get("choices", [])
        else:
            prompt = str(input_data) if input_data else "Please respond:"
            choices = []

        # Ensure choices is a list
        if not isinstance(choices, list):
            choices = []

        # Create the effect
        effect = Effect(
            type=EffectType.ASK_USER,
            payload={
                "prompt": prompt,
                "choices": choices,
                "allow_free_text": allow_free_text,
            },
            result_key=output_key or "_temp.user_response",
        )

        return StepPlan(
            node_id=node_id,
            effect=effect,
            next_node=next_node,
        )

    return handler


def create_answer_user_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that requests the host UI to display a message.

    This handler produces an ANSWER_USER effect that completes immediately.
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        if isinstance(input_data, dict):
            message = input_data.get("message") or input_data.get("text") or ""
            level_raw = input_data.get("level")
        else:
            message = str(input_data) if input_data is not None else ""
            level_raw = None

        level = str(level_raw).strip().lower() if isinstance(level_raw, str) else ""
        if level == "warn":
            level = "warning"
        if level == "info":
            level = "message"
        if level not in {"message", "warning", "error"}:
            level = "message"

        effect = Effect(
            type=EffectType.ANSWER_USER,
            payload={"message": str(message), "level": level},
            result_key=output_key or "_temp.answer_user",
        )

        return StepPlan(
            node_id=node_id,
            effect=effect,
            next_node=next_node,
        )

    return handler


def create_wait_until_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
    duration_type: str = "seconds",
) -> Callable:
    """Create a node handler that waits until a specified time.

    Args:
        node_id: Unique identifier for this node
        next_node: ID of the next node to transition to after waiting
        input_key: Key in run.vars to read duration from
        output_key: Key in run.vars to write the completion info to
        duration_type: How to interpret duration (seconds/minutes/hours/timestamp)

    Returns:
        A node handler that produces WAIT_UNTIL effect
    """
    from datetime import datetime, timedelta, timezone
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        """Wait until time and then continue."""
        # Get input from vars
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        # Extract duration
        if isinstance(input_data, dict):
            duration = input_data.get("duration", 0)
        else:
            duration = input_data

        # Convert to seconds
        try:
            amount = float(duration) if duration else 0
        except (TypeError, ValueError):
            amount = 0

        # Calculate target time
        now = datetime.now(timezone.utc)

        if duration_type == "timestamp":
            # Already an ISO timestamp
            until = str(duration)
        elif duration_type == "minutes":
            until = (now + timedelta(minutes=amount)).isoformat()
        elif duration_type == "hours":
            until = (now + timedelta(hours=amount)).isoformat()
        else:  # seconds
            until = (now + timedelta(seconds=amount)).isoformat()

        # Create the effect
        effect = Effect(
            type=EffectType.WAIT_UNTIL,
            payload={"until": until},
            result_key=output_key or "_temp.wait_result",
        )

        return StepPlan(
            node_id=node_id,
            effect=effect,
            next_node=next_node,
        )

    return handler


def create_wait_event_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that waits for an external event.

    Args:
        node_id: Unique identifier for this node
        next_node: ID of the next node to transition to after event
        input_key: Key in run.vars to read event_key from
        output_key: Key in run.vars to write the event data to

    Returns:
        A node handler that produces WAIT_EVENT effect
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        """Wait for event and then continue."""
        # Get input from vars
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        # Extract event key + optional host UX fields (prompt/choices).
        if isinstance(input_data, dict):
            event_key = input_data.get("event_key")
            if event_key is None:
                event_key = input_data.get("wait_key")
            if not event_key:
                event_key = "default"
            prompt = input_data.get("prompt")
            choices = input_data.get("choices")
            allow_free_text = input_data.get("allow_free_text")
            if allow_free_text is None:
                allow_free_text = input_data.get("allowFreeText")
        else:
            event_key = str(input_data) if input_data else "default"
            prompt = None
            choices = None
            allow_free_text = None

        # Create the effect
        effect = Effect(
            type=EffectType.WAIT_EVENT,
            payload={
                "wait_key": str(event_key),
                **({"prompt": prompt} if isinstance(prompt, str) and prompt.strip() else {}),
                **({"choices": choices} if isinstance(choices, list) else {}),
                **({"allow_free_text": bool(allow_free_text)} if allow_free_text is not None else {}),
            },
            result_key=output_key or "_temp.event_data",
        )

        return StepPlan(
            node_id=node_id,
            effect=effect,
            next_node=next_node,
        )

    return handler


def create_memory_note_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that stores a memory note.

    Args:
        node_id: Unique identifier for this node
        next_node: ID of the next node to transition to after storing
        input_key: Key in run.vars to read note content from
        output_key: Key in run.vars to write the note_id to

    Returns:
        A node handler that produces MEMORY_NOTE effect
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        """Store memory note and continue."""
        # Get input from vars
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        # Extract content
        if isinstance(input_data, dict):
            content = input_data.get("content", "")
            tags = input_data.get("tags") if isinstance(input_data.get("tags"), dict) else {}
            sources = input_data.get("sources") if isinstance(input_data.get("sources"), dict) else None
            scope = input_data.get("scope") if isinstance(input_data.get("scope"), str) else None
            location = input_data.get("location") if isinstance(input_data.get("location"), str) else None
            keep_in_context = input_data.get("keep_in_context")
            if keep_in_context is None:
                keep_in_context = input_data.get("keepInContext")
        else:
            content = str(input_data) if input_data else ""
            tags = {}
            sources = None
            scope = None
            location = None
            keep_in_context = None

        # Create the effect
        payload: Dict[str, Any] = {"note": content, "tags": tags}
        if sources is not None:
            payload["sources"] = sources
        if scope:
            payload["scope"] = scope
        if isinstance(location, str) and location.strip():
            payload["location"] = location.strip()
        if keep_in_context is not None:
            payload["keep_in_context"] = keep_in_context

        effect = Effect(
            type=EffectType.MEMORY_NOTE,
            payload=payload,
            result_key=output_key or "_temp.note_id",
        )

        return StepPlan(
            node_id=node_id,
            effect=effect,
            next_node=next_node,
        )

    return handler


def create_memory_query_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that queries memory.

    Args:
        node_id: Unique identifier for this node
        next_node: ID of the next node to transition to after query
        input_key: Key in run.vars to read query from
        output_key: Key in run.vars to write results to

    Returns:
        A node handler that produces MEMORY_QUERY effect
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        """Query memory and continue."""
        # Get input from vars
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        # Extract query params
        if isinstance(input_data, dict):
            query = input_data.get("query", "")
            limit = input_data.get("limit", 10)
            tags = input_data.get("tags") if isinstance(input_data.get("tags"), dict) else None
            tags_mode = input_data.get("tags_mode")
            if tags_mode is None:
                tags_mode = input_data.get("tagsMode")
            usernames = input_data.get("usernames")
            locations = input_data.get("locations")
            since = input_data.get("since")
            until = input_data.get("until")
            scope = input_data.get("scope") if isinstance(input_data.get("scope"), str) else None
        else:
            query = str(input_data) if input_data else ""
            limit = 10
            tags = None
            tags_mode = None
            usernames = None
            locations = None
            since = None
            until = None
            scope = None

        def _normalize_str_list(raw: Any) -> Optional[List[str]]:
            if raw is None:
                return None
            if isinstance(raw, str):
                s = raw.strip()
                return [s] if s else None
            if not isinstance(raw, list):
                return None
            out: List[str] = []
            for x in raw:
                if isinstance(x, str) and x.strip():
                    out.append(x.strip())
            return out or None

        # Create the effect
        payload: Dict[str, Any] = {"query": query, "limit_spans": limit, "return": "both"}
        if tags is not None:
            payload["tags"] = tags
        if isinstance(tags_mode, str) and tags_mode.strip():
            payload["tags_mode"] = tags_mode.strip()
        usernames_list = _normalize_str_list(usernames)
        if usernames_list is not None:
            payload["usernames"] = usernames_list
        locations_list = _normalize_str_list(locations)
        if locations_list is not None:
            payload["locations"] = locations_list
        if since is not None:
            payload["since"] = since
        if until is not None:
            payload["until"] = until
        if scope:
            payload["scope"] = scope

        effect = Effect(
            type=EffectType.MEMORY_QUERY,
            payload=payload,
            result_key=output_key or "_temp.memory_results",
        )

        return StepPlan(
            node_id=node_id,
            effect=effect,
            next_node=next_node,
        )

    return handler


def create_memory_kg_assert_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that asserts triples into AbstractMemory (host-provided handler)."""
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def _normalize_assertions(raw: Any) -> list[Dict[str, Any]]:
        if raw is None:
            return []
        if isinstance(raw, dict):
            return [dict(raw)]
        if isinstance(raw, list):
            out: list[Dict[str, Any]] = []
            for x in raw:
                if isinstance(x, dict):
                    out.append(dict(x))
            return out
        return []

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        del ctx
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        assertions_raw: Any = None
        scope: Optional[str] = None
        owner_id: Optional[str] = None
        span_id: Optional[str] = None
        attributes_defaults: Optional[Dict[str, Any]] = None
        allow_custom_predicates: Optional[bool] = None
        if isinstance(input_data, dict):
            assertions_raw = input_data.get("assertions")
            if assertions_raw is None:
                assertions_raw = input_data.get("triples")
            if assertions_raw is None:
                assertions_raw = input_data.get("items")
            scope = input_data.get("scope") if isinstance(input_data.get("scope"), str) else None
            owner_id = input_data.get("owner_id") if isinstance(input_data.get("owner_id"), str) else None
            span_id = input_data.get("span_id") if isinstance(input_data.get("span_id"), str) else None
            attributes_defaults = input_data.get("attributes_defaults") if isinstance(input_data.get("attributes_defaults"), dict) else None
            allow_custom_predicates = (
                input_data.get("allow_custom_predicates")
                if isinstance(input_data.get("allow_custom_predicates"), bool)
                else input_data.get("allow_custom")
                if isinstance(input_data.get("allow_custom"), bool)
                else None
            )

        assertions = _normalize_assertions(assertions_raw)
        payload: Dict[str, Any] = {"assertions": assertions}
        if scope:
            payload["scope"] = scope
        if owner_id:
            payload["owner_id"] = owner_id
        if span_id:
            payload["span_id"] = span_id
        if attributes_defaults:
            payload["attributes_defaults"] = dict(attributes_defaults)
        if allow_custom_predicates is not None:
            payload["allow_custom_predicates"] = bool(allow_custom_predicates)

        return StepPlan(
            node_id=node_id,
            effect=Effect(type=EffectType.MEMORY_KG_ASSERT, payload=payload, result_key=output_key or "_temp.memory_kg_assert"),
            next_node=next_node,
        )

    return handler


def create_memory_kg_query_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that queries AbstractMemory triples (host-provided handler)."""
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        del ctx
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        payload: Dict[str, Any] = {}
        if isinstance(input_data, dict):
            for k in (
                "subject",
                "predicate",
                "object",
                "scope",
                "owner_id",
                "since",
                "until",
                "active_at",
                "query_text",
                "order",
            ):
                v = input_data.get(k)
                if isinstance(v, str) and v.strip():
                    payload[k] = v.strip()
            min_score = input_data.get("min_score")
            if min_score is not None and not isinstance(min_score, bool):
                try:
                    payload["min_score"] = float(min_score)
                except Exception:
                    pass
            limit = input_data.get("limit")
            if limit is None:
                limit = input_data.get("limit_spans")
            if limit is not None and not isinstance(limit, bool):
                try:
                    payload["limit"] = int(limit)
                except Exception:
                    pass

        return StepPlan(
            node_id=node_id,
            effect=Effect(type=EffectType.MEMORY_KG_QUERY, payload=payload, result_key=output_key or "_temp.memory_kg_query"),
            next_node=next_node,
        )

    return handler


def create_memory_kg_resolve_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that resolves entity candidates from AbstractMemory triples."""
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        del ctx
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        payload: Dict[str, Any] = {}
        if isinstance(input_data, dict):
            for k in ("label", "expected_type", "scope", "owner_id", "recall_level"):
                v = input_data.get(k)
                if isinstance(v, str) and v.strip():
                    payload[k] = v.strip()

            min_score = input_data.get("min_score")
            if min_score is not None and not isinstance(min_score, bool):
                try:
                    payload["min_score"] = float(min_score)
                except Exception:
                    pass

            max_candidates = input_data.get("max_candidates")
            if max_candidates is None:
                max_candidates = input_data.get("limit")
            if max_candidates is not None and not isinstance(max_candidates, bool):
                try:
                    payload["max_candidates"] = int(max_candidates)
                except Exception:
                    pass

        return StepPlan(
            node_id=node_id,
            effect=Effect(type=EffectType.MEMORY_KG_RESOLVE, payload=payload, result_key=output_key or "_temp.memory_kg_resolve"),
            next_node=next_node,
        )

    return handler


def create_memory_tag_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that applies tags to an existing memory span record."""
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        del ctx
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        span_id: Any = None
        tags: Dict[str, Any] = {}
        merge: Optional[bool] = None
        scope: Optional[str] = None
        if isinstance(input_data, dict):
            span_id = input_data.get("span_id")
            if span_id is None:
                span_id = input_data.get("spanId")
            raw_tags = input_data.get("tags")
            tags = raw_tags if isinstance(raw_tags, dict) else {}
            if "merge" in input_data:
                merge = bool(input_data.get("merge"))
            if isinstance(input_data.get("scope"), str) and str(input_data.get("scope") or "").strip():
                scope = str(input_data.get("scope") or "").strip()

        payload: Dict[str, Any] = {"span_id": span_id, "tags": tags}
        if merge is not None:
            payload["merge"] = merge
        if scope is not None:
            payload["scope"] = scope

        return StepPlan(
            node_id=node_id,
            effect=Effect(type=EffectType.MEMORY_TAG, payload=payload, result_key=output_key or "_temp.memory_tag"),
            next_node=next_node,
        )

    return handler


def create_memory_compact_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that requests runtime-owned memory compaction."""
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        del ctx
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        preserve_recent: Optional[int] = None
        compression_mode: Optional[str] = None
        focus: Optional[str] = None
        if isinstance(input_data, dict):
            if input_data.get("preserve_recent") is not None:
                try:
                    preserve_recent = int(input_data.get("preserve_recent"))
                except Exception:
                    preserve_recent = None
            compression_mode = input_data.get("compression_mode") if isinstance(input_data.get("compression_mode"), str) else None
            focus = input_data.get("focus") if isinstance(input_data.get("focus"), str) else None

        payload: Dict[str, Any] = {}
        if preserve_recent is not None:
            payload["preserve_recent"] = preserve_recent
        if isinstance(compression_mode, str) and compression_mode.strip():
            payload["compression_mode"] = compression_mode.strip()
        if isinstance(focus, str) and focus.strip():
            payload["focus"] = focus.strip()

        return StepPlan(
            node_id=node_id,
            effect=Effect(type=EffectType.MEMORY_COMPACT, payload=payload, result_key=output_key or "_temp.memory_compact"),
            next_node=next_node,
        )

    return handler


def create_memory_rehydrate_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
) -> Callable:
    """Create a node handler that rehydrates recalled spans into context.messages.

    This produces a runtime-owned `EffectType.MEMORY_REHYDRATE` so rehydration is durable and host-agnostic.
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        del ctx
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        span_ids = []
        placement = "after_summary"
        max_messages = None
        if isinstance(input_data, dict):
            raw = input_data.get("span_ids")
            if raw is None:
                raw = input_data.get("span_id")
            if isinstance(raw, list):
                span_ids = list(raw)
            elif raw is not None:
                span_ids = [raw]
            if isinstance(input_data.get("placement"), str):
                placement = str(input_data.get("placement") or "").strip() or placement
            if input_data.get("max_messages") is not None:
                max_messages = input_data.get("max_messages")

        payload: Dict[str, Any] = {"span_ids": span_ids, "placement": placement}
        if max_messages is not None:
            payload["max_messages"] = max_messages

        return StepPlan(
            node_id=node_id,
            effect=Effect(
                type=EffectType.MEMORY_REHYDRATE,
                payload=payload,
                result_key=output_key or "_temp.memory_rehydrate",
            ),
            next_node=next_node,
        )

    return handler


def create_llm_call_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
    provider: Optional[str] = None,
    model: Optional[str] = None,
    temperature: float = 0.7,
    seed: int = -1,
) -> Callable:
    """Create a node handler that makes an LLM call.

    Args:
        node_id: Unique identifier for this node
        next_node: ID of the next node to transition to after LLM response
        input_key: Key in run.vars to read prompt/system from
        output_key: Key in run.vars to write response to
        provider: LLM provider to use
        model: Model name to use
        temperature: Temperature parameter
        seed: Seed parameter (-1 means random/unset)

    Returns:
        A node handler that produces LLM_CALL effect
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        """Make LLM call and continue."""
        # Get input from vars
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        # Extract prompt and system
        if isinstance(input_data, dict):
            prompt = input_data.get("prompt", "")
            system = input_data.get("system", "")
        else:
            prompt = str(input_data) if input_data else ""
            system = ""

        # Build messages for LLM
        messages = []
        if system:
            messages.append({"role": "system", "content": system})
        messages.append({"role": "user", "content": prompt})

        # Create the effect
        effect = Effect(
            type=EffectType.LLM_CALL,
            payload={
                "messages": messages,
                "provider": provider,
                "model": model,
                "params": {
                    "temperature": float(temperature),
                },
            },
            result_key=output_key or "_temp.llm_response",
        )
        try:
            seed_i = int(seed)
        except Exception:
            seed_i = -1
        if seed_i >= 0:
            effect.payload["params"]["seed"] = seed_i

        return StepPlan(
            node_id=node_id,
            effect=effect,
            next_node=next_node,
        )

    return handler


def create_tool_calls_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
    allowed_tools: Optional[List[str]] = None,
) -> Callable:
    """Create a node handler that executes tool calls via AbstractRuntime.

    This produces a durable `EffectType.TOOL_CALLS` so tool execution stays runtime-owned.

    Inputs:
    - `tool_calls`: list[dict] (or a single dict) in the common shape
      `{name, arguments, call_id?}`.
    - Optional `allowed_tools`: list[str] allowlist. If provided as a list, the
      runtime effect handler enforces it (empty list => allow none).
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def _normalize_tool_calls(raw: Any) -> list[Dict[str, Any]]:
        if raw is None:
            return []
        if isinstance(raw, dict):
            return [dict(raw)]
        if isinstance(raw, list):
            out: list[Dict[str, Any]] = []
            for x in raw:
                if isinstance(x, dict):
                    out.append(dict(x))
            return out
        return []

    def _normalize_str_list(raw: Any) -> list[str]:
        if not isinstance(raw, list):
            return []
        out: list[str] = []
        for x in raw:
            if isinstance(x, str) and x.strip():
                out.append(x.strip())
        return out

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        del ctx
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        tool_calls: list[Dict[str, Any]] = []
        allowlist: Optional[list[str]] = list(allowed_tools) if isinstance(allowed_tools, list) else None

        if isinstance(input_data, dict):
            raw_calls = input_data.get("tool_calls")
            if raw_calls is None:
                raw_calls = input_data.get("toolCalls")
            tool_calls = _normalize_tool_calls(raw_calls)

            # Optional override when the input explicitly provides an allowlist.
            if "allowed_tools" in input_data or "allowedTools" in input_data:
                raw_allowed = input_data.get("allowed_tools")
                if raw_allowed is None:
                    raw_allowed = input_data.get("allowedTools")
                allowlist = _normalize_str_list(raw_allowed)
        else:
            tool_calls = _normalize_tool_calls(input_data)

        payload: Dict[str, Any] = {"tool_calls": tool_calls}
        if isinstance(allowlist, list):
            payload["allowed_tools"] = _normalize_str_list(allowlist)

        return StepPlan(
            node_id=node_id,
            effect=Effect(
                type=EffectType.TOOL_CALLS,
                payload=payload,
                result_key=output_key or "_temp.tool_calls",
            ),
            next_node=next_node,
        )

    return handler


def create_call_tool_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
    allowed_tools: Optional[List[str]] = None,
) -> Callable:
    """Create a node handler that executes a single tool call via AbstractRuntime.

    This is a convenience wrapper over `EffectType.TOOL_CALLS` that accepts a single
    tool call dict (instead of an array) and returns a 1-element tool_calls list.

    Inputs:
    - `tool_call`: dict in the common shape `{name, arguments, call_id?}`.
    - Optional `allowed_tools`: list[str] allowlist. If provided as a list, the
      runtime effect handler enforces it (empty list => allow none).
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def _normalize_tool_call(raw: Any) -> Optional[Dict[str, Any]]:
        if raw is None:
            return None
        if isinstance(raw, dict):
            return dict(raw)
        return None

    def _normalize_str_list(raw: Any) -> list[str]:
        if not isinstance(raw, list):
            return []
        out: list[str] = []
        for x in raw:
            if isinstance(x, str) and x.strip():
                out.append(x.strip())
        return out

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        del ctx
        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        tool_call: Optional[Dict[str, Any]] = None
        allowlist: Optional[list[str]] = list(allowed_tools) if isinstance(allowed_tools, list) else None

        if isinstance(input_data, dict):
            raw_call = input_data.get("tool_call")
            if raw_call is None:
                raw_call = input_data.get("toolCall")
            tool_call = _normalize_tool_call(raw_call)

            # Optional override when the input explicitly provides an allowlist.
            if "allowed_tools" in input_data or "allowedTools" in input_data:
                raw_allowed = input_data.get("allowed_tools")
                if raw_allowed is None:
                    raw_allowed = input_data.get("allowedTools")
                allowlist = _normalize_str_list(raw_allowed)
        else:
            tool_call = _normalize_tool_call(input_data)

        payload: Dict[str, Any] = {"tool_calls": [tool_call] if isinstance(tool_call, dict) else []}
        if isinstance(allowlist, list):
            payload["allowed_tools"] = _normalize_str_list(allowlist)

        return StepPlan(
            node_id=node_id,
            effect=Effect(
                type=EffectType.TOOL_CALLS,
                payload=payload,
                result_key=output_key or "_temp.call_tool",
            ),
            next_node=next_node,
        )

    return handler


def create_start_subworkflow_handler(
    node_id: str,
    next_node: Optional[str],
    input_key: Optional[str] = None,
    output_key: Optional[str] = None,
    workflow_id: Optional[str] = None,
) -> Callable:
    """Create a node handler that starts a subworkflow by workflow id.

    This is the effect-level equivalent of `create_subflow_node_handler`, but it
    defers lookup/execution to the runtime's workflow registry.
    """
    from abstractruntime.core.models import StepPlan, Effect, EffectType

    def handler(run: "RunState", ctx: Any) -> "StepPlan":
        if not workflow_id:
            return StepPlan(
                node_id=node_id,
                complete_output={
                    "success": False,
                    "error": "start_subworkflow requires workflow_id (node config missing)",
                },
            )

        if input_key:
            input_data = run.vars.get(input_key, {})
        else:
            input_data = run.vars

        sub_vars: Dict[str, Any] = {}
        if isinstance(input_data, dict):
            # Prefer explicit "vars" field, else pass through common "input" field.
            if isinstance(input_data.get("vars"), dict):
                sub_vars = dict(input_data["vars"])
            elif isinstance(input_data.get("input"), dict):
                sub_vars = dict(input_data["input"])
            else:
                sub_vars = dict(input_data)
        else:
            sub_vars = {"input": input_data}

        return StepPlan(
            node_id=node_id,
            effect=Effect(
                type=EffectType.START_SUBWORKFLOW,
                payload={
                    "workflow_id": workflow_id,
                    "vars": sub_vars,
                    "async": False,
                },
                result_key=output_key or f"_temp.effects.{node_id}",
            ),
            next_node=next_node,
        )

    return handler