abstractcode 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcode/__init__.py +1 -1
- abstractcode/cli.py +612 -1
- abstractcode/file_mentions.py +276 -0
- abstractcode/fullscreen_ui.py +1592 -74
- abstractcode/gateway_cli.py +715 -0
- abstractcode/react_shell.py +2474 -116
- abstractcode/terminal_markdown.py +426 -37
- abstractcode/theme.py +244 -0
- abstractcode/workflow_agent.py +630 -112
- abstractcode/workflow_cli.py +229 -0
- abstractcode-0.3.1.dist-info/METADATA +158 -0
- abstractcode-0.3.1.dist-info/RECORD +21 -0
- {abstractcode-0.3.0.dist-info → abstractcode-0.3.1.dist-info}/WHEEL +1 -1
- abstractcode-0.3.0.dist-info/METADATA +0 -270
- abstractcode-0.3.0.dist-info/RECORD +0 -17
- {abstractcode-0.3.0.dist-info → abstractcode-0.3.1.dist-info}/entry_points.txt +0 -0
- {abstractcode-0.3.0.dist-info → abstractcode-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {abstractcode-0.3.0.dist-info → abstractcode-0.3.1.dist-info}/top_level.txt +0 -0
abstractcode/__init__.py
CHANGED
abstractcode/cli.py
CHANGED
@@ -49,6 +49,7 @@ def build_agent_parser() -> argparse.ArgumentParser:
         epilog=(
             "Workflows:\n"
             " abstractcode flow --help Run AbstractFlow workflows from the terminal\n"
+            " abstractcode workflow --help Install/list workflow bundles\n"
             "REPL:\n"
             " Use /flow inside the REPL to run workflows while keeping chat context.\n"
         ),
@@ -60,7 +61,10 @@ def build_agent_parser() -> argparse.ArgumentParser:
         help=(
             "Agent selector:\n"
             " - Built-ins: react | codeact | memact\n"
-            " - Workflow agent
+            " - Workflow agent:\n"
+            " <flow_id> | <flow_name> | </path/to/flow.json>\n"
+            " <bundle_id>[@version] | </path/to/bundle.flow>\n"
+            " <bundle_id>[@version]:<flow_id>\n"
             " (must implement interface 'abstractcode.agent.v1')"
         ),
     )
@@ -124,10 +128,385 @@ def build_agent_parser() -> argparse.ArgumentParser:
         default=_default_max_tokens(),
         help="Maximum context tokens for LLM calls (-1 = auto from model capabilities).",
     )
+    parser.add_argument(
+        "--prompt",
+        default=None,
+        help="Run a single prompt and exit (supports @file mentions).",
+    )
     parser.add_argument("--no-color", action="store_true", help="Disable ANSI colors")
+    parser.add_argument(
+        "--gateway-url",
+        default=None,
+        help=(
+            "AbstractGateway base URL (for host metrics like /gpu).\n"
+            "Overrides $ABSTRACTCODE_GATEWAY_URL for this run."
+        ),
+    )
+    parser.add_argument(
+        "--gateway-token",
+        default=None,
+        help=(
+            "AbstractGateway auth token (Bearer) (for host metrics like /gpu).\n"
+            "Overrides $ABSTRACTCODE_GATEWAY_TOKEN for this run (not persisted)."
+        ),
+    )
+    return parser
+
+
+def build_workflow_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        prog="abstractcode workflow",
+        description="Manage WorkflowBundle (.flow) bundles on an AbstractGateway host (upload/remove/discovery).",
+    )
+    sub = parser.add_subparsers(dest="command")
+
+    common = argparse.ArgumentParser(add_help=False)
+    common.add_argument("--gateway-url", default=None, help="Gateway base URL (default: $ABSTRACTCODE_GATEWAY_URL)")
+    common.add_argument("--gateway-token", default=None, help="Gateway auth token (default: $ABSTRACTCODE_GATEWAY_TOKEN)")
+
+    install = sub.add_parser("install", parents=[common], help="Upload/install a .flow bundle onto the gateway")
+    install.add_argument("source", help="Path to a .flow file")
+    install.add_argument("--overwrite", action="store_true", help="Overwrite if already installed")
+    install.add_argument("--json", action="store_true", help="Output JSON")
+
+    ls = sub.add_parser("list", parents=[common], help="List available workflow entrypoints (from gateway bundles)")
+    ls.add_argument("--interface", default=None, help="Filter entrypoints by interface id")
+    ls.add_argument("--all", action="store_true", help="Include all versions (default: latest only)")
+    ls.add_argument("--include-deprecated", action="store_true", help="Include deprecated workflows")
+    ls.add_argument("--json", action="store_true", help="Output JSON")
+
+    info = sub.add_parser("info", parents=[common], help="Show details for an installed bundle")
+    info.add_argument("bundle", help="Bundle ref: bundle_id or bundle_id@version")
+    info.add_argument("--json", action="store_true", help="Output JSON")
+
+    rm = sub.add_parser("remove", parents=[common], help="Remove an installed bundle (bundle_id or bundle_id@version)")
+    rm.add_argument("bundle", help="Bundle ref: bundle_id or bundle_id@version")
+    rm.add_argument("--json", action="store_true", help="Output JSON")
+
+    dep = sub.add_parser("deprecate", parents=[common], help="Deprecate a workflow bundle on the gateway (hide + block launch)")
+    dep.add_argument("bundle", help="Bundle id (bundle_id)")
+    dep.add_argument("--flow-id", default=None, help="Optional entrypoint flow_id (default: all entrypoints)")
+    dep.add_argument("--reason", default=None, help="Optional reason")
+    dep.add_argument("--json", action="store_true", help="Output JSON")
+
+    undep = sub.add_parser("undeprecate", parents=[common], help="Undeprecate a workflow bundle on the gateway")
+    undep.add_argument("bundle", help="Bundle id (bundle_id)")
+    undep.add_argument("--flow-id", default=None, help="Optional entrypoint flow_id (default: all entrypoints)")
+    undep.add_argument("--json", action="store_true", help="Output JSON")
+
     return parser
 
 
+def _run_one_shot_prompt(*, shell: ReactShell, prompt: str) -> int:
+    """Run one task and exit (no full-screen UI)."""
+    from .file_mentions import extract_at_file_mentions, normalize_relative_path
+    from .flow_cli import _ApprovalState, _approve_and_execute
+
+    # Lazy imports: keep `abstractcode --help` fast.
+    from abstractruntime.core.models import RunStatus, WaitReason
+
+    text = str(prompt or "").strip()
+    if not text:
+        return 0
+
+    def _stderr_print(msg: str) -> None:
+        print(msg, file=sys.stderr)
+
+    cleaned, mentions = extract_at_file_mentions(text)
+    paths: list[str] = []
+    for m in mentions:
+        norm = normalize_relative_path(m)
+        if norm:
+            paths.append(norm)
+
+    # De-dup while preserving order.
+    seen: set[str] = set()
+    paths = [p for p in paths if not (p in seen or seen.add(p))]
+
+    attachment_refs = shell._ingest_workspace_attachments(paths) if paths else []
+    if attachment_refs:
+        joined = ", ".join(
+            [
+                str(a.get("source_path") or a.get("filename") or "?")
+                for a in attachment_refs
+                if isinstance(a, dict)
+            ]
+        )
+        if joined:
+            print(f"Attachments: {joined}", file=sys.stderr)
+
+    cleaned = str(cleaned or "").strip()
+    if not cleaned:
+        # Attachment-only invocation: allow users to attach files without issuing a prompt.
+        return 0
+
+    run_id = shell._agent.start(cleaned, allowed_tools=shell._allowed_tools, attachments=attachment_refs or None)
+    try:
+        shell._sync_tool_prompt_settings_to_run(run_id)
+    except Exception:
+        pass
+    if getattr(shell, "_state_file", None):
+        try:
+            shell._agent.save_state(shell._state_file)  # type: ignore[arg-type]
+        except Exception:
+            pass
+
+    approval_state = _ApprovalState()
+
+    def _drive_subworkflow_wait(*, top_run_id: str) -> int:
+        """Drive async subworkflow waits until top run can advance or blocks on a real wait."""
+
+        def _extract_sub_run_id(wait_state: object) -> Optional[str]:
+            details = getattr(wait_state, "details", None)
+            if isinstance(details, dict):
+                sub_run_id = details.get("sub_run_id")
+                if isinstance(sub_run_id, str) and sub_run_id:
+                    return sub_run_id
+            wait_key = getattr(wait_state, "wait_key", None)
+            if isinstance(wait_key, str) and wait_key.startswith("subworkflow:"):
+                return wait_key.split("subworkflow:", 1)[1] or None
+            return None
+
+        def _workflow_for(run_state: object):
+            reg = getattr(shell._runtime, "workflow_registry", None)
+            getter = getattr(reg, "get", None) if reg is not None else None
+            if callable(getter):
+                wf = getter(run_state.workflow_id)
+                if wf is not None:
+                    return wf
+            if getattr(shell._agent.workflow, "workflow_id", None) == run_state.workflow_id:
+                return shell._agent.workflow
+            raise RuntimeError(f"Workflow '{run_state.workflow_id}' not found in runtime registry")
+
+        def _bubble_completion(child_state: object) -> Optional[str]:
+            parent_id = getattr(child_state, "parent_run_id", None)
+            if not isinstance(parent_id, str) or not parent_id:
+                return None
+            parent_state = shell._runtime.get_state(parent_id)
+            parent_wait = getattr(parent_state, "waiting", None)
+            if parent_state.status != RunStatus.WAITING or parent_wait is None:
+                return None
+            if parent_wait.reason != WaitReason.SUBWORKFLOW:
+                return None
+            shell._runtime.resume(
+                workflow=_workflow_for(parent_state),
+                run_id=parent_id,
+                wait_key=None,
+                payload={
+                    "sub_run_id": child_state.run_id,
+                    "output": getattr(child_state, "output", None),
+                    "node_traces": shell._runtime.get_node_traces(child_state.run_id),
+                },
+                max_steps=0,
+            )
+            return parent_id
+
+        # Drive subruns until we either make progress or hit a non-subworkflow wait.
+        for _ in range(200):
+            # Descend to the deepest sub-run referenced by SUBWORKFLOW waits.
+            current_run_id = top_run_id
+            for _ in range(25):
+                cur_state = shell._runtime.get_state(current_run_id)
+                cur_wait = getattr(cur_state, "waiting", None)
+                if cur_state.status != RunStatus.WAITING or cur_wait is None:
+                    break
+                if cur_wait.reason != WaitReason.SUBWORKFLOW:
+                    break
+                next_id = _extract_sub_run_id(cur_wait)
+                if not next_id:
+                    break
+                current_run_id = next_id
+
+            current_state = shell._runtime.get_state(current_run_id)
+
+            # Tick running subruns until they block/complete.
+            if current_state.status == RunStatus.RUNNING:
+                current_state = shell._runtime.tick(
+                    workflow=_workflow_for(current_state),
+                    run_id=current_run_id,
+                    max_steps=100,
+                )
+
+            if current_state.status == RunStatus.RUNNING:
+                continue
+
+            if current_state.status == RunStatus.FAILED:
+                _stderr_print(f"Run failed: {current_state.error or 'Subworkflow failed'}")
+                return 1
+
+            if current_state.status == RunStatus.CANCELLED:
+                _stderr_print("Run cancelled.")
+                return 1
+
+            if current_state.status == RunStatus.WAITING:
+                cur_wait = getattr(current_state, "waiting", None)
+                if cur_wait is None:
+                    break
+                if cur_wait.reason == WaitReason.SUBWORKFLOW:
+                    continue
+
+                if cur_wait.reason == WaitReason.USER:
+                    prompt_text = str(cur_wait.prompt or "Please respond:").strip()
+                    response = input(prompt_text + " ")
+                    shell._runtime.resume(
+                        workflow=_workflow_for(current_state),
+                        run_id=current_run_id,
+                        wait_key=cur_wait.wait_key,
+                        payload={"response": response},
+                    )
+                    continue
+
+                if cur_wait.reason == WaitReason.EVENT:
+                    details = cur_wait.details if isinstance(cur_wait.details, dict) else {}
+                    tool_calls = details.get("tool_calls")
+                    if isinstance(tool_calls, list):
+                        payload = _approve_and_execute(
+                            tool_calls=tool_calls,
+                            tool_runner=shell._tool_runner,
+                            auto_approve=bool(shell._auto_approve),
+                            approval_state=approval_state,
+                            prompt_fn=input,
+                            print_fn=_stderr_print,
+                        )
+                        if payload is None:
+                            _stderr_print("Aborted (tool calls not executed).")
+                            return 1
+                        shell._runtime.resume(
+                            workflow=_workflow_for(current_state),
+                            run_id=current_run_id,
+                            wait_key=cur_wait.wait_key,
+                            payload=payload,
+                        )
+                        continue
+
+                if isinstance(cur_wait.prompt, str) and cur_wait.prompt.strip() and isinstance(cur_wait.wait_key, str) and cur_wait.wait_key:
+                    response = input(cur_wait.prompt.strip() + " ")
+                    shell._runtime.resume(
+                        workflow=_workflow_for(current_state),
+                        run_id=current_run_id,
+                        wait_key=cur_wait.wait_key,
+                        payload={"response": response},
+                    )
+                    continue
+
+                _stderr_print(f"Run waiting: {cur_wait.reason.value} ({cur_wait.wait_key})")
+                return 2
+
+            if current_state.status != RunStatus.COMPLETED:
+                break
+
+            parent_id = _bubble_completion(current_state)
+            if not parent_id:
+                break
+            if parent_id == top_run_id:
+                break
+
+        return 0
+
+    state = None
+    while True:
+        state = shell._agent.step()
+        if state.status in (RunStatus.COMPLETED, RunStatus.FAILED, RunStatus.CANCELLED):
+            break
+
+        if state.status != RunStatus.WAITING or not getattr(state, "waiting", None):
+            continue
+
+        wait = state.waiting
+
+        if wait.reason == WaitReason.USER:
+            prompt_text = str(wait.prompt or "Please respond:").strip()
+            response = input(prompt_text + " ")
+            shell._agent.resume(response)
+            continue
+
+        if wait.reason == WaitReason.SUBWORKFLOW:
+            rc = _drive_subworkflow_wait(top_run_id=run_id)
+            if rc != 0:
+                return rc
+            continue
+
+        if wait.reason == WaitReason.EVENT:
+            details = wait.details or {}
+            tool_calls = details.get("tool_calls")
+            if isinstance(tool_calls, list):
+                payload = _approve_and_execute(
+                    tool_calls=tool_calls,
+                    tool_runner=shell._tool_runner,
+                    auto_approve=bool(shell._auto_approve),
+                    approval_state=approval_state,
+                    prompt_fn=input,
+                    print_fn=_stderr_print,
+                )
+                if payload is None:
+                    print("Aborted (tool calls not executed).", file=sys.stderr)
+                    return 1
+
+                shell._runtime.resume(
+                    workflow=shell._agent.workflow,
+                    run_id=run_id,
+                    wait_key=wait.wait_key,
+                    payload=payload,
+                )
+                continue
+
+        if isinstance(wait.prompt, str) and wait.prompt.strip() and isinstance(wait.wait_key, str) and wait.wait_key:
+            response = input(wait.prompt.strip() + " ")
+            shell._runtime.resume(
+                workflow=shell._agent.workflow,
+                run_id=run_id,
+                wait_key=wait.wait_key,
+                payload={"response": response},
+            )
+            continue
+
+        print(f"Run waiting: {wait.reason.value} ({wait.wait_key})", file=sys.stderr)
+        return 2
+
+    if state is None:
+        print("Run failed: no state produced.", file=sys.stderr)
+        return 1
+
+    def _pick_textish(value):
+        if isinstance(value, str):
+            return value.strip()
+        if value is None:
+            return ""
+        if isinstance(value, bool):
+            return str(value).lower()
+        if isinstance(value, (int, float)):
+            return str(value)
+        return ""
+
+    def _extract_answer_text(output):
+        if not isinstance(output, dict):
+            return ""
+        payload = output.get("result") if isinstance(output.get("result"), dict) else output
+        text = _pick_textish(payload.get("response"))
+        if not text:
+            text = (
+                _pick_textish(payload.get("answer"))
+                or _pick_textish(payload.get("message"))
+                or _pick_textish(payload.get("text"))
+                or _pick_textish(payload.get("content"))
+            )
+        if not text and isinstance(output.get("result"), str):
+            text = str(output.get("result") or "").strip()
+        return text
+
+    output = getattr(state, "output", None)
+    answer_text = _extract_answer_text(output)
+    if isinstance(answer_text, str) and answer_text.strip():
+        print(answer_text.strip())
+
+    if state.status == RunStatus.COMPLETED:
+        return 0
+
+    err = str(getattr(state, "error", None) or "unknown error")
+    print(f"Run failed: {err}", file=sys.stderr)
+    return 1
+
+
 def build_flow_parser() -> argparse.ArgumentParser:
     parser = argparse.ArgumentParser(
         prog="abstractcode flow",
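The one-shot path above leans on the new abstractcode/file_mentions.py module. A minimal sketch of how those helpers appear to compose, inferred only from the call sites in this hunk (the actual signatures and return shapes may differ):

# Sketch inferred from the call sites above, not an authoritative API reference:
# extract_at_file_mentions() appears to split "@path" mentions out of the prompt
# text, and normalize_relative_path() sanitizes each mention; duplicates are then
# dropped while preserving order, exactly as _run_one_shot_prompt does.
from abstractcode.file_mentions import extract_at_file_mentions, normalize_relative_path

cleaned, mentions = extract_at_file_mentions("Summarize @README.md and @README.md please")
paths = [p for p in (normalize_relative_path(m) for m in mentions) if p]

seen: set[str] = set()
paths = [p for p in paths if not (p in seen or seen.add(p))]  # de-dup, order preserved
print(cleaned, paths)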
@@ -285,9 +664,148 @@ def build_flow_parser() -> argparse.ArgumentParser:
     return parser
 
 
+def build_gateway_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        prog="abstractcode gateway",
+        description="Run/observe workflows via AbstractGateway (HTTP control plane).",
+    )
+    sub = parser.add_subparsers(dest="command")
+
+    run = sub.add_parser("run", help="Start a new gateway run and follow it")
+    run.add_argument("flow_id", help="Flow id to start (or 'bundle:flow')")
+    run.add_argument("--bundle-id", default=None, help="Bundle id (optional if flow_id is namespaced)")
+    run.add_argument("--gateway-url", default=None, help="Gateway base URL (default: $ABSTRACTCODE_GATEWAY_URL)")
+    run.add_argument("--gateway-token", default=None, help="Gateway auth token (default: $ABSTRACTCODE_GATEWAY_TOKEN)")
+    run.add_argument(
+        "--input-json",
+        default=None,
+        help='JSON object string passed to the flow entry (e.g. \'{"prompt":"..."}\')',
+    )
+    run.add_argument(
+        "--input-file",
+        "--input-json-file",
+        dest="input_file",
+        default=None,
+        help="Path to a JSON file (object) passed to the flow entry",
+    )
+    run.add_argument(
+        "--param",
+        action="append",
+        default=[],
+        help="Set an input param as key=value (repeatable). Example: --param max_iterations=5",
+    )
+    run.add_argument("--no-follow", action="store_true", help="Do not tail the run; only print run_id")
+    run.add_argument("--poll-s", type=float, default=0.25, help="Polling interval when following (default: 0.25)")
+
+    attach = sub.add_parser("attach", help="Attach to an existing run_id and follow it")
+    attach.add_argument("run_id", help="Existing run_id to follow")
+    attach.add_argument("--gateway-url", default=None, help="Gateway base URL (default: $ABSTRACTCODE_GATEWAY_URL)")
+    attach.add_argument("--gateway-token", default=None, help="Gateway auth token (default: $ABSTRACTCODE_GATEWAY_TOKEN)")
+    attach.add_argument("--poll-s", type=float, default=0.25, help="Polling interval when following (default: 0.25)")
+
+    kg = sub.add_parser("kg", help="Query/dump the persisted KG (AbstractMemory triple store)")
+    kg.add_argument(
+        "id",
+        nargs="?",
+        default=None,
+        help="run_id or session_id (optional when using --scope global or --all-owners)",
+    )
+    kg.add_argument("--gateway-url", default=None, help="Gateway base URL (default: $ABSTRACTCODE_GATEWAY_URL)")
+    kg.add_argument("--gateway-token", default=None, help="Gateway auth token (default: $ABSTRACTCODE_GATEWAY_TOKEN)")
+    kg.add_argument("--scope", choices=("run", "session", "global", "all"), default="session", help="KG scope (default: session)")
+    kg.add_argument("--owner-id", default=None, help="Explicit owner_id override (bypasses scope owner resolution)")
+    kg.add_argument("--all-owners", action="store_true", help="Query across all owner_ids within the selected scope(s) (debug/audit)")
+    kg.add_argument("--subject", default=None, help="Filter: exact subject")
+    kg.add_argument("--predicate", default=None, help="Filter: exact predicate")
+    kg.add_argument("--object", dest="object", default=None, help="Filter: exact object")
+    kg.add_argument("--since", default=None, help="Filter: observed_at >= since (ISO 8601 string compare)")
+    kg.add_argument("--until", default=None, help="Filter: observed_at <= until (ISO 8601 string compare)")
+    kg.add_argument("--active-at", dest="active_at", default=None, help="Filter: valid_from/valid_until window intersection")
+    kg.add_argument("--query-text", dest="query_text", default=None, help="Optional semantic query text (requires embedder configured on the store)")
+    kg.add_argument("--min-score", dest="min_score", type=float, default=None, help="Semantic similarity threshold (0..1)")
+    kg.add_argument("--limit", type=int, default=0, help="Max results (default: 0 = unlimited; -1 = unlimited; positive = limit)")
+    kg.add_argument("--order", choices=("asc", "desc"), default="desc", help="Order by observed_at for non-semantic queries (default: desc)")
+    kg.add_argument(
+        "--format",
+        choices=("triples", "jsonl", "json"),
+        default="triples",
+        help="Output format: triples|jsonl|json (default: triples)",
+    )
+    kg.add_argument("--pretty", action="store_true", help="Pretty-print JSON output (json format only)")
+
+    return parser
+
+
 def main(argv: Optional[Sequence[str]] = None) -> int:
     argv_list = list(argv) if argv is not None else sys.argv[1:]
 
+    if argv_list and argv_list[0] == "gateway":
+        parser = build_gateway_parser()
+        args, unknown = parser.parse_known_args(argv_list[1:])
+        from .gateway_cli import attach_gateway_run_command, query_gateway_kg_command, run_gateway_flow_command
+
+        cmd = getattr(args, "command", None)
+        if cmd == "run":
+            from .flow_cli import _parse_input_json, _parse_kv_list, _parse_unknown_params
+
+            input_data = _parse_input_json(raw_json=args.input_json, json_path=args.input_file)
+            input_data.update(_parse_kv_list(list(getattr(args, "param", []) or [])))
+            # Allow unknown args to be interpreted as params (same as `flow run`).
+            input_data.update(_parse_unknown_params(list(unknown or [])))
+
+            run_gateway_flow_command(
+                gateway_url=args.gateway_url,
+                gateway_token=args.gateway_token,
+                flow_id=str(args.flow_id),
+                bundle_id=str(args.bundle_id).strip() if isinstance(args.bundle_id, str) and str(args.bundle_id).strip() else None,
+                input_data=input_data,
+                follow=not bool(getattr(args, "no_follow", False)),
+                poll_s=float(getattr(args, "poll_s", 0.25) or 0.25),
+            )
+            return 0
+
+        if cmd == "attach":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            attach_gateway_run_command(
+                gateway_url=args.gateway_url,
+                gateway_token=args.gateway_token,
+                run_id=str(args.run_id),
+                follow=True,
+                poll_s=float(getattr(args, "poll_s", 0.25) or 0.25),
+            )
+            return 0
+
+        if cmd == "kg":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            id_raw = getattr(args, "id", None)
+            id_value = str(id_raw).strip() if isinstance(id_raw, str) and str(id_raw).strip() else None
+            query_gateway_kg_command(
+                gateway_url=args.gateway_url,
+                gateway_token=args.gateway_token,
+                run_id=id_value,
+                scope=str(args.scope),
+                owner_id=getattr(args, "owner_id", None),
+                all_owners=bool(getattr(args, "all_owners", False)),
+                subject=getattr(args, "subject", None),
+                predicate=getattr(args, "predicate", None),
+                object_value=getattr(args, "object", None),
+                since=getattr(args, "since", None),
+                until=getattr(args, "until", None),
+                active_at=getattr(args, "active_at", None),
+                query_text=getattr(args, "query_text", None),
+                min_score=getattr(args, "min_score", None),
+                limit=int(getattr(args, "limit", 0)),
+                order=str(getattr(args, "order", "desc") or "desc"),
+                fmt=str(getattr(args, "format", "triples") or "triples"),
+                pretty=bool(getattr(args, "pretty", False)),
+            )
+            return 0
+
+        build_gateway_parser().print_help()
+        return 2
+
     if argv_list and argv_list[0] == "flow":
         parser = build_flow_parser()
         args, unknown = parser.parse_known_args(argv_list[1:])
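For reference, a hypothetical invocation of the new gateway subcommand, sketched only against the argparse definitions added above; "my_flow" is a placeholder flow id and a reachable gateway at $ABSTRACTCODE_GATEWAY_URL is assumed:

# Hypothetical usage sketch (placeholder flow id, gateway reachability assumed).
from abstractcode.cli import main

exit_code = main([
    "gateway", "run", "my_flow",
    "--param", "max_iterations=5",   # repeatable key=value inputs
    "--no-follow",                   # print the run_id instead of tailing the run
])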
@@ -376,9 +894,92 @@ def main(argv: Optional[Sequence[str]] = None) -> int:
         build_flow_parser().print_help()
         return 2
 
+    if argv_list and argv_list[0] == "workflow":
+        parser = build_workflow_parser()
+        args, unknown = parser.parse_known_args(argv_list[1:])
+        if unknown:
+            parser.error(f"Unknown arguments: {' '.join(unknown)}")
+        from .workflow_cli import (
+            deprecate_workflow_bundle_command,
+            install_workflow_bundle_command,
+            list_workflow_bundles_command,
+            remove_workflow_bundle_command,
+            undeprecate_workflow_bundle_command,
+            workflow_bundle_info_command,
+        )
+
+        cmd = getattr(args, "command", None)
+        if cmd == "install":
+            install_workflow_bundle_command(
+                source=str(args.source),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                overwrite=bool(getattr(args, "overwrite", False)),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+        if cmd == "list":
+            list_workflow_bundles_command(
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                interface=getattr(args, "interface", None),
+                all_versions=bool(getattr(args, "all", False)),
+                include_deprecated=bool(getattr(args, "include_deprecated", False)),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+        if cmd == "info":
+            workflow_bundle_info_command(
+                bundle_ref=str(args.bundle),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+        if cmd == "remove":
+            remove_workflow_bundle_command(
+                bundle_ref=str(args.bundle),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+
+        if cmd == "deprecate":
+            deprecate_workflow_bundle_command(
+                bundle_id=str(args.bundle),
+                flow_id=getattr(args, "flow_id", None),
+                reason=getattr(args, "reason", None),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+
+        if cmd == "undeprecate":
+            undeprecate_workflow_bundle_command(
+                bundle_id=str(args.bundle),
+                flow_id=getattr(args, "flow_id", None),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+
+        build_workflow_parser().print_help()
+        return 2
+
     args = build_agent_parser().parse_args(argv_list)
     state_file = None if args.no_state else args.state_file
 
+    # Best-effort: pass gateway settings to the TUI via env vars (not persisted).
+    gw_url = getattr(args, "gateway_url", None)
+    if isinstance(gw_url, str) and gw_url.strip():
+        os.environ["ABSTRACTCODE_GATEWAY_URL"] = gw_url.strip()
+    gw_token = getattr(args, "gateway_token", None)
+    if isinstance(gw_token, str) and gw_token.strip():
+        os.environ["ABSTRACTCODE_GATEWAY_TOKEN"] = gw_token.strip()
+
     shell = ReactShell(
         agent=str(args.agent),
         provider=args.provider,
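Likewise, a hypothetical end-to-end sketch of the new workflow bundle management commands dispatched in this hunk; the bundle path and bundle ref are placeholders, and gateway credentials are assumed to be provided via $ABSTRACTCODE_GATEWAY_URL / $ABSTRACTCODE_GATEWAY_TOKEN:

# Hypothetical usage sketch (placeholder bundle path/ref, gateway env vars assumed).
from abstractcode.cli import main

main(["workflow", "install", "./my_bundle.flow", "--overwrite", "--json"])
main(["workflow", "list", "--json"])
main(["workflow", "remove", "my_bundle@1.0.0"])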
@@ -393,6 +994,16 @@ def main(argv: Optional[Sequence[str]] = None) -> int:
         max_tokens=args.max_tokens,
         color=not bool(args.no_color),
     )
+
+    prompt = getattr(args, "prompt", None)
+    if isinstance(prompt, str) and prompt.strip():
+        if state_file:
+            try:
+                shell._try_load_state()
+            except Exception:
+                pass
+        return _run_one_shot_prompt(shell=shell, prompt=prompt)
+
     shell.run()
     return 0
 
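Finally, the one-shot mode wired up in this last hunk can be exercised without the full-screen UI; a hypothetical sketch, assuming a provider/model is already configured for the shell:

# Hypothetical usage sketch: --prompt runs a single task via _run_one_shot_prompt()
# and exits with its return code; @file mentions become workspace attachments.
from abstractcode.cli import main

exit_code = main(["--prompt", "Summarize @README.md in three bullets"])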