abstractcode-0.2.0-py3-none-any.whl → abstractcode-0.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcode/__init__.py +1 -1
- abstractcode/cli.py +911 -9
- abstractcode/file_mentions.py +276 -0
- abstractcode/flow_cli.py +1413 -0
- abstractcode/fullscreen_ui.py +2473 -158
- abstractcode/gateway_cli.py +715 -0
- abstractcode/py.typed +1 -0
- abstractcode/react_shell.py +8140 -546
- abstractcode/recall.py +384 -0
- abstractcode/remember.py +184 -0
- abstractcode/terminal_markdown.py +557 -0
- abstractcode/theme.py +244 -0
- abstractcode/workflow_agent.py +1412 -0
- abstractcode/workflow_cli.py +229 -0
- abstractcode-0.3.1.dist-info/METADATA +158 -0
- abstractcode-0.3.1.dist-info/RECORD +21 -0
- {abstractcode-0.2.0.dist-info → abstractcode-0.3.1.dist-info}/WHEEL +1 -1
- abstractcode-0.2.0.dist-info/METADATA +0 -160
- abstractcode-0.2.0.dist-info/RECORD +0 -11
- {abstractcode-0.2.0.dist-info → abstractcode-0.3.1.dist-info}/entry_points.txt +0 -0
- {abstractcode-0.2.0.dist-info → abstractcode-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {abstractcode-0.2.0.dist-info → abstractcode-0.3.1.dist-info}/top_level.txt +0 -0
abstractcode/cli.py
CHANGED
@@ -36,25 +36,45 @@ def _default_max_tokens() -> Optional[int]:
             value = int(env)
         except ValueError:
             raise SystemExit("ABSTRACTCODE_MAX_TOKENS must be an integer.")
-        if value < 1024:
-            raise SystemExit("ABSTRACTCODE_MAX_TOKENS must be >= 1024.")
+        if value != -1 and value < 1024:
+            raise SystemExit("ABSTRACTCODE_MAX_TOKENS must be -1 (auto) or >= 1024.")
         return value
-    return
+    return -1  # Auto (use model capabilities)
 
 
-def
+def build_agent_parser() -> argparse.ArgumentParser:
     parser = argparse.ArgumentParser(
         prog="abstractcode",
-        description="AbstractCode: an interactive terminal shell for AbstractFramework agents
+        description="AbstractCode: an interactive terminal shell for AbstractFramework (agents + workflows).",
+        epilog=(
+            "Workflows:\n"
+            " abstractcode flow --help Run AbstractFlow workflows from the terminal\n"
+            " abstractcode workflow --help Install/list workflow bundles\n"
+            "REPL:\n"
+            " Use /flow inside the REPL to run workflows while keeping chat context.\n"
+        ),
+        formatter_class=argparse.RawTextHelpFormatter,
     )
     parser.add_argument(
         "--agent",
-        choices=("react", "codeact"),
         default=os.getenv("ABSTRACTCODE_AGENT", "react"),
-        help=
+        help=(
+            "Agent selector:\n"
+            " - Built-ins: react | codeact | memact\n"
+            " - Workflow agent:\n"
+            " <flow_id> | <flow_name> | </path/to/flow.json>\n"
+            " <bundle_id>[@version] | </path/to/bundle.flow>\n"
+            " <bundle_id>[@version]:<flow_id>\n"
+            " (must implement interface 'abstractcode.agent.v1')"
+        ),
     )
     parser.add_argument("--provider", default="ollama", help="LLM provider (e.g. ollama, openai)")
     parser.add_argument("--model", default="qwen3:1.7b-q4_K_M", help="Model name")
+    parser.add_argument(
+        "--base-url",
+        default=os.getenv("ABSTRACTCODE_BASE_URL"),
+        help="Provider base URL (e.g. http://localhost:1234/v1). Also supports ABSTRACTCODE_BASE_URL.",
+    )
     parser.add_argument(
         "--state-file",
         default=_default_state_file(),
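The first hunk above makes -1 an explicit "auto" sentinel for the ABSTRACTCODE_MAX_TOKENS override and for --max-tokens. A standalone sketch of the resulting validation, under that reading (this helper name is illustrative, not the packaged function):

import os

def max_tokens_from_env() -> int:
    env = os.getenv("ABSTRACTCODE_MAX_TOKENS")
    if not env:
        return -1  # auto: size the context budget from the model's capabilities
    value = int(env)  # the real helper exits via SystemExit on a non-integer
    if value != -1 and value < 1024:
        raise ValueError("ABSTRACTCODE_MAX_TOKENS must be -1 (auto) or >= 1024")
    return value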
@@ -72,6 +92,30 @@ def build_parser() -> argparse.ArgumentParser:
         dest="auto_approve",
         help="Automatically approve tool calls (unsafe; disables interactive approvals).",
     )
+    parser.add_argument(
+        "--plan",
+        action="store_true",
+        help="Enable Plan mode (agent generates a TODO plan before acting).",
+    )
+    parser.add_argument(
+        "--review",
+        action="store_true",
+        dest="review",
+        help="Enable verifier mode (default: enabled).",
+    )
+    parser.add_argument(
+        "--no-review",
+        action="store_false",
+        dest="review",
+        help="Disable verifier mode (not recommended).",
+    )
+    parser.set_defaults(review=True)
+    parser.add_argument(
+        "--review-max-rounds",
+        type=int,
+        default=3,
+        help="Max verifier rounds per task (default: 3).",
+    )
     parser.add_argument(
         "--max-iterations",
         type=int,
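The second hunk adds Plan mode and the verifier ("review") toggles to the agent parser. Since main() accepts an argv sequence (see the next hunk), a hypothetical programmatic invocation combining them with the new one-shot --prompt flag could look like this (the file mention is a placeholder):

from abstractcode.cli import main

# Plan mode on, verifier capped at 5 rounds, single prompt then exit.
rc = main(["--plan", "--review-max-rounds", "5", "--prompt", "Refactor @src/app.py"])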
@@ -82,26 +126,884 @@ def build_parser() -> argparse.ArgumentParser:
         "--max-tokens",
         type=int,
         default=_default_max_tokens(),
-        help="Maximum context tokens for LLM calls (
+        help="Maximum context tokens for LLM calls (-1 = auto from model capabilities).",
+    )
+    parser.add_argument(
+        "--prompt",
+        default=None,
+        help="Run a single prompt and exit (supports @file mentions).",
     )
     parser.add_argument("--no-color", action="store_true", help="Disable ANSI colors")
+    parser.add_argument(
+        "--gateway-url",
+        default=None,
+        help=(
+            "AbstractGateway base URL (for host metrics like /gpu).\n"
+            "Overrides $ABSTRACTCODE_GATEWAY_URL for this run."
+        ),
+    )
+    parser.add_argument(
+        "--gateway-token",
+        default=None,
+        help=(
+            "AbstractGateway auth token (Bearer) (for host metrics like /gpu).\n"
+            "Overrides $ABSTRACTCODE_GATEWAY_TOKEN for this run (not persisted)."
+        ),
+    )
+    return parser
+
+
+def build_workflow_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        prog="abstractcode workflow",
+        description="Manage WorkflowBundle (.flow) bundles on an AbstractGateway host (upload/remove/discovery).",
+    )
+    sub = parser.add_subparsers(dest="command")
+
+    common = argparse.ArgumentParser(add_help=False)
+    common.add_argument("--gateway-url", default=None, help="Gateway base URL (default: $ABSTRACTCODE_GATEWAY_URL)")
+    common.add_argument("--gateway-token", default=None, help="Gateway auth token (default: $ABSTRACTCODE_GATEWAY_TOKEN)")
+
+    install = sub.add_parser("install", parents=[common], help="Upload/install a .flow bundle onto the gateway")
+    install.add_argument("source", help="Path to a .flow file")
+    install.add_argument("--overwrite", action="store_true", help="Overwrite if already installed")
+    install.add_argument("--json", action="store_true", help="Output JSON")
+
+    ls = sub.add_parser("list", parents=[common], help="List available workflow entrypoints (from gateway bundles)")
+    ls.add_argument("--interface", default=None, help="Filter entrypoints by interface id")
+    ls.add_argument("--all", action="store_true", help="Include all versions (default: latest only)")
+    ls.add_argument("--include-deprecated", action="store_true", help="Include deprecated workflows")
+    ls.add_argument("--json", action="store_true", help="Output JSON")
+
+    info = sub.add_parser("info", parents=[common], help="Show details for an installed bundle")
+    info.add_argument("bundle", help="Bundle ref: bundle_id or bundle_id@version")
+    info.add_argument("--json", action="store_true", help="Output JSON")
+
+    rm = sub.add_parser("remove", parents=[common], help="Remove an installed bundle (bundle_id or bundle_id@version)")
+    rm.add_argument("bundle", help="Bundle ref: bundle_id or bundle_id@version")
+    rm.add_argument("--json", action="store_true", help="Output JSON")
+
+    dep = sub.add_parser("deprecate", parents=[common], help="Deprecate a workflow bundle on the gateway (hide + block launch)")
+    dep.add_argument("bundle", help="Bundle id (bundle_id)")
+    dep.add_argument("--flow-id", default=None, help="Optional entrypoint flow_id (default: all entrypoints)")
+    dep.add_argument("--reason", default=None, help="Optional reason")
+    dep.add_argument("--json", action="store_true", help="Output JSON")
+
+    undep = sub.add_parser("undeprecate", parents=[common], help="Undeprecate a workflow bundle on the gateway")
+    undep.add_argument("bundle", help="Bundle id (bundle_id)")
+    undep.add_argument("--flow-id", default=None, help="Optional entrypoint flow_id (default: all entrypoints)")
+    undep.add_argument("--json", action="store_true", help="Output JSON")
+
+    return parser
+
+
+def _run_one_shot_prompt(*, shell: ReactShell, prompt: str) -> int:
+    """Run one task and exit (no full-screen UI)."""
+    from .file_mentions import extract_at_file_mentions, normalize_relative_path
+    from .flow_cli import _ApprovalState, _approve_and_execute
+
+    # Lazy imports: keep `abstractcode --help` fast.
+    from abstractruntime.core.models import RunStatus, WaitReason
+
+    text = str(prompt or "").strip()
+    if not text:
+        return 0
+
+    def _stderr_print(msg: str) -> None:
+        print(msg, file=sys.stderr)
+
+    cleaned, mentions = extract_at_file_mentions(text)
+    paths: list[str] = []
+    for m in mentions:
+        norm = normalize_relative_path(m)
+        if norm:
+            paths.append(norm)
+
+    # De-dup while preserving order.
+    seen: set[str] = set()
+    paths = [p for p in paths if not (p in seen or seen.add(p))]
+
+    attachment_refs = shell._ingest_workspace_attachments(paths) if paths else []
+    if attachment_refs:
+        joined = ", ".join(
+            [
+                str(a.get("source_path") or a.get("filename") or "?")
+                for a in attachment_refs
+                if isinstance(a, dict)
+            ]
+        )
+        if joined:
+            print(f"Attachments: {joined}", file=sys.stderr)
+
+    cleaned = str(cleaned or "").strip()
+    if not cleaned:
+        # Attachment-only invocation: allow users to attach files without issuing a prompt.
+        return 0
+
+    run_id = shell._agent.start(cleaned, allowed_tools=shell._allowed_tools, attachments=attachment_refs or None)
+    try:
+        shell._sync_tool_prompt_settings_to_run(run_id)
+    except Exception:
+        pass
+    if getattr(shell, "_state_file", None):
+        try:
+            shell._agent.save_state(shell._state_file)  # type: ignore[arg-type]
+        except Exception:
+            pass
+
+    approval_state = _ApprovalState()
+
+    def _drive_subworkflow_wait(*, top_run_id: str) -> int:
+        """Drive async subworkflow waits until top run can advance or blocks on a real wait."""
+
+        def _extract_sub_run_id(wait_state: object) -> Optional[str]:
+            details = getattr(wait_state, "details", None)
+            if isinstance(details, dict):
+                sub_run_id = details.get("sub_run_id")
+                if isinstance(sub_run_id, str) and sub_run_id:
+                    return sub_run_id
+            wait_key = getattr(wait_state, "wait_key", None)
+            if isinstance(wait_key, str) and wait_key.startswith("subworkflow:"):
+                return wait_key.split("subworkflow:", 1)[1] or None
+            return None
+
+        def _workflow_for(run_state: object):
+            reg = getattr(shell._runtime, "workflow_registry", None)
+            getter = getattr(reg, "get", None) if reg is not None else None
+            if callable(getter):
+                wf = getter(run_state.workflow_id)
+                if wf is not None:
+                    return wf
+            if getattr(shell._agent.workflow, "workflow_id", None) == run_state.workflow_id:
+                return shell._agent.workflow
+            raise RuntimeError(f"Workflow '{run_state.workflow_id}' not found in runtime registry")
+
+        def _bubble_completion(child_state: object) -> Optional[str]:
+            parent_id = getattr(child_state, "parent_run_id", None)
+            if not isinstance(parent_id, str) or not parent_id:
+                return None
+            parent_state = shell._runtime.get_state(parent_id)
+            parent_wait = getattr(parent_state, "waiting", None)
+            if parent_state.status != RunStatus.WAITING or parent_wait is None:
+                return None
+            if parent_wait.reason != WaitReason.SUBWORKFLOW:
+                return None
+            shell._runtime.resume(
+                workflow=_workflow_for(parent_state),
+                run_id=parent_id,
+                wait_key=None,
+                payload={
+                    "sub_run_id": child_state.run_id,
+                    "output": getattr(child_state, "output", None),
+                    "node_traces": shell._runtime.get_node_traces(child_state.run_id),
+                },
+                max_steps=0,
+            )
+            return parent_id
+
+        # Drive subruns until we either make progress or hit a non-subworkflow wait.
+        for _ in range(200):
+            # Descend to the deepest sub-run referenced by SUBWORKFLOW waits.
+            current_run_id = top_run_id
+            for _ in range(25):
+                cur_state = shell._runtime.get_state(current_run_id)
+                cur_wait = getattr(cur_state, "waiting", None)
+                if cur_state.status != RunStatus.WAITING or cur_wait is None:
+                    break
+                if cur_wait.reason != WaitReason.SUBWORKFLOW:
+                    break
+                next_id = _extract_sub_run_id(cur_wait)
+                if not next_id:
+                    break
+                current_run_id = next_id
+
+            current_state = shell._runtime.get_state(current_run_id)
+
+            # Tick running subruns until they block/complete.
+            if current_state.status == RunStatus.RUNNING:
+                current_state = shell._runtime.tick(
+                    workflow=_workflow_for(current_state),
+                    run_id=current_run_id,
+                    max_steps=100,
+                )
+
+            if current_state.status == RunStatus.RUNNING:
+                continue
+
+            if current_state.status == RunStatus.FAILED:
+                _stderr_print(f"Run failed: {current_state.error or 'Subworkflow failed'}")
+                return 1
+
+            if current_state.status == RunStatus.CANCELLED:
+                _stderr_print("Run cancelled.")
+                return 1
+
+            if current_state.status == RunStatus.WAITING:
+                cur_wait = getattr(current_state, "waiting", None)
+                if cur_wait is None:
+                    break
+                if cur_wait.reason == WaitReason.SUBWORKFLOW:
+                    continue
+
+                if cur_wait.reason == WaitReason.USER:
+                    prompt_text = str(cur_wait.prompt or "Please respond:").strip()
+                    response = input(prompt_text + " ")
+                    shell._runtime.resume(
+                        workflow=_workflow_for(current_state),
+                        run_id=current_run_id,
+                        wait_key=cur_wait.wait_key,
+                        payload={"response": response},
+                    )
+                    continue
+
+                if cur_wait.reason == WaitReason.EVENT:
+                    details = cur_wait.details if isinstance(cur_wait.details, dict) else {}
+                    tool_calls = details.get("tool_calls")
+                    if isinstance(tool_calls, list):
+                        payload = _approve_and_execute(
+                            tool_calls=tool_calls,
+                            tool_runner=shell._tool_runner,
+                            auto_approve=bool(shell._auto_approve),
+                            approval_state=approval_state,
+                            prompt_fn=input,
+                            print_fn=_stderr_print,
+                        )
+                        if payload is None:
+                            _stderr_print("Aborted (tool calls not executed).")
+                            return 1
+                        shell._runtime.resume(
+                            workflow=_workflow_for(current_state),
+                            run_id=current_run_id,
+                            wait_key=cur_wait.wait_key,
+                            payload=payload,
+                        )
+                        continue
+
+                if isinstance(cur_wait.prompt, str) and cur_wait.prompt.strip() and isinstance(cur_wait.wait_key, str) and cur_wait.wait_key:
+                    response = input(cur_wait.prompt.strip() + " ")
+                    shell._runtime.resume(
+                        workflow=_workflow_for(current_state),
+                        run_id=current_run_id,
+                        wait_key=cur_wait.wait_key,
+                        payload={"response": response},
+                    )
+                    continue
+
+                _stderr_print(f"Run waiting: {cur_wait.reason.value} ({cur_wait.wait_key})")
+                return 2
+
+            if current_state.status != RunStatus.COMPLETED:
+                break
+
+            parent_id = _bubble_completion(current_state)
+            if not parent_id:
+                break
+            if parent_id == top_run_id:
+                break
+
+        return 0
+
+    state = None
+    while True:
+        state = shell._agent.step()
+        if state.status in (RunStatus.COMPLETED, RunStatus.FAILED, RunStatus.CANCELLED):
+            break
+
+        if state.status != RunStatus.WAITING or not getattr(state, "waiting", None):
+            continue
+
+        wait = state.waiting
+
+        if wait.reason == WaitReason.USER:
+            prompt_text = str(wait.prompt or "Please respond:").strip()
+            response = input(prompt_text + " ")
+            shell._agent.resume(response)
+            continue
+
+        if wait.reason == WaitReason.SUBWORKFLOW:
+            rc = _drive_subworkflow_wait(top_run_id=run_id)
+            if rc != 0:
+                return rc
+            continue
+
+        if wait.reason == WaitReason.EVENT:
+            details = wait.details or {}
+            tool_calls = details.get("tool_calls")
+            if isinstance(tool_calls, list):
+                payload = _approve_and_execute(
+                    tool_calls=tool_calls,
+                    tool_runner=shell._tool_runner,
+                    auto_approve=bool(shell._auto_approve),
+                    approval_state=approval_state,
+                    prompt_fn=input,
+                    print_fn=_stderr_print,
+                )
+                if payload is None:
+                    print("Aborted (tool calls not executed).", file=sys.stderr)
+                    return 1
+
+                shell._runtime.resume(
+                    workflow=shell._agent.workflow,
+                    run_id=run_id,
+                    wait_key=wait.wait_key,
+                    payload=payload,
+                )
+                continue
+
+        if isinstance(wait.prompt, str) and wait.prompt.strip() and isinstance(wait.wait_key, str) and wait.wait_key:
+            response = input(wait.prompt.strip() + " ")
+            shell._runtime.resume(
+                workflow=shell._agent.workflow,
+                run_id=run_id,
+                wait_key=wait.wait_key,
+                payload={"response": response},
+            )
+            continue
+
+        print(f"Run waiting: {wait.reason.value} ({wait.wait_key})", file=sys.stderr)
+        return 2
+
+    if state is None:
+        print("Run failed: no state produced.", file=sys.stderr)
+        return 1
+
+    def _pick_textish(value):
+        if isinstance(value, str):
+            return value.strip()
+        if value is None:
+            return ""
+        if isinstance(value, bool):
+            return str(value).lower()
+        if isinstance(value, (int, float)):
+            return str(value)
+        return ""
+
+    def _extract_answer_text(output):
+        if not isinstance(output, dict):
+            return ""
+        payload = output.get("result") if isinstance(output.get("result"), dict) else output
+        text = _pick_textish(payload.get("response"))
+        if not text:
+            text = (
+                _pick_textish(payload.get("answer"))
+                or _pick_textish(payload.get("message"))
+                or _pick_textish(payload.get("text"))
+                or _pick_textish(payload.get("content"))
+            )
+        if not text and isinstance(output.get("result"), str):
+            text = str(output.get("result") or "").strip()
+        return text
+
+    output = getattr(state, "output", None)
+    answer_text = _extract_answer_text(output)
+    if isinstance(answer_text, str) and answer_text.strip():
+        print(answer_text.strip())
+
+    if state.status == RunStatus.COMPLETED:
+        return 0
+
+    err = str(getattr(state, "error", None) or "unknown error")
+    print(f"Run failed: {err}", file=sys.stderr)
+    return 1
+
+
+def build_flow_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        prog="abstractcode flow",
+        description="Run AbstractFlow visual workflows from AbstractCode.",
+    )
+    sub = parser.add_subparsers(dest="command")
+
+    run = sub.add_parser("run", help="Start a new flow run")
+    run.add_argument("flow", help="Flow id (from flows dir) or path to a VisualFlow .json file")
+    run.add_argument("--flows-dir", default=None, help="Directory containing VisualFlow JSON files")
+    run.add_argument(
+        "--input-json",
+        default=None,
+        help='JSON object string passed to the flow entry (e.g. \'{"query":"..."}\')',
+    )
+    run.add_argument(
+        "--input-file",
+        "--input-json-file",
+        dest="input_file",
+        default=None,
+        help="Path to a JSON file (object) passed to the flow entry",
+    )
+    run.add_argument(
+        "--param",
+        action="append",
+        default=[],
+        help="Set an input param as key=value (repeatable). Example: --param max_web_search=15",
+    )
+    run.add_argument(
+        "--flow-state-file",
+        default=None,
+        help="Path to store the last flow run reference (default: ~/.abstractcode/flow_state.json).",
+    )
+    run.add_argument("--no-state", action="store_true", help="Disable persistence (cannot resume after quitting).")
+    run.add_argument(
+        "--auto-approve",
+        "--accept-tools",
+        "--auto-accept",
+        action="store_true",
+        dest="auto_approve",
+        help="Automatically approve tool calls (unsafe; disables interactive approvals).",
+    )
+    run.add_argument(
+        "--verbosity",
+        choices=("none", "default", "full"),
+        default="default",
+        help="Observability level: none|default|full (default: default).",
+    )
+    run.add_argument(
+        "--wait-until",
+        action="store_true",
+        help="If waiting on a time-based event (WAIT_UNTIL), keep sleeping and resuming automatically.",
+    )
+
+    resume = sub.add_parser("resume", help="Resume the last saved flow run and drive until it blocks again")
+    resume.add_argument(
+        "--flow-state-file",
+        default=None,
+        help="Path to the saved run reference (default: ~/.abstractcode/flow_state.json).",
+    )
+    resume.add_argument(
+        "--auto-approve",
+        "--accept-tools",
+        "--auto-accept",
+        action="store_true",
+        dest="auto_approve",
+        help="Automatically approve tool calls (unsafe; disables interactive approvals).",
+    )
+    resume.add_argument(
+        "--verbosity",
+        choices=("none", "default", "full"),
+        default="default",
+        help="Observability level: none|default|full (default: default).",
+    )
+    resume.add_argument(
+        "--wait-until",
+        action="store_true",
+        help="If waiting on a time-based event (WAIT_UNTIL), keep sleeping and resuming automatically.",
+    )
+
+    pause = sub.add_parser("pause", help="Pause the last saved flow run (best-effort includes descendants)")
+    pause.add_argument(
+        "--flow-state-file",
+        default=None,
+        help="Path to the saved run reference (default: ~/.abstractcode/flow_state.json).",
+    )
+
+    resume_run = sub.add_parser("resume-run", help="Resume a previously paused run (does not advance execution)")
+    resume_run.add_argument(
+        "--flow-state-file",
+        default=None,
+        help="Path to the saved run reference (default: ~/.abstractcode/flow_state.json).",
+    )
+
+    cancel = sub.add_parser("cancel", help="Cancel the last saved flow run (best-effort includes descendants)")
+    cancel.add_argument(
+        "--flow-state-file",
+        default=None,
+        help="Path to the saved run reference (default: ~/.abstractcode/flow_state.json).",
+    )
+
+    runs = sub.add_parser("runs", help="List recent flow runs from the flow store")
+    runs.add_argument(
+        "--flow-state-file",
+        default=None,
+        help="Path to the saved run reference (default: ~/.abstractcode/flow_state.json).",
+    )
+    runs.add_argument("--limit", type=int, default=20, help="Maximum runs to show (default: 20)")
+
+    attach = sub.add_parser("attach", help="Attach to an existing flow run_id (sets the current flow_state.json ref)")
+    attach.add_argument("run_id", help="Existing run_id to attach to")
+    attach.add_argument("--flows-dir", default=None, help="Directory containing VisualFlow JSON files")
+    attach.add_argument(
+        "--flow-state-file",
+        default=None,
+        help="Path to the saved run reference (default: ~/.abstractcode/flow_state.json).",
+    )
+
+    emit = sub.add_parser("emit", help="Emit a custom event (or resume a raw wait_key) for the current flow session")
+    emit.add_argument("--name", default=None, help="Custom event name to emit")
+    emit.add_argument("--wait-key", default=None, help="Raw wait_key to resume (advanced)")
+    emit.add_argument("--scope", default="session", help="Event scope: session|workflow|run|global (default: session)")
+    emit.add_argument("--payload-json", default=None, help="Event payload as JSON (object preferred)")
+    emit.add_argument(
+        "--payload-file",
+        default=None,
+        help="Path to a JSON file containing the event payload",
+    )
+    emit.add_argument(
+        "--session-id",
+        default=None,
+        help="Target session id (defaults to current root run_id for session scope)",
+    )
+    emit.add_argument(
+        "--max-steps",
+        type=int,
+        default=0,
+        help="Tick budget per resumed run (default: 0; host drives execution)",
+    )
+    emit.add_argument("--flows-dir", default=None, help="Directory containing VisualFlow JSON files")
+    emit.add_argument(
+        "--flow-state-file",
+        default=None,
+        help="Path to the saved run reference (default: ~/.abstractcode/flow_state.json).",
+    )
+    emit.add_argument(
+        "--auto-approve",
+        "--accept-tools",
+        "--auto-accept",
+        action="store_true",
+        dest="auto_approve",
+        help="Automatically approve tool calls (unsafe; disables interactive approvals).",
+    )
+
+    return parser
+
+
+def build_gateway_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        prog="abstractcode gateway",
+        description="Run/observe workflows via AbstractGateway (HTTP control plane).",
+    )
+    sub = parser.add_subparsers(dest="command")
+
+    run = sub.add_parser("run", help="Start a new gateway run and follow it")
+    run.add_argument("flow_id", help="Flow id to start (or 'bundle:flow')")
+    run.add_argument("--bundle-id", default=None, help="Bundle id (optional if flow_id is namespaced)")
+    run.add_argument("--gateway-url", default=None, help="Gateway base URL (default: $ABSTRACTCODE_GATEWAY_URL)")
+    run.add_argument("--gateway-token", default=None, help="Gateway auth token (default: $ABSTRACTCODE_GATEWAY_TOKEN)")
+    run.add_argument(
+        "--input-json",
+        default=None,
+        help='JSON object string passed to the flow entry (e.g. \'{"prompt":"..."}\')',
+    )
+    run.add_argument(
+        "--input-file",
+        "--input-json-file",
+        dest="input_file",
+        default=None,
+        help="Path to a JSON file (object) passed to the flow entry",
+    )
+    run.add_argument(
+        "--param",
+        action="append",
+        default=[],
+        help="Set an input param as key=value (repeatable). Example: --param max_iterations=5",
+    )
+    run.add_argument("--no-follow", action="store_true", help="Do not tail the run; only print run_id")
+    run.add_argument("--poll-s", type=float, default=0.25, help="Polling interval when following (default: 0.25)")
+
+    attach = sub.add_parser("attach", help="Attach to an existing run_id and follow it")
+    attach.add_argument("run_id", help="Existing run_id to follow")
+    attach.add_argument("--gateway-url", default=None, help="Gateway base URL (default: $ABSTRACTCODE_GATEWAY_URL)")
+    attach.add_argument("--gateway-token", default=None, help="Gateway auth token (default: $ABSTRACTCODE_GATEWAY_TOKEN)")
+    attach.add_argument("--poll-s", type=float, default=0.25, help="Polling interval when following (default: 0.25)")
+
+    kg = sub.add_parser("kg", help="Query/dump the persisted KG (AbstractMemory triple store)")
+    kg.add_argument(
+        "id",
+        nargs="?",
+        default=None,
+        help="run_id or session_id (optional when using --scope global or --all-owners)",
+    )
+    kg.add_argument("--gateway-url", default=None, help="Gateway base URL (default: $ABSTRACTCODE_GATEWAY_URL)")
+    kg.add_argument("--gateway-token", default=None, help="Gateway auth token (default: $ABSTRACTCODE_GATEWAY_TOKEN)")
+    kg.add_argument("--scope", choices=("run", "session", "global", "all"), default="session", help="KG scope (default: session)")
+    kg.add_argument("--owner-id", default=None, help="Explicit owner_id override (bypasses scope owner resolution)")
+    kg.add_argument("--all-owners", action="store_true", help="Query across all owner_ids within the selected scope(s) (debug/audit)")
+    kg.add_argument("--subject", default=None, help="Filter: exact subject")
+    kg.add_argument("--predicate", default=None, help="Filter: exact predicate")
+    kg.add_argument("--object", dest="object", default=None, help="Filter: exact object")
+    kg.add_argument("--since", default=None, help="Filter: observed_at >= since (ISO 8601 string compare)")
+    kg.add_argument("--until", default=None, help="Filter: observed_at <= until (ISO 8601 string compare)")
+    kg.add_argument("--active-at", dest="active_at", default=None, help="Filter: valid_from/valid_until window intersection")
+    kg.add_argument("--query-text", dest="query_text", default=None, help="Optional semantic query text (requires embedder configured on the store)")
+    kg.add_argument("--min-score", dest="min_score", type=float, default=None, help="Semantic similarity threshold (0..1)")
+    kg.add_argument("--limit", type=int, default=0, help="Max results (default: 0 = unlimited; -1 = unlimited; positive = limit)")
+    kg.add_argument("--order", choices=("asc", "desc"), default="desc", help="Order by observed_at for non-semantic queries (default: desc)")
+    kg.add_argument(
+        "--format",
+        choices=("triples", "jsonl", "json"),
+        default="triples",
+        help="Output format: triples|jsonl|json (default: triples)",
+    )
+    kg.add_argument("--pretty", action="store_true", help="Pretty-print JSON output (json format only)")
+
     return parser
 
 
 def main(argv: Optional[Sequence[str]] = None) -> int:
-
+    argv_list = list(argv) if argv is not None else sys.argv[1:]
+
+    if argv_list and argv_list[0] == "gateway":
+        parser = build_gateway_parser()
+        args, unknown = parser.parse_known_args(argv_list[1:])
+        from .gateway_cli import attach_gateway_run_command, query_gateway_kg_command, run_gateway_flow_command
+
+        cmd = getattr(args, "command", None)
+        if cmd == "run":
+            from .flow_cli import _parse_input_json, _parse_kv_list, _parse_unknown_params
+
+            input_data = _parse_input_json(raw_json=args.input_json, json_path=args.input_file)
+            input_data.update(_parse_kv_list(list(getattr(args, "param", []) or [])))
+            # Allow unknown args to be interpreted as params (same as `flow run`).
+            input_data.update(_parse_unknown_params(list(unknown or [])))
+
+            run_gateway_flow_command(
+                gateway_url=args.gateway_url,
+                gateway_token=args.gateway_token,
+                flow_id=str(args.flow_id),
+                bundle_id=str(args.bundle_id).strip() if isinstance(args.bundle_id, str) and str(args.bundle_id).strip() else None,
+                input_data=input_data,
+                follow=not bool(getattr(args, "no_follow", False)),
+                poll_s=float(getattr(args, "poll_s", 0.25) or 0.25),
+            )
+            return 0
+
+        if cmd == "attach":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            attach_gateway_run_command(
+                gateway_url=args.gateway_url,
+                gateway_token=args.gateway_token,
+                run_id=str(args.run_id),
+                follow=True,
+                poll_s=float(getattr(args, "poll_s", 0.25) or 0.25),
+            )
+            return 0
+
+        if cmd == "kg":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            id_raw = getattr(args, "id", None)
+            id_value = str(id_raw).strip() if isinstance(id_raw, str) and str(id_raw).strip() else None
+            query_gateway_kg_command(
+                gateway_url=args.gateway_url,
+                gateway_token=args.gateway_token,
+                run_id=id_value,
+                scope=str(args.scope),
+                owner_id=getattr(args, "owner_id", None),
+                all_owners=bool(getattr(args, "all_owners", False)),
+                subject=getattr(args, "subject", None),
+                predicate=getattr(args, "predicate", None),
+                object_value=getattr(args, "object", None),
+                since=getattr(args, "since", None),
+                until=getattr(args, "until", None),
+                active_at=getattr(args, "active_at", None),
+                query_text=getattr(args, "query_text", None),
+                min_score=getattr(args, "min_score", None),
+                limit=int(getattr(args, "limit", 0)),
+                order=str(getattr(args, "order", "desc") or "desc"),
+                fmt=str(getattr(args, "format", "triples") or "triples"),
+                pretty=bool(getattr(args, "pretty", False)),
+            )
+            return 0
+
+        build_gateway_parser().print_help()
+        return 2
+
+    if argv_list and argv_list[0] == "flow":
+        parser = build_flow_parser()
+        args, unknown = parser.parse_known_args(argv_list[1:])
+        from .flow_cli import (
+            attach_flow_run_command,
+            control_flow_command,
+            emit_flow_event_command,
+            list_flow_runs_command,
+            resume_flow_command,
+            run_flow_command,
+        )
+
+        cmd = getattr(args, "command", None)
+        if cmd == "run":
+            run_flow_command(
+                flow_ref=str(args.flow),
+                flows_dir=args.flows_dir,
+                input_json=args.input_json,
+                input_file=args.input_file,
+                params=list(getattr(args, "param", []) or []),
+                extra_args=list(unknown or []),
+                flow_state_file=args.flow_state_file,
+                no_state=bool(args.no_state),
+                auto_approve=bool(args.auto_approve),
+                wait_until=bool(args.wait_until),
+                verbosity=str(getattr(args, "verbosity", "default") or "default"),
+            )
+            return 0
+        if cmd == "resume":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            resume_flow_command(
+                flow_state_file=args.flow_state_file,
+                no_state=False,
+                auto_approve=bool(args.auto_approve),
+                wait_until=bool(args.wait_until),
+                verbosity=str(getattr(args, "verbosity", "default") or "default"),
+            )
+            return 0
+        if cmd == "pause":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            control_flow_command(action="pause", flow_state_file=args.flow_state_file)
+            return 0
+        if cmd == "resume-run":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            control_flow_command(action="resume", flow_state_file=args.flow_state_file)
+            return 0
+        if cmd == "cancel":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            control_flow_command(action="cancel", flow_state_file=args.flow_state_file)
+            return 0
+        if cmd == "runs":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            list_flow_runs_command(flow_state_file=args.flow_state_file, limit=int(args.limit or 20))
+            return 0
+        if cmd == "attach":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            attach_flow_run_command(
+                run_id=str(args.run_id),
+                flows_dir=args.flows_dir,
+                flow_state_file=args.flow_state_file,
+            )
+            return 0
+        if cmd == "emit":
+            if unknown:
+                parser.error(f"Unknown arguments: {' '.join(unknown)}")
+            emit_flow_event_command(
+                name=args.name,
+                wait_key=args.wait_key,
+                scope=args.scope,
+                payload_json=args.payload_json,
+                payload_file=args.payload_file,
+                session_id=args.session_id,
+                max_steps=int(args.max_steps or 0),
+                flows_dir=args.flows_dir,
+                flow_state_file=args.flow_state_file,
+                auto_approve=bool(args.auto_approve),
+            )
+            return 0
+
+        build_flow_parser().print_help()
+        return 2
+
+    if argv_list and argv_list[0] == "workflow":
+        parser = build_workflow_parser()
+        args, unknown = parser.parse_known_args(argv_list[1:])
+        if unknown:
+            parser.error(f"Unknown arguments: {' '.join(unknown)}")
+        from .workflow_cli import (
+            deprecate_workflow_bundle_command,
+            install_workflow_bundle_command,
+            list_workflow_bundles_command,
+            remove_workflow_bundle_command,
+            undeprecate_workflow_bundle_command,
+            workflow_bundle_info_command,
+        )
+
+        cmd = getattr(args, "command", None)
+        if cmd == "install":
+            install_workflow_bundle_command(
+                source=str(args.source),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                overwrite=bool(getattr(args, "overwrite", False)),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+        if cmd == "list":
+            list_workflow_bundles_command(
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                interface=getattr(args, "interface", None),
+                all_versions=bool(getattr(args, "all", False)),
+                include_deprecated=bool(getattr(args, "include_deprecated", False)),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+        if cmd == "info":
+            workflow_bundle_info_command(
+                bundle_ref=str(args.bundle),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+        if cmd == "remove":
+            remove_workflow_bundle_command(
+                bundle_ref=str(args.bundle),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+
+        if cmd == "deprecate":
+            deprecate_workflow_bundle_command(
+                bundle_id=str(args.bundle),
+                flow_id=getattr(args, "flow_id", None),
+                reason=getattr(args, "reason", None),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+
+        if cmd == "undeprecate":
+            undeprecate_workflow_bundle_command(
+                bundle_id=str(args.bundle),
+                flow_id=getattr(args, "flow_id", None),
+                gateway_url=getattr(args, "gateway_url", None),
+                gateway_token=getattr(args, "gateway_token", None),
+                output_json=bool(getattr(args, "json", False)),
+            )
+            return 0
+
+        build_workflow_parser().print_help()
+        return 2
+
+    args = build_agent_parser().parse_args(argv_list)
     state_file = None if args.no_state else args.state_file
 
+    # Best-effort: pass gateway settings to the TUI via env vars (not persisted).
+    gw_url = getattr(args, "gateway_url", None)
+    if isinstance(gw_url, str) and gw_url.strip():
+        os.environ["ABSTRACTCODE_GATEWAY_URL"] = gw_url.strip()
+    gw_token = getattr(args, "gateway_token", None)
+    if isinstance(gw_token, str) and gw_token.strip():
+        os.environ["ABSTRACTCODE_GATEWAY_TOKEN"] = gw_token.strip()
+
     shell = ReactShell(
         agent=str(args.agent),
         provider=args.provider,
         model=args.model,
+        base_url=getattr(args, "base_url", None),
        state_file=state_file,
        auto_approve=bool(args.auto_approve),
+        plan_mode=bool(args.plan),
+        review_mode=bool(args.review),
+        review_max_rounds=int(args.review_max_rounds),
        max_iterations=int(args.max_iterations),
        max_tokens=args.max_tokens,
        color=not bool(args.no_color),
     )
+
+    prompt = getattr(args, "prompt", None)
+    if isinstance(prompt, str) and prompt.strip():
+        if state_file:
+            try:
+                shell._try_load_state()
+            except Exception:
+                pass
+        return _run_one_shot_prompt(shell=shell, prompt=prompt)
+
     shell.run()
     return 0
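Taken together, the new build_flow_parser, build_workflow_parser, and build_gateway_parser subcommands give main() a much larger surface. A usage sketch inferred from the argparse definitions above, driven through main(argv); the flow id, bundle path, gateway URL, run id, and session id are placeholders, not values from this release:

from abstractcode.cli import main

# Run an AbstractFlow workflow from the terminal, passing an input param.
main(["flow", "run", "my_flow", "--param", "max_web_search=15"])

# Upload a workflow bundle to a gateway, then list entrypoints as JSON.
main(["workflow", "install", "./my_bundle.flow", "--gateway-url", "http://localhost:8080"])
main(["workflow", "list", "--gateway-url", "http://localhost:8080", "--json"])

# Follow an existing gateway run, then query the session's knowledge graph.
main(["gateway", "attach", "RUN_ID", "--gateway-url", "http://localhost:8080"])
main(["gateway", "kg", "SESSION_ID", "--scope", "session", "--format", "jsonl"])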