@rong/agentscript 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77) hide show
  1. package/CHANGELOG.md +22 -0
  2. package/INSTALL.md +92 -0
  3. package/LICENSE +21 -0
  4. package/README.md +246 -0
  5. package/dist/ast/constants.js +1 -0
  6. package/dist/ast/format.js +41 -0
  7. package/dist/ast/types.js +1 -0
  8. package/dist/bin/agentscript.js +234 -0
  9. package/dist/bin/input.js +19 -0
  10. package/dist/bin/repl.js +290 -0
  11. package/dist/index.js +26 -0
  12. package/dist/parser/errors.js +8 -0
  13. package/dist/parser/parser.js +661 -0
  14. package/dist/parser/tokenizer.js +246 -0
  15. package/dist/providers/llm/anthropic.js +36 -0
  16. package/dist/providers/llm/index.js +3 -0
  17. package/dist/providers/llm/ollama.js +19 -0
  18. package/dist/providers/llm/openai.js +31 -0
  19. package/dist/providers/llm/protocol.js +45 -0
  20. package/dist/providers/llm/shared.js +147 -0
  21. package/dist/providers/llm/types.js +1 -0
  22. package/dist/providers/llm/uri.js +24 -0
  23. package/dist/providers/memory/file.js +44 -0
  24. package/dist/providers/memory/host.js +66 -0
  25. package/dist/providers/memory/index.js +1 -0
  26. package/dist/providers/memory/shared.js +56 -0
  27. package/dist/providers/memory/sqlite.js +98 -0
  28. package/dist/providers/mock/index.js +32 -0
  29. package/dist/providers/tools/env.js +11 -0
  30. package/dist/providers/tools/file.js +99 -0
  31. package/dist/providers/tools/host.js +34 -0
  32. package/dist/providers/tools/http.js +40 -0
  33. package/dist/providers/tools/index.js +2 -0
  34. package/dist/providers/tools/scheme.js +16 -0
  35. package/dist/providers/tools/shared.js +92 -0
  36. package/dist/providers/tools/shell.js +80 -0
  37. package/dist/runtime/context.js +160 -0
  38. package/dist/runtime/errors.js +14 -0
  39. package/dist/runtime/evaluator.js +276 -0
  40. package/dist/runtime/generate.js +175 -0
  41. package/dist/runtime/guards.js +39 -0
  42. package/dist/runtime/input.js +38 -0
  43. package/dist/runtime/interpreter.js +314 -0
  44. package/dist/runtime/json.js +59 -0
  45. package/dist/runtime/loader.js +146 -0
  46. package/dist/runtime/scope.js +47 -0
  47. package/dist/runtime/shape.js +132 -0
  48. package/dist/runtime/trace.js +54 -0
  49. package/dist/runtime/truth.js +13 -0
  50. package/dist/runtime/types.js +1 -0
  51. package/dist/runtime/uri.js +10 -0
  52. package/dist/semantic/analyzer.js +519 -0
  53. package/dist/semantic/diagnostics.js +16 -0
  54. package/dist/utils/assert.js +3 -0
  55. package/docs/cn/context-engineering.md +389 -0
  56. package/docs/cn/language.md +478 -0
  57. package/docs/design-history/v0-design.md +365 -0
  58. package/docs/design-history/v0-implement.md +274 -0
  59. package/docs/design-history/v1-design.md +323 -0
  60. package/docs/design-history/v1-implement.md +267 -0
  61. package/docs/design-history/v2-design.md +387 -0
  62. package/docs/design-history/v2-implement.md +399 -0
  63. package/docs/en/context-engineering.md +332 -0
  64. package/docs/en/language.md +478 -0
  65. package/examples/changelog.as +29 -0
  66. package/examples/extract.as +29 -0
  67. package/examples/review.as +38 -0
  68. package/examples/summarize.as +28 -0
  69. package/examples/translate.as +33 -0
  70. package/package.json +59 -0
  71. package/tutorials/cli.as +22 -0
  72. package/tutorials/helloworld.as +14 -0
  73. package/tutorials/memory.as +19 -0
  74. package/tutorials/plan-execute.as +155 -0
  75. package/tutorials/react.as +98 -0
  76. package/tutorials/repl.as +31 -0
  77. package/tutorials/self-improve.as +60 -0
package/CHANGELOG.md ADDED
@@ -0,0 +1,22 @@
1
+ # Changelog
2
+
3
+ All notable changes to AgentScript will be documented in this file.
4
+
5
+ ## 0.1.0 - 2026-05-07
6
+
7
+ ### Added
8
+
9
+ - Initial public release of AgentScript.
10
+ - Parser, semantic analyzer, and interpreter for `.as` programs.
11
+ - CLI support for parse, check, execute, REPL, trace, quiet, and verbose modes.
12
+ - LLM providers for OpenAI, Anthropic, and Ollama protocol URIs.
13
+ - Tool providers for Find, Grep, Sed, File, Env, and Http operations.
14
+ - Explicit prompt context model with `use`, scoped context inheritance, and context budgets.
15
+ - `generate` with structured output shapes, validation, limited repair attempts, and trace output.
16
+ - Agent/function composition including cross-agent calls and imported agents.
17
+ - Control flow support for `if`/`else`, `loop until`, `repeat`, and `for item in list < n`.
18
+ - List and JSON helpers including item access, `.length`, `.add(value)`, and `.summary`.
19
+ - File import support for static context inputs.
20
+ - Memory imports with explicit `add` and `query` operations.
21
+ - File JSONL and SQLite memory backends.
22
+ - Tutorials, examples, regression fixtures, and automated tests.
package/INSTALL.md ADDED
@@ -0,0 +1,92 @@
1
+ # Install AgentScript
2
+
3
+ AgentScript is distributed as an npm package and can also be run from source.
4
+
5
+ ## Requirements
6
+
7
+ - Node.js compatible with the runtime features used by this project.
8
+ - npm.
9
+ - Optional: Ollama, OpenAI, or Anthropic credentials when running with `--real-llm`.
10
+
11
+ The current development setup uses Node.js 25 types and the SQLite memory backend uses Node's built-in `node:sqlite` module.
12
+
13
+ ## Install from npm
14
+
15
+ After the package is published:
16
+
17
+ ```bash
18
+ npm install -g agentscript
19
+ agentscript examples/review.as --input '{"path":"src"}'
20
+ ```
21
+
22
+ ## Run with npx
23
+
24
+ After the package is published:
25
+
26
+ ```bash
27
+ npx agentscript examples/review.as --input '{"path":"src"}'
28
+ ```
29
+
30
+ ## Run from source
31
+
32
+ ```bash
33
+ git clone https://github.com/<owner>/<repo>.git
34
+ cd <repo>
35
+ npm install
36
+ npm run build
37
+ npm run execute -- examples/review.as --input '{"path":"src"}'
38
+ ```
39
+
40
+ During local development, prefer:
41
+
42
+ ```bash
43
+ npm run execute -- tutorials/react.as --input '{"question":"What is AgentScript?"}'
44
+ npm run check -- examples/review.as
45
+ npm run parse -- examples/review.as
46
+ ```
47
+
48
+ ## Real LLM providers
49
+
50
+ By default, AgentScript uses a mock LLM provider for local flow checks. Add `--real-llm` to call a real provider.
51
+
52
+ ### OpenAI
53
+
54
+ ```bash
55
+ export OPENAI_API_KEY="..."
56
+ agentscript examples/review.as --input '{"path":"src"}' --real-llm
57
+ ```
58
+
59
+ Use an AgentScript import such as:
60
+
61
+ ```agentscript
62
+ import llm OpenAI from "openai://gpt-4.1-mini"
63
+ ```
64
+
65
+ ### Anthropic
66
+
67
+ ```bash
68
+ export ANTHROPIC_API_KEY="..."
69
+ agentscript examples/review.as --input '{"path":"src"}' --real-llm
70
+ ```
71
+
72
+ Use an AgentScript import such as:
73
+
74
+ ```agentscript
75
+ import llm Claude from "anthropic://claude-sonnet-4-0"
76
+ ```
77
+
78
+ ### Ollama
79
+
80
+ Run Ollama locally, then use an import such as:
81
+
82
+ ```agentscript
83
+ import llm Qwen from "ollama://localhost:11434/qwen3.6"
84
+ ```
85
+
86
+ ## Validate a checkout
87
+
88
+ ```bash
89
+ npm run typecheck
90
+ npm test
91
+ npm run build
92
+ ```
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Rong Zhou
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,246 @@
1
+ # AgentScript
2
+
3
+ > **Prompt context as a first-class citizen.**
4
+ > `use` declares what the model sees. `generate` defines what it returns.
5
+ > Zero runtime dependencies. TypeScript-powered.
6
+
7
+ ```agentscript
8
+ use scratch.summary < 2k
9
+ return generate({ input: "Answer from observations" }) {
10
+ return { ok boolean, text string }
11
+ }
12
+ ```
13
+
14
+ [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
15
+ ![Zero Dependencies](https://img.shields.io/badge/dependencies-0-brightgreen)
16
+ ![Node >= 25](https://img.shields.io/badge/node-%3E%3D25-green)
17
+
18
+ [中文版](./README-CN.md)
19
+
20
+ LLMs are stateless by nature. Each call is a fresh start. To give an agent continuity of thought, every input must be carefully assembled — what researchers and practitioners call context engineering.
21
+
22
+ After building agents with Python and TypeScript, the author kept running into the same problem: prompt context management. What data actually reaches the LLM? Where does one agent's context end and another's begin? How do you audit what the model saw?
23
+
24
+ AgentScript was designed to solve this — not as a general-purpose language, nor a declarative config, nor a prompt template, but as a **DSL** that mixes imperative control flow with explicit, scope-governed context declarations.
25
+
26
+ It gives you two things that general-purpose languages don't: a first-class `use` keyword that declares *which* data enters the LLM prompt, and a first-class `generate` expression that defines *what* the LLM must return. Everything else — variables, functions, agents, imports, loops — exists to support this core workflow. Scopes enforce context boundaries naturally: what's `use`d in one function stays there; child scopes inherit but never leak upward.
27
+
28
+ The result is a language purpose-built for composing agent patterns — ReAct, Plan-and-Execute, Reflection, Multi-Agent — where prompt context is always visible, auditable, and under your control.
29
+
30
+ ## How it works
31
+
32
+ ```mermaid
33
+ graph LR
34
+ A[".as source"] --> B["Parser"]
35
+ B --> C["AST"]
36
+ C --> D["Semantic Analyzer"]
37
+ D --> E["Runtime"]
38
+ E --> F["LLM Provider<br/>(OpenAI / Anthropic / Ollama)"]
39
+ E --> G["Tools<br/>(Find / Grep / File / HTTP / ...)"]
40
+ E --> H["Memory<br/>(JSONL / SQLite)"]
41
+ E --> I["Trace Output"]
42
+ ```
43
+
44
+ ## Agent patterns as composable primitives
45
+
46
+ AgentScript doesn't hardcode agent patterns as keywords. You compose them from the same primitives:
47
+
48
+ | Pattern | Tutorial | What it demonstrates |
49
+ |---------|----------|---------------------|
50
+ | **ReAct** | `tutorials/react.as` | Reason → Act → Observe loop with explicit context |
51
+ | **Plan-and-Execute** | `tutorials/plan-execute.as` | Generate plan, execute steps, verify, re-plan on failure |
52
+ | **Reflection / Self-Improvement** | `tutorials/self-improve.as` | Query past lessons → generate → reflect → persist new lessons |
53
+ | **Multi-Agent** | `tutorials/plan-execute.as` | Independent agents with isolated context boundaries |
54
+
55
+ Every pattern is explicit — which data enters the prompt, which tools each agent can use, and which output shape each LLM call must satisfy.
56
+
57
+ ## Install
58
+
59
+ ```bash
60
+ npm install -g agentscript
61
+ ```
62
+
63
+ Or run without installing:
64
+
65
+ ```bash
66
+ npx agentscript examples/review.as --input '{"path":"src"}'
67
+ ```
68
+
69
+ ## Quick start
70
+
71
+ ```bash
72
+ # Run with mock LLM (default, no API key needed)
73
+ agentscript examples/summarize.as --input '{"path":"README.md"}'
74
+
75
+ # Run with real LLM
76
+ agentscript examples/summarize.as --input '{"path":"README.md"}' --real-llm
77
+ ```
78
+
79
+ The `summarize.as` file reads a local file, includes it in the LLM context, and returns a structured summary:
80
+
81
+ ```agentscript
82
+ -- examples/summarize.as
83
+ import llm Qwen from "ollama://localhost:11434/qwen3.6"
84
+ import tool File from "file://workspace"
85
+
86
+ main agent FileSummarizer {
87
+ model Qwen
88
+ role "Technical Writer"
89
+ description "Read one local file and produce a useful structured summary."
90
+
91
+ main func(input { path string }) {
92
+ content = File.read({ path: input.path })
93
+ use input.path
94
+ use content < 8k
95
+
96
+ return generate({
97
+ input: "Summarize the file for a busy teammate"
98
+ limit: 1000
99
+ }) {
100
+ return {
101
+ title string
102
+ summary string
103
+ key_points list[string]
104
+ action_items list[string]
105
+ }
106
+ }
107
+ }
108
+ }
109
+ ```
110
+
111
+ Expected output (with mock LLM):
112
+
113
+ ```json
114
+ {
115
+ "value": {
116
+ "title": "",
117
+ "summary": "",
118
+ "key_points": [],
119
+ "action_items": []
120
+ },
121
+ "trace": [ ... ]
122
+ }
123
+ ```
124
+
125
+ With `--real-llm`, the fields are populated by the model.
126
+
127
+ ## Language at a glance
128
+
129
+ ```agentscript
130
+ import llm Qwen from "ollama://localhost:11434/qwen3.6"
131
+ import tool Search from "mcp://tools/search"
132
+ import memory Lessons from "file://./.agentscript/lessons.jsonl"
133
+
134
+ main agent ResearchAgent {
135
+ model Qwen
136
+ role "Senior Researcher"
137
+ description "Answer questions with search and structured reasoning."
138
+
139
+ main func(input {
140
+ question string
141
+ }) {
142
+ use input.question
143
+
144
+ scratch = []
145
+ use scratch.summary < 2k
146
+
147
+ done = false
148
+ loop until done < 6 {
149
+ thought = reason(input.question, scratch)
150
+ obs = Search.search(thought.focus)
151
+ scratch.add(obs)
152
+ done = enough(input.question, scratch)
153
+ }
154
+
155
+ return answer(input.question, scratch)
156
+ }
157
+
158
+ func answer(question, scratch) {
159
+ use question
160
+ use scratch.summary < 2k
161
+ return generate({ input: "Answer using only the observations" }) {
162
+ return {
163
+ ok boolean
164
+ text string
165
+ error string
166
+ }
167
+ }
168
+ }
169
+ }
170
+ ```
171
+
172
+ ## Key ideas
173
+
174
+ 1. **`use` is explicit context** — nothing enters the LLM prompt unless `use`d
175
+ 2. **`generate` is the only LLM call site** — with a required input instruction and a return shape
176
+ 3. **Scope is context boundary** — functions, agents, and blocks isolate prompt visibility
177
+ 4. **Tools, memory, and files are imported resources** — with auditable access
178
+ 5. **Trace is built in** — every `generate` and `use` is recorded for debugging
179
+
180
+ ## Why not just Python or TypeScript?
181
+
182
+ | | Python / TypeScript | AgentScript |
183
+ |---|---|---|
184
+ | Context management | Implicit (string concatenation, array append) | Explicit (`use` declaration) |
185
+ | LLM call site | Anywhere in the code | One `generate` expression |
186
+ | Context isolation | Manual discipline | Scope-inherited, auto-isolated |
187
+ | Trace / audit | External tooling needed | Built-in, per-call |
188
+
189
+ Python and TypeScript are excellent general-purpose tools, but they have no concept of "prompt context" as a language primitive. Every agent project reinvents the same patterns. AgentScript bakes them in.
190
+
191
+ ## CLI
192
+
193
+ ```bash
194
+ agentscript examples/review.as # run with mock LLM
195
+ agentscript examples/review.as --check # parse + semantic check only
196
+ agentscript examples/review.as --parse # parse and output AST
197
+ agentscript examples/review.as --trace pretty # human-readable trace
198
+ agentscript examples/review.as --quiet # value only, no trace
199
+ ```
200
+
201
+ | Option | Description |
202
+ |--------|-------------|
203
+ | `--input '<json>'` | JSON input for the entry function |
204
+ | `--input-file <path>` | Read input from a JSON file |
205
+ | `--agent <name>` | Select a specific entry agent |
206
+ | `--function <name>` | Select a specific entry function |
207
+ | `--check` | Parse + semantic analysis (no execution) |
208
+ | `--parse` | Parse and output AST as JSON |
209
+ | `--real-llm` | Use real LLM provider instead of mock |
210
+ | `--trace <file>` | Write execution trace to file |
211
+ | `--trace pretty` | Print human-readable trace |
212
+ | `--verbose` | Print detailed trace |
213
+ | `--quiet` | Output only the final value |
214
+
215
+ ## Documentation
216
+
217
+ | Language | Links |
218
+ |----------|-------|
219
+ | English | [Language Reference](docs/en/language.md) · [Context Engineering](docs/en/context-engineering.md) · [Design History](docs/design-history/) |
220
+ | 中文 | [README-CN](./README-CN.md) · [语言参考](docs/cn/language.md) · [Context Engineering](docs/cn/context-engineering.md) |
221
+
222
+ ### Design principles
223
+
224
+ - Context is explicit: ordinary variables, tool results, memory records, and trace events never enter prompts unless selected with `use`.
225
+ - Scope controls variable lifetime, context inheritance, and prompt exposure.
226
+ - LLM, tool, file, agent, and memory imports are runtime capabilities with explicit boundaries.
227
+ - Pattern names such as planner, executor, verifier, reflect, improve, and evolve are ordinary identifiers.
228
+ - Trace is for debugging and audit; it is not prompt context.
229
+
230
+ ## Contributing
231
+
232
+ See [CONTRIBUTING.md](./CONTRIBUTING.md).
233
+
234
+ ## Development
235
+
236
+ ```bash
237
+ npm run typecheck
238
+ npm test
239
+ npm run build
240
+ ```
241
+
242
+ Zero runtime dependencies. Built with TypeScript.
243
+
244
+ ## License
245
+
246
+ MIT
@@ -0,0 +1 @@
1
// Type-name keywords accepted in output shape declarations
// (presumably consumed by the shape validator — confirm against runtime/shape.js).
export const SHAPE_TYPE_NAMES = new Set([
  "string",
  "number",
  "boolean",
  "json",
  "list",
]);
@@ -0,0 +1,41 @@
1
+ import { assertNever } from "../utils/assert.js";
2
/**
 * Render an AST expression node back to a compact, one-line source string.
 * Recurses through nested expressions; shape objects are summarized as
 * "{ shape }" rather than reproduced in full.
 */
export function formatExpressionSource(expr) {
  if (expr.kind === "IdentifierExpr") {
    return expr.name;
  }
  if (expr.kind === "StringExpr") {
    return JSON.stringify(expr.value);
  }
  if (expr.kind === "NumberExpr") {
    // Emit the raw literal text so the original spelling is preserved.
    return expr.raw;
  }
  if (expr.kind === "BooleanExpr") {
    return String(expr.value);
  }
  if (expr.kind === "NullExpr") {
    return "none";
  }
  if (expr.kind === "ListExpr") {
    return `[${formatItems(expr.items)}]`;
  }
  if (expr.kind === "ObjectExpr") {
    return `{ ${formatProperties(expr.properties)} }`;
  }
  if (expr.kind === "ShapeObjectExpr") {
    return "{ shape }";
  }
  if (expr.kind === "MemberExpr") {
    return `${formatExpressionSource(expr.object)}.${expr.property}`;
  }
  if (expr.kind === "IndexExpr") {
    return `${formatExpressionSource(expr.object)}[${formatExpressionSource(expr.index)}]`;
  }
  if (expr.kind === "UnaryExpr") {
    return `${expr.operator} ${formatExpressionSource(expr.value)}`;
  }
  if (expr.kind === "BinaryExpr") {
    return `${formatExpressionSource(expr.left)} ${expr.operator} ${formatExpressionSource(expr.right)}`;
  }
  if (expr.kind === "CallExpr") {
    return `${formatExpressionSource(expr.callee)}(${formatItems(expr.args)})`;
  }
  if (expr.kind === "GenerateExpr") {
    return `generate({ ${formatProperties(expr.options.properties)} })`;
  }
  // Exhaustiveness guard: unreachable for well-formed AST nodes.
  assertNever(expr);
}
// Join a list of expressions as "a, b, c".
function formatItems(items) {
  const parts = [];
  for (const item of items) {
    parts.push(formatExpressionSource(item));
  }
  return parts.join(", ");
}
// Join object properties as "key: value, key: value".
function formatProperties(properties) {
  const parts = [];
  for (const { key, value } of properties) {
    parts.push(`${key}: ${formatExpressionSource(value)}`);
  }
  return parts.join(", ");
}
@@ -0,0 +1 @@
1
// NOTE(review): empty runtime module — presumably the compiled output of a
// type-only TypeScript file; it exists so the import graph stays valid.
export {};
@@ -0,0 +1,234 @@
1
+ #!/usr/bin/env node
2
+ import { readFileSync, writeFileSync } from "node:fs";
3
+ import { basename, dirname, join } from "node:path";
4
+ import { fileURLToPath } from "node:url";
5
+ import { stdin as inputStream, stdout as outputStream } from "node:process";
6
+ import { createInterface } from "node:readline/promises";
7
+ import { executeAgent } from "../runtime/interpreter.js";
8
+ import { loadProgram } from "../runtime/loader.js";
9
+ import { ProtocolLlmProvider } from "../providers/llm/index.js";
10
+ import { sanitizeForJson } from "../runtime/json.js";
11
+ import { formatTrace } from "../runtime/trace.js";
12
+ import { analyze } from "../semantic/analyzer.js";
13
+ import { formatSemanticDiagnostics } from "../semantic/diagnostics.js";
14
+ import { parseInteractiveInputValue, parseJsonObjectInput } from "./input.js";
15
+ import { runRepl } from "./repl.js";
16
/**
 * CLI entry point. With no arguments the REPL is started; otherwise the
 * argument list is parsed and routed to one mode (help, version, parse,
 * check, or agent execution). Resolves to the process exit code; errors
 * are printed to stderr and turn into exit code 1.
 */
export async function main(argv = process.argv.slice(2)) {
  try {
    if (argv.length === 0) {
      return runRepl();
    }
    return await dispatch(parseArgs(argv));
  }
  catch (error) {
    console.error(error instanceof Error ? error.message : String(error));
    return 1;
  }
}
// Route parsed options to the matching CLI mode; the check order defines precedence.
async function dispatch(options) {
  if (options.help) {
    printUsage(console.log);
    return 0;
  }
  if (options.version) {
    console.log(readPackageVersion());
    return 0;
  }
  if (!options.file) {
    printUsage(console.error);
    return 1;
  }
  if (options.parse && options.check) {
    throw new Error("Use either --parse or --check, not both");
  }
  if (options.parse) {
    return runParse(options);
  }
  if (options.check) {
    return runCheck(options);
  }
  return runAgent(options);
}
50
/**
 * Parse raw CLI arguments into an options record.
 * Exactly one positional argument (the .as file) is allowed; conflicting
 * output modes (--quiet with --verbose/--trace pretty) are rejected.
 * @throws {Error} on unknown options, extra positionals, or conflicts
 */
function parseArgs(argv) {
  // Defaults for every boolean switch; value options stay undefined until seen.
  const options = {
    check: false,
    help: false,
    parse: false,
    quiet: false,
    realLlm: false,
    tracePretty: false,
    verbose: false,
    version: false
  };
  // Boolean switches mapped to their option fields (short aliases included).
  const flagFields = new Map([
    ["--check", "check"],
    ["--help", "help"],
    ["-h", "help"],
    ["--parse", "parse"],
    ["--quiet", "quiet"],
    ["--real-llm", "realLlm"],
    ["--verbose", "verbose"],
    ["--version", "version"],
    ["-v", "version"]
  ]);
  // Options that consume the following argv entry as their value.
  const valueFields = new Map([
    ["--agent", "agentName"],
    ["--function", "functionName"],
    ["--input", "input"],
    ["--input-file", "inputFile"]
  ]);
  const positional = [];
  for (let i = 0; i < argv.length; i += 1) {
    const arg = argv[i];
    if (flagFields.has(arg)) {
      options[flagFields.get(arg)] = true;
    }
    else if (valueFields.has(arg)) {
      options[valueFields.get(arg)] = readOptionValue(argv, ++i, arg);
    }
    else if (arg === "--trace") {
      // "--trace pretty" selects human-readable output; any other value is a file path.
      const value = readOptionValue(argv, ++i, arg);
      if (value === "pretty") {
        options.tracePretty = true;
      }
      else {
        options.traceFile = value;
      }
    }
    else if (arg.startsWith("--")) {
      throw new Error(`Unknown option '${arg}'`);
    }
    else {
      positional.push(arg);
    }
  }
  options.file = positional[0];
  if (positional.length > 1) {
    throw new Error("Unexpected positional argument. Use --input to pass input JSON.");
  }
  if (options.quiet && (options.verbose || options.tracePretty)) {
    throw new Error("Use either --quiet or verbose trace output, not both");
  }
  return options;
}
127
/**
 * Return the argv entry at `index`, the value expected after `option`
 * (e.g. the path following `--input-file`).
 *
 * Fix: the original used a truthiness check (`!value`), which also rejected
 * a legitimately empty string value (e.g. `--agent ""`). Only a missing
 * entry (`undefined`, i.e. argv ended) is an error.
 *
 * @param {string[]} args - the full argument array
 * @param {number} index - position of the expected value
 * @param {string} option - option name, used in the error message
 * @returns {string} the option's value (may be empty)
 * @throws {Error} when argv ends before the value
 */
function readOptionValue(args, index, option) {
  const value = args[index];
  if (value === undefined) {
    throw new Error(`Expected value after ${option}`);
  }
  return value;
}
134
// Parse-only mode: load the program and print its AST as JSON. Always exits 0
// (load/parse failures propagate as exceptions and are handled by main).
function runParse(options) {
  const program = loadCliProgram(options);
  printJson(program);
  return 0;
}
138
// Check-only mode: run semantic analysis and print any diagnostics to stderr.
// Exit code is 1 only when at least one diagnostic is an error; warnings pass.
function runCheck(options) {
  const { diagnostics } = analyze(loadCliProgram(options));
  if (diagnostics.length > 0) {
    console.error(formatSemanticDiagnostics(diagnostics));
  }
  const failed = diagnostics.some((diagnostic) => diagnostic.severity === "error");
  return failed ? 1 : 0;
}
145
/**
 * Execute mode: run the program's entry agent with the CLI-supplied input,
 * then print the result (and optionally the trace) as JSON.
 * The terminal input provider (if any) is closed once execution settles.
 */
async function runAgent(options) {
  const input = readInput(options);
  const inputProvider = terminalInputProvider();
  const execution = executeAgent(loadCliProgram(options), input, {
    agentName: options.agentName,
    functionName: options.functionName,
    inputProvider,
    llmProvider: options.realLlm ? new ProtocolLlmProvider() : undefined,
    sourcePath: options.file,
  });
  const result = await execution.finally(() => inputProvider?.close?.());
  if (options.traceFile) {
    // Full trace goes to the file; stdout then only references it.
    writeFileSync(options.traceFile, `${JSON.stringify(result.trace, null, 2)}\n`);
  }
  const value = sanitizeForJson(result.value);
  if (options.quiet) {
    printJson(value);
  }
  else {
    const trace = options.traceFile ? { file: options.traceFile } : result.trace;
    printJson({ value, trace });
  }
  if (options.tracePretty || options.verbose) {
    console.log(formatTrace(result.trace));
  }
  return 0;
}
170
// Load and parse the AgentScript program named on the command line.
// @throws {Error} when no file was given.
function loadCliProgram(options) {
  const { file } = options;
  if (!file) {
    throw new Error("Missing AgentScript file");
  }
  return loadProgram(file);
}
176
/**
 * Resolve the entry-function input from --input (inline JSON) or
 * --input-file (JSON file); the two are mutually exclusive.
 * Returns an empty object when neither is given.
 */
function readInput(options) {
  const { input, inputFile } = options;
  if (input && inputFile) {
    throw new Error("Use either --input or --input-file, not both");
  }
  if (inputFile) {
    const text = readFileSync(inputFile, "utf8");
    return parseJsonObjectInput(text, "--input-file");
  }
  return input ? parseJsonObjectInput(input, "--input") : {};
}
188
// Interactive prompting is only possible when both stdin and stdout are TTYs;
// otherwise return undefined so the runtime runs without an input provider.
function terminalInputProvider() {
  const interactive = inputStream.isTTY && outputStream.isTTY;
  return interactive ? new TerminalInputProvider() : undefined;
}
194
// Prompts the user on the terminal for missing input values.
// The readline interface is created lazily on first use and reused afterwards.
class TerminalInputProvider {
  interface;
  // Ask for the value at the given input path (e.g. "input.question")
  // and interpret the typed answer (JSON where possible, string otherwise).
  async read(request) {
    const prompt = `${request.path.join(".")}: `;
    const answer = await this.reader().question(prompt);
    return parseInteractiveInputValue(answer);
  }
  // Lazily create (once) and return the shared readline interface.
  reader() {
    if (!this.interface) {
      this.interface = createInterface({ input: inputStream, output: outputStream });
    }
    return this.interface;
  }
  // Release the readline interface so the process can exit cleanly.
  close() {
    this.interface?.close();
  }
}
208
// Print CLI usage examples through the supplied writer
// (console.log for --help, console.error when the file argument is missing).
function printUsage(write) {
  const lines = [
    "Usage:",
    " agentscript <file.as> --input '{\"question\":\"...\"}'",
    " agentscript <file.as>",
    " agentscript <file.as> --input-file input.json --agent AgentName",
    " agentscript <file.as> --input '{}' --quiet",
    " agentscript <file.as> --input '{}' --verbose",
    " agentscript <file.as> --input '{}' --trace pretty",
    " agentscript <file.as> --check",
    " agentscript <file.as> --parse",
    " agentscript",
  ];
  write(lines.join("\n"));
}
222
// Read the CLI version from the package.json two levels above this module
// (dist/bin -> package root). Falls back to "0.0.0" when the version field
// is missing or not a string.
function readPackageVersion() {
  const moduleDir = dirname(fileURLToPath(import.meta.url));
  const manifestPath = join(moduleDir, "../../package.json");
  const manifest = JSON.parse(readFileSync(manifestPath, "utf8"));
  const { version } = manifest;
  return typeof version === "string" ? version : "0.0.0";
}
227
// Pretty-print a JSON-serializable value to stdout with 2-space indentation.
function printJson(value) {
  const text = JSON.stringify(value, null, 2);
  console.log(text);
}
230
// Script names under which this module should self-execute: the npm bin shim
// plus direct node invocation of the built (.js) or source (.ts) file.
const ENTRYPOINT_NAMES = new Set(["agentscript", "agentscript.js", "agentscript.ts"]);
// Distinguish "run as a CLI" from "imported as a module" by the invoked script name.
const isEntrypoint = process.argv[1] ? ENTRYPOINT_NAMES.has(basename(process.argv[1])) : false;
if (isEntrypoint) {
    // Top-level await: record main()'s result as the exit code instead of
    // calling process.exit(), letting the process shut down normally.
    process.exitCode = await main();
}
@@ -0,0 +1,19 @@
1
/**
 * Parse `source` as JSON and require the result to be a plain object.
 * `label` names the CLI option (e.g. "--input") for the error message.
 * @throws {SyntaxError} when `source` is not valid JSON
 * @throws {Error} when the parsed value is not a non-null, non-array object
 */
export function parseJsonObjectInput(source, label) {
  const parsed = JSON.parse(source);
  const isPlainObject = parsed !== null && typeof parsed === "object" && !Array.isArray(parsed);
  if (!isPlainObject) {
    throw new Error(`${label} must be a JSON object`);
  }
  return parsed;
}
8
/**
 * Interpret a line typed at an interactive prompt.
 * Blank (whitespace-only) input becomes ""; JSON-looking input (numbers,
 * booleans, quoted strings, objects, arrays) is parsed; anything else is
 * returned verbatim, including its original surrounding whitespace.
 */
export function parseInteractiveInputValue(value) {
  const trimmed = value.trim();
  if (trimmed === "") {
    return "";
  }
  try {
    return JSON.parse(trimmed);
  }
  catch {
    // Not JSON: treat the answer as a plain, untrimmed string.
    return value;
  }
}