zapcode-ai 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,27 @@
1
+ # Rust
2
+ **/target
3
+ **/*.rs.bk
4
+
5
+ # Node.js
6
+ node_modules/
7
+ dist/
8
+ package-lock.json
9
+
10
+ # NAPI-RS build artifacts
11
+ *.node
12
+ crates/zapcode-js/npm/
13
+
14
+ # Python
15
+ __pycache__/
16
+ *.pyc
17
+ .python-version
18
+
19
+ # Editor
20
+ *.swp
21
+ *.swo
22
+
23
+ # OS
24
+ .DS_Store
25
+
26
+ # Secrets
27
+ .env
@@ -0,0 +1,24 @@
1
+ Metadata-Version: 2.4
2
+ Name: zapcode-ai
3
+ Version: 1.0.0
4
+ Summary: AI SDK integration for Zapcode — let LLMs write and execute TypeScript safely
5
+ Project-URL: Homepage, https://github.com/TheUncharted/zapcode
6
+ Project-URL: Repository, https://github.com/TheUncharted/zapcode
7
+ Project-URL: Issues, https://github.com/TheUncharted/zapcode/issues
8
+ Project-URL: Documentation, https://github.com/TheUncharted/zapcode#readme
9
+ Author: Uncharted
10
+ License-Expression: MIT
11
+ Keywords: agent,ai,interpreter,llm,mcp,sandbox,tool-use,typescript
12
+ Classifier: Development Status :: 4 - Beta
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
17
+ Requires-Python: >=3.10
18
+ Requires-Dist: zapcode
19
+ Provides-Extra: anthropic
20
+ Requires-Dist: anthropic>=0.39.0; extra == 'anthropic'
21
+ Provides-Extra: langchain
22
+ Requires-Dist: langchain-core>=0.3.0; extra == 'langchain'
23
+ Provides-Extra: openai
24
+ Requires-Dist: openai>=1.0.0; extra == 'openai'
@@ -0,0 +1,596 @@
1
+ <p align="center">
2
+ <img src="assets/logo.png" alt="Zapcode" width="160" />
3
+ </p>
4
+ <h1 align="center">Zapcode</h1>
5
+ <p align="center"><strong>Run AI code. Safely. Instantly.</strong></p>
6
+ <p align="center">A minimal, secure TypeScript interpreter written in Rust for use by AI agents</p>
7
+
8
+ <p align="center">
9
+ <a href="https://github.com/TheUncharted/zapcode/actions"><img src="https://img.shields.io/github/actions/workflow/status/TheUncharted/zapcode/ci.yml?branch=master&label=CI" alt="CI"></a>
10
+ <a href="https://crates.io/crates/zapcode-core"><img src="https://img.shields.io/crates/v/zapcode-core" alt="crates.io"></a>
11
+ <a href="https://www.npmjs.com/package/@unchartedfr/zapcode"><img src="https://img.shields.io/npm/v/@unchartedfr/zapcode" alt="npm"></a>
12
+ <a href="https://pypi.org/project/zapcode/"><img src="https://img.shields.io/pypi/v/zapcode" alt="PyPI"></a>
13
+ <a href="https://github.com/TheUncharted/zapcode/blob/master/LICENSE"><img src="https://img.shields.io/github/license/TheUncharted/zapcode" alt="License"></a>
14
+ </p>
15
+
16
+ ---
17
+
18
+ > **Experimental** — Zapcode is under active development. APIs may change.
19
+
20
+ ## Why agents should write code
21
+
22
+ AI agents are more capable when they **write code** instead of chaining tool calls. Code gives agents loops, conditionals, variables, and composition — things that tool chains simulate poorly.
23
+
24
+ - [CodeMode](https://blog.cloudflare.com/codemode-ai-agent-coding) — Cloudflare on why agents should write code
25
+ - [Programmatic Tool Calling](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/tool-use-examples#programmatic-tool-calling) — Anthropic's approach
26
+ - [Code Execution with MCP](https://www.anthropic.com/engineering/code-execution-mcp) — Anthropic engineering
27
+ - [Smol Agents](https://huggingface.co/docs/smolagents/en/index) — Hugging Face's code-first agents
28
+
29
+ **But running AI-generated code is dangerous and slow.**
30
+
31
+ Docker adds ~200-500 ms of cold-start latency and requires a container runtime. V8 isolates bring ~20 MB of binary and millisecond startup. Neither supports snapshotting execution mid-function.
32
+
33
+ Zapcode takes a different approach: a purpose-built TypeScript interpreter that starts in **2 microseconds**, enforces a security sandbox at the language level, and can snapshot execution state to bytes for later resumption — all in a single, embeddable library with zero dependencies on Node.js or V8.
34
+
35
+ Inspired by [Monty](https://github.com/pydantic/monty), Pydantic's Python subset interpreter that takes the same approach for Python.
36
+
37
+ ## Alternatives
38
+
39
+ | | Language completeness | Security | Startup | Snapshots | Setup |
40
+ |---|---|---|---|---|---|
41
+ | **Zapcode** | TypeScript subset | Language-level sandbox | **~2 µs** | Built-in, < 2 KB | `npm install` / `pip install` |
42
+ | Docker + Node.js | Full Node.js | Container isolation | ~200-500 ms | No | Container runtime |
43
+ | V8 Isolates | Full JS/TS | Isolate boundary | ~5-50 ms | No | V8 (~20 MB) |
44
+ | Deno Deploy | Full TS | Isolate + permissions | ~10-50 ms | No | Cloud service |
45
+ | QuickJS | Full ES2023 | Process isolation | ~1-5 ms | No | C library |
46
+ | WASI/Wasmer | Depends on guest | Wasm sandbox | ~1-10 ms | Possible | Wasm runtime |
47
+
48
+ ### Why not Docker?
49
+
50
+ Docker provides strong isolation but adds hundreds of milliseconds of cold-start latency, requires a container runtime, and doesn't support snapshotting execution state mid-function. For AI agent loops that execute thousands of small code snippets, the overhead dominates.
51
+
52
+ ### Why not V8?
53
+
54
+ V8 is the gold standard for JavaScript execution. But it brings ~20 MB of binary size, millisecond startup times, and a vast API surface that must be carefully restricted for sandboxing. If you need full ECMAScript compliance, use V8. If you need microsecond startup, byte-sized snapshots, and a security model where "blocked by default" is the foundation rather than an afterthought, use Zapcode.
55
+
56
+ ## Benchmarks
57
+
58
+ All benchmarks run the full pipeline: parse → compile → execute. No caching, no warm-up.
59
+
60
+ | Benchmark | Zapcode | Docker + Node.js | V8 Isolate |
61
+ |---|---|---|---|
62
+ | Simple expression (`1 + 2 * 3`) | **2.1 µs** | ~200-500 ms | ~5-50 ms |
63
+ | Variable arithmetic | **2.8 µs** | — | — |
64
+ | String concatenation | **2.6 µs** | — | — |
65
+ | Template literal | **2.9 µs** | — | — |
66
+ | Array creation | **2.4 µs** | — | — |
67
+ | Object creation | **5.2 µs** | — | — |
68
+ | Function call | **4.6 µs** | — | — |
69
+ | Loop (100 iterations) | **77.8 µs** | — | — |
70
+ | Fibonacci (n=10, 177 calls) | **138.4 µs** | — | — |
71
+ | Snapshot size (typical agent) | **< 2 KB** | N/A | N/A |
72
+ | Memory per execution | **~10 KB** | ~50+ MB | ~20+ MB |
73
+ | Cold start | **~2 µs** | ~200-500 ms | ~5-50 ms |
74
+
75
+ No background thread, no GC, no runtime — CPU usage is exactly proportional to the instructions executed.
76
+
77
+ ```bash
78
+ cargo bench # run benchmarks yourself
79
+ ```
80
+
81
+ ## Installation
82
+
83
+ **TypeScript / JavaScript**
84
+ ```bash
85
+ npm install @unchartedfr/zapcode # npm / yarn / pnpm / bun
86
+ ```
87
+
88
+ **Python**
89
+ ```bash
90
+ pip install zapcode # pip / uv
91
+ ```
92
+
93
+ **Rust**
94
+ ```toml
95
+ # Cargo.toml
96
+ [dependencies]
97
+ zapcode-core = "1.0.0"
98
+ ```
99
+
100
+ **WebAssembly**
101
+ ```bash
102
+ wasm-pack build crates/zapcode-wasm --target web
103
+ ```
104
+
105
+ ## Basic Usage
106
+
107
+ ### TypeScript / JavaScript
108
+
109
+ ```typescript
110
+ import { Zapcode, ZapcodeSnapshotHandle } from '@unchartedfr/zapcode';
111
+
112
+ // Simple expression
113
+ const b = new Zapcode('1 + 2 * 3');
114
+ console.log(b.run().output); // 7
115
+
116
+ // With inputs
117
+ const greeter = new Zapcode(
118
+ '`Hello, ${name}! You are ${age} years old.`',
119
+ { inputs: ['name', 'age'] },
120
+ );
121
+ console.log(greeter.run({ name: 'Zapcode', age: 30 }).output);
122
+
123
+ // Data processing
124
+ const processor = new Zapcode(`
125
+ const items = [
126
+ { name: "Widget", price: 25.99, qty: 3 },
127
+ { name: "Gadget", price: 49.99, qty: 1 },
128
+ ];
129
+ const total = items.reduce((sum, i) => sum + i.price * i.qty, 0);
130
+ ({ total, names: items.map(i => i.name) })
131
+ `);
132
+ console.log(processor.run().output);
133
+ // { total: 127.96, names: ["Widget", "Gadget"] }
134
+
135
+ // External function (snapshot/resume)
136
+ const app = new Zapcode(`const data = await fetch(url); data`, {
137
+ inputs: ['url'],
138
+ externalFunctions: ['fetch'],
139
+ });
140
+ const state = app.start({ url: 'https://api.example.com' });
141
+ if (!state.completed) {
142
+ console.log(state.functionName); // "fetch"
143
+ const snapshot = ZapcodeSnapshotHandle.load(state.snapshot);
144
+ const final_ = snapshot.resume({ status: 'ok' });
145
+ console.log(final_.output); // { status: "ok" }
146
+ }
147
+ ```
148
+
149
+ See [`examples/typescript/basic.ts`](examples/typescript/basic.ts) for more.
150
+
151
+ ### Python
152
+
153
+ ```python
154
+ from zapcode import Zapcode, ZapcodeSnapshot
155
+
156
+ # Simple expression
157
+ b = Zapcode("1 + 2 * 3")
158
+ print(b.run()["output"]) # 7
159
+
160
+ # With inputs
161
+ b = Zapcode(
162
+ '`Hello, ${name}!`',
163
+ inputs=["name"],
164
+ )
165
+ print(b.run({"name": "Zapcode"})["output"]) # "Hello, Zapcode!"
166
+
167
+ # External function (snapshot/resume)
168
+ b = Zapcode(
169
+ "const w = await getWeather(city); `${city}: ${w.temp}°C`",
170
+ inputs=["city"],
171
+ external_functions=["getWeather"],
172
+ )
173
+ state = b.start({"city": "London"})
174
+ if state.get("suspended"):
175
+ result = state["snapshot"].resume({"condition": "Cloudy", "temp": 12})
176
+ print(result["output"]) # "London: 12°C"
177
+
178
+ # Snapshot persistence
179
+ state = b.start({"city": "Tokyo"})
180
+ if state.get("suspended"):
181
+ bytes_ = state["snapshot"].dump() # serialize to bytes
182
+ restored = ZapcodeSnapshot.load(bytes_) # load from bytes
183
+ result = restored.resume({"condition": "Clear", "temp": 26})
184
+ ```
185
+
186
+ See [`examples/python/basic.py`](examples/python/basic.py) for more.
187
+
188
+ <details>
189
+ <summary><strong>Rust</strong></summary>
190
+
191
+ ```rust
192
+ use zapcode_core::{ZapcodeRun, Value, ResourceLimits, VmState};
193
+
194
+ // Simple expression
195
+ let runner = ZapcodeRun::new(
196
+ "1 + 2 * 3".to_string(), vec![], vec![],
197
+ ResourceLimits::default(),
198
+ )?;
199
+ assert_eq!(runner.run_simple()?, Value::Int(7));
200
+
201
+ // With inputs and external functions (snapshot/resume)
202
+ let runner = ZapcodeRun::new(
203
+ r#"const weather = await getWeather(city);
204
+ `${city}: ${weather.condition}, ${weather.temp}°C`"#.to_string(),
205
+ vec!["city".to_string()],
206
+ vec!["getWeather".to_string()],
207
+ ResourceLimits::default(),
208
+ )?;
209
+
210
+ let state = runner.start(vec![
211
+ ("city".to_string(), Value::String("London".into())),
212
+ ])?;
213
+
214
+ if let VmState::Suspended { snapshot, .. } = state {
215
+ let weather = Value::Object(indexmap::indexmap! {
216
+ "condition".into() => Value::String("Cloudy".into()),
217
+ "temp".into() => Value::Int(12),
218
+ });
219
+ let final_state = snapshot.resume(weather)?;
220
+ // VmState::Complete("London: Cloudy, 12°C")
221
+ }
222
+ ```
223
+
224
+ See [`examples/rust/basic.rs`](examples/rust/basic.rs) for more.
225
+ </details>
226
+
227
+ <details>
228
+ <summary><strong>WebAssembly (browser)</strong></summary>
229
+
230
+ ```html
231
+ <script type="module">
232
+ import init, { Zapcode } from './zapcode-wasm/zapcode_wasm.js';
233
+
234
+ await init();
235
+
236
+ const b = new Zapcode(`
237
+ const items = [10, 20, 30];
238
+ items.map(x => x * 2).reduce((a, b) => a + b, 0)
239
+ `);
240
+ const result = b.run();
241
+ console.log(result.output); // 120
242
+ </script>
243
+ ```
244
+
245
+ See [`examples/wasm/index.html`](examples/wasm/index.html) for a full playground.
246
+ </details>
247
+
248
+ ## AI Agent Usage
249
+
250
+ ### Vercel AI SDK (@unchartedfr/zapcode-ai)
251
+
252
+ The recommended way — one call gives you `{ system, tools }` that plug directly into `generateText` / `streamText`:
253
+
254
+ ```typescript
255
+ import { zapcode } from "@unchartedfr/zapcode-ai";
256
+ import { generateText } from "ai";
257
+ import { anthropic } from "@ai-sdk/anthropic";
258
+
259
+ const { system, tools } = zapcode({
260
+ system: "You are a helpful travel assistant.",
261
+ tools: {
262
+ getWeather: {
263
+ description: "Get current weather for a city",
264
+ parameters: { city: { type: "string", description: "City name" } },
265
+ execute: async ({ city }) => {
266
+ const res = await fetch(`https://api.weather.com/${city}`);
267
+ return res.json();
268
+ },
269
+ },
270
+ searchFlights: {
271
+ description: "Search flights between two cities",
272
+ parameters: {
273
+ from: { type: "string" },
274
+ to: { type: "string" },
275
+ date: { type: "string" },
276
+ },
277
+ execute: async ({ from, to, date }) => {
278
+ return flightAPI.search(from, to, date);
279
+ },
280
+ },
281
+ },
282
+ });
283
+
284
+ // Works with any AI SDK model — Anthropic, OpenAI, Google, etc.
285
+ const { text } = await generateText({
286
+ model: anthropic("claude-sonnet-4-20250514"),
287
+ system,
288
+ tools,
289
+ messages: [{ role: "user", content: "Weather in Tokyo and cheapest flight from London?" }],
290
+ });
291
+ ```
292
+
293
+ Under the hood: the LLM writes TypeScript code that calls your tools → Zapcode executes it in a sandbox → tool calls suspend the VM → your `execute` functions run on the host → results flow back in. All in ~2µs startup + tool execution time.
294
+
295
+ See [`examples/typescript/ai-agent-zapcode-ai.ts`](examples/typescript/ai-agent-zapcode-ai.ts) for the full working example.
296
+
297
+ <details>
298
+ <summary><strong>Anthropic SDK</strong></summary>
299
+
300
+ **TypeScript:**
301
+
302
+ ```typescript
303
+ import Anthropic from "@anthropic-ai/sdk";
304
+ import { Zapcode, ZapcodeSnapshotHandle } from "@unchartedfr/zapcode";
305
+
306
+ const tools = {
307
+ getWeather: async (city: string) => {
308
+ const res = await fetch(`https://api.weather.com/${city}`);
309
+ return res.json();
310
+ },
311
+ };
312
+
313
+ const client = new Anthropic();
314
+ const response = await client.messages.create({
315
+ model: "claude-sonnet-4-20250514",
316
+ max_tokens: 1024,
317
+ system: `Write TypeScript to answer the user's question.
318
+ Available functions (use await): getWeather(city: string) → { condition, temp }
319
+ Last expression = output. No markdown fences.`,
320
+ messages: [{ role: "user", content: "What's the weather in Tokyo?" }],
321
+ });
322
+
323
+ const code = response.content[0].type === "text" ? response.content[0].text : "";
324
+
325
+ // Execute + resolve tool calls via snapshot/resume
326
+ const sandbox = new Zapcode(code, { externalFunctions: ["getWeather"] });
327
+ let state = sandbox.start();
328
+ while (!state.completed) {
329
+ const result = await tools[state.functionName](...state.args);
330
+ state = ZapcodeSnapshotHandle.load(state.snapshot).resume(result);
331
+ }
332
+ console.log(state.output);
333
+ ```
334
+
335
+ **Python:**
336
+
337
+ ```python
338
+ import anthropic
339
+ from zapcode import Zapcode
340
+
341
+ client = anthropic.Anthropic()
342
+ response = client.messages.create(
343
+ model="claude-sonnet-4-20250514",
344
+ max_tokens=1024,
345
+ system="""Write TypeScript to answer the user's question.
346
+ Available functions (use await): getWeather(city: string) → { condition, temp }
347
+ Last expression = output. No markdown fences.""",
348
+ messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
349
+ )
350
+ code = response.content[0].text
351
+
352
+ sandbox = Zapcode(code, external_functions=["getWeather"])
353
+ state = sandbox.start()
354
+ while state.get("suspended"):
355
+ result = get_weather(*state["args"])
356
+ state = state["snapshot"].resume(result)
357
+ print(state["output"])
358
+ ```
359
+
360
+ See [`examples/typescript/ai-agent-anthropic.ts`](examples/typescript/ai-agent-anthropic.ts) and [`examples/python/ai_agent_anthropic.py`](examples/python/ai_agent_anthropic.py).
361
+ </details>
362
+
363
+ <details>
364
+ <summary><strong>Multi-SDK support</strong></summary>
365
+
366
+ `zapcode()` returns adapters for all major AI SDKs from a single call:
367
+
368
+ ```typescript
369
+ const { system, tools, openaiTools, anthropicTools, handleToolCall } = zapcode({
370
+ tools: { getWeather: { ... } },
371
+ });
372
+
373
+ // Vercel AI SDK
374
+ await generateText({ model: anthropic("claude-sonnet-4-20250514"), system, tools, messages });
375
+
376
+ // OpenAI SDK
377
+ await openai.chat.completions.create({
378
+ messages: [{ role: "system", content: system }, ...userMessages],
379
+ tools: openaiTools,
380
+ });
381
+
382
+ // Anthropic SDK
383
+ await anthropic.messages.create({ system, tools: anthropicTools, messages });
384
+
385
+ // Any SDK — just extract the code from the tool call and pass it to handleToolCall
386
+ const result = await handleToolCall(codeFromToolCall);
387
+ ```
388
+
389
+ ```python
390
+ b = zapcode(tools={...})
391
+ b.anthropic_tools # → Anthropic SDK format
392
+ b.openai_tools # → OpenAI SDK format
393
+ b.handle_tool_call(code) # → Universal handler
394
+ ```
395
+ </details>
396
+
397
+ <details>
398
+ <summary><strong>Custom adapters</strong></summary>
399
+
400
+ Build a custom adapter for any AI SDK without forking Zapcode:
401
+
402
+ ```typescript
403
+ import { zapcode, createAdapter } from "@unchartedfr/zapcode-ai";
404
+
405
+ const myAdapter = createAdapter("my-sdk", (ctx) => {
406
+ return {
407
+ systemMessage: ctx.system,
408
+ actions: [{
409
+ id: ctx.toolName,
410
+ schema: ctx.toolSchema,
411
+ run: async (input: { code: string }) => {
412
+ return ctx.handleToolCall(input.code);
413
+ },
414
+ }],
415
+ };
416
+ });
417
+
418
+ const { custom } = zapcode({
419
+ tools: { ... },
420
+ adapters: [myAdapter],
421
+ });
422
+
423
+ const myConfig = custom["my-sdk"];
424
+ ```
425
+
426
+ ```python
427
+ from zapcode_ai import zapcode, Adapter, AdapterContext
428
+
429
+ class LangChainAdapter(Adapter):
430
+ name = "langchain"
431
+
432
+ def adapt(self, ctx: AdapterContext):
433
+ from langchain_core.tools import StructuredTool
434
+ return StructuredTool.from_function(
435
+ func=lambda code: ctx.handle_tool_call(code),
436
+ name=ctx.tool_name,
437
+ description=ctx.tool_description,
438
+ )
439
+
440
+ b = zapcode(tools={...}, adapters=[LangChainAdapter()])
441
+ langchain_tool = b.custom["langchain"]
442
+ ```
443
+
444
+ The adapter receives an `AdapterContext` with everything needed: system prompt, tool name, tool JSON schema, and a `handleToolCall` function. Return whatever shape your SDK expects.
445
+ </details>
446
+
447
+ ## What Zapcode Can and Cannot Do
448
+
449
+ **Can do:**
450
+
451
+ - Execute a useful subset of TypeScript — variables, functions, classes, generators, async/await, closures, destructuring, spread/rest, optional chaining, nullish coalescing, template literals, try/catch
452
+ - Strip TypeScript types at parse time via [oxc](https://oxc.rs) — no `tsc` needed
453
+ - Snapshot execution to bytes and resume later, even in a different process or machine
454
+ - Call from Rust, Node.js, Python, or WebAssembly
455
+ - Track and limit resources — memory, allocations, stack depth, and wall-clock time
456
+ - 30+ string methods, 25+ array methods, plus Math, JSON, Object, and Promise builtins
457
+
458
+ **Cannot do:**
459
+
460
+ - Run arbitrary npm packages or the full Node.js standard library
461
+ - Execute regular expressions (parsing supported, execution is a no-op)
462
+ - Provide full `Promise` semantics (`.then()` chains, `Promise.race`, etc.)
463
+ - Run code that requires `this` in non-class contexts
464
+
465
+ These are intentional constraints, not bugs. Zapcode targets one use case: **running code written by AI agents** inside a secure, embeddable sandbox.
466
+
467
+ ## Supported Syntax
468
+
469
+ | Feature | Status |
470
+ |---|---|
471
+ | Variables (`const`, `let`) | Supported |
472
+ | Functions (declarations, arrows, expressions) | Supported |
473
+ | Classes (`constructor`, methods, `extends`, `super`, `static`) | Supported |
474
+ | Generators (`function*`, `yield`, `.next()`) | Supported |
475
+ | Async/await | Supported |
476
+ | Control flow (`if`, `for`, `while`, `do-while`, `switch`, `for-of`) | Supported |
477
+ | Try/catch/finally, `throw` | Supported |
478
+ | Closures with mutable capture | Supported |
479
+ | Destructuring (object and array) | Supported |
480
+ | Spread/rest operators | Supported |
481
+ | Optional chaining (`?.`) | Supported |
482
+ | Nullish coalescing (`??`) | Supported |
483
+ | Template literals | Supported |
484
+ | Type annotations, interfaces, type aliases | Stripped at parse time |
485
+ | String methods (30+) | Supported |
486
+ | Array methods (25+, including `map`, `filter`, `reduce`) | Supported |
487
+ | Math, JSON, Object, Promise | Supported |
488
+ | `import` / `require` / `eval` | Blocked (sandbox) |
489
+ | Regular expressions | Parsed, not executed |
490
+ | `var` declarations | Not supported (use `let`/`const`) |
491
+ | Decorators | Not supported |
492
+ | `Symbol`, `WeakMap`, `WeakSet` | Not supported |
493
+
494
+ ## Security
495
+
496
+ Running AI-generated code is inherently dangerous. Unlike Docker, which isolates at the OS level, Zapcode isolates at the **language level** — no container, no process boundary, no syscall filter. The sandbox must be correct by construction, not by configuration.
497
+
498
+ ### Deny-by-default sandbox
499
+
500
+ Guest code runs inside a bytecode VM with no access to the host:
501
+
502
+ | Blocked | How |
503
+ |---|---|
504
+ | Filesystem (`fs`, `path`) | No `std::fs` in the core crate |
505
+ | Network (`net`, `http`, `fetch`) | No `std::net` in the core crate |
506
+ | Environment (`process.env`, `os`) | No `std::env` in the core crate |
507
+ | `eval`, `Function()`, dynamic import | Blocked at parse time |
508
+ | `import`, `require` | Blocked at parse time |
509
+ | `globalThis`, `global` | Blocked at parse time |
510
+ | Prototype pollution | Not applicable — objects are plain `IndexMap` values |
511
+
512
+ The **only** escape hatch is external functions that you explicitly register. When guest code calls one, the VM suspends and returns a snapshot — your code resolves the call, not the guest.
513
+
514
+ ### Resource limits
515
+
516
+ | Limit | Default | Configurable |
517
+ |---|---|---|
518
+ | Memory | 32 MB | `memory_limit_bytes` |
519
+ | Execution time | 5 seconds | `time_limit_ms` |
520
+ | Call stack depth | 512 frames | `max_stack_depth` |
521
+ | Heap allocations | 100,000 | `max_allocations` |
522
+
523
+ ### Zero `unsafe` code
524
+
525
+ The `zapcode-core` crate contains **zero `unsafe` blocks**. Memory safety is guaranteed by the Rust compiler.
526
+
527
+ <details>
528
+ <summary><strong>Adversarial test suite — 65 tests across 19 attack categories</strong></summary>
529
+
530
+ | Attack category | Tests | Result |
531
+ |---|---|---|
532
+ | Prototype pollution (`Object.prototype`, `__proto__`) | 4 | Blocked |
533
+ | Constructor chain escapes (`({}).constructor.constructor(...)`) | 3 | Blocked |
534
+ | `eval`, `Function()`, indirect eval, dynamic import | 5 | Blocked at parse time |
535
+ | `globalThis`, `process`, `require`, `import` | 6 | Blocked at parse time |
536
+ | Stack overflow (direct + mutual recursion) | 2 | Caught by stack depth limit |
537
+ | Memory exhaustion (huge arrays, string doubling) | 4 | Caught by allocation limit |
538
+ | Infinite loops (`while(true)`, `for(;;)`) | 2 | Caught by time/allocation limit |
539
+ | JSON bombs (deep nesting, huge payloads) | 2 | Depth-limited (max 64) |
540
+ | Sparse array attacks (`arr[1e9]`, `arr[MAX_SAFE_INTEGER]`) | 3 | Capped growth (max +1024) |
541
+ | toString/valueOf hijacking during coercion | 3 | Not invoked (by design) |
542
+ | Unicode escapes for blocked keywords | 2 | Blocked |
543
+ | Computed property access tricks | 2 | Returns undefined |
544
+ | Timing side channels (`performance.now`) | 1 | Blocked |
545
+ | Error message information leakage | 3 | No host paths/env exposed |
546
+ | Type confusion attacks | 4 | Proper TypeError |
547
+ | Promise/Generator internal abuse | 4 | No escape |
548
+ | Negative array indices | 2 | Returns undefined |
549
+ | `setTimeout`, `setInterval`, `Proxy`, `Reflect` | 6 | Blocked |
550
+ | `with` statement, `arguments.callee` | 3 | Blocked |
551
+
552
+ ```bash
553
+ cargo test -p zapcode-core --test security # run the security tests
554
+ ```
555
+
556
+ **Known limitations:**
557
+ - `Object.freeze()` is not yet implemented — frozen objects can still be mutated (correctness gap, not a sandbox escape)
558
+ - User-defined `toString()`/`valueOf()` are not called during implicit type coercion (intentional — prevents injection)
559
+ </details>
560
+
561
+ ## Architecture
562
+
563
+ ```
564
+ TypeScript source
565
+
566
+
567
+ ┌─────────┐ oxc_parser (fastest TS parser in Rust)
568
+ │ Parse │──────────────────────────────────────────► Strip types
569
+ └────┬────┘
570
+
571
+ ┌─────────┐
572
+ │ IR │ ZapcodeIR (statements, expressions, operators)
573
+ └────┬────┘
574
+
575
+ ┌─────────┐
576
+ │ Compile │ Stack-based bytecode (~50 instructions)
577
+ └────┬────┘
578
+
579
+ ┌─────────┐
580
+ │ VM │ Execute, snapshot at external calls, resume later
581
+ └────┬────┘
582
+
583
+ Result / Suspended { snapshot }
584
+ ```
585
+
586
+ ## Contributing
587
+
588
+ ```bash
589
+ git clone https://github.com/TheUncharted/zapcode.git
590
+ cd zapcode
591
+ ./scripts/dev-setup.sh # installs toolchain, builds, runs tests
592
+ ```
593
+
594
+ ## License
595
+
596
+ MIT
@@ -0,0 +1,36 @@
1
+ [project]
2
+ name = "zapcode-ai"
3
+ version = "1.0.0"
4
+ description = "AI SDK integration for Zapcode — let LLMs write and execute TypeScript safely"
5
+ requires-python = ">=3.10"
6
+ license = "MIT"
7
+ authors = [{name = "Uncharted"}]
8
+ keywords = ["typescript", "interpreter", "sandbox", "ai", "mcp", "llm", "tool-use", "agent"]
9
+ classifiers = [
10
+ "Development Status :: 4 - Beta",
11
+ "Intended Audience :: Developers",
12
+ "License :: OSI Approved :: MIT License",
13
+ "Programming Language :: Python :: 3",
14
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
15
+ ]
16
+ dependencies = [
17
+ "zapcode",
18
+ ]
19
+
20
+ [project.urls]
21
+ Homepage = "https://github.com/TheUncharted/zapcode"
22
+ Repository = "https://github.com/TheUncharted/zapcode"
23
+ Issues = "https://github.com/TheUncharted/zapcode/issues"
24
+ Documentation = "https://github.com/TheUncharted/zapcode#readme"
25
+
26
+ [project.optional-dependencies]
27
+ anthropic = ["anthropic>=0.39.0"]
28
+ openai = ["openai>=1.0.0"]
29
+ langchain = ["langchain-core>=0.3.0"]
30
+
31
+ [build-system]
32
+ requires = ["hatchling"]
33
+ build-backend = "hatchling.build"
34
+
35
+ [tool.hatch.build.targets.wheel]
36
+ packages = ["src/zapcode_ai"]
@@ -0,0 +1,376 @@
1
+ """
2
+ zapcode-ai — High-level AI SDK integration for Zapcode.
3
+
4
+ Works with any AI SDK:
5
+
6
+ # Anthropic SDK
7
+ from zapcode_ai import zapcode
8
+ b = zapcode(tools={...})
9
+ response = client.messages.create(system=b.system, tools=b.anthropic_tools, ...)
10
+ result = b.handle_tool_call(code)
11
+
12
+ # OpenAI SDK
13
+ b = zapcode(tools={...})
14
+ response = client.chat.completions.create(
15
+ messages=[{"role": "system", "content": b.system}, ...],
16
+ tools=b.openai_tools,
17
+ )
18
+ result = b.handle_tool_call(code)
19
+
20
+ # Custom adapter
21
+ from zapcode_ai import zapcode, Adapter
22
+ class MyAdapter(Adapter):
23
+ name = "my-sdk"
24
+ def adapt(self, ctx):
25
+ return {"system": ctx.system, "tool": ctx.tool_schema}
26
+ b = zapcode(tools={...}, adapters=[MyAdapter()])
27
+ config = b.custom["my-sdk"]
28
+ """
29
+
30
from __future__ import annotations

import asyncio
import inspect
from dataclasses import dataclass, field
from typing import Any, Awaitable, Callable

from zapcode import Zapcode, ZapcodeSnapshot
36
+
37
+
38
+ # ---------------------------------------------------------------------------
39
+ # Public types
40
+ # ---------------------------------------------------------------------------
41
+
42
@dataclass
class ParamDef:
    """Schema for a single parameter of a tool.

    Rendered in the system prompt as TypeScript-style notation, e.g.
    ``city: string`` or ``limit?: number`` (see ``_generate_signature``).
    """
    type: str  # "string" | "number" | "boolean" | "object" | "array"
    description: str = ""  # human-readable hint; not rendered by _build_system_prompt
    optional: bool = False  # rendered as a trailing "?" in the signature
48
+
49
+
50
@dataclass
class ToolDefinition:
    """Definition for a single tool that guest code can call.

    ``execute`` receives a dict of named arguments, built by zipping the
    guest's positional call against ``parameters`` insertion order.
    """
    description: str  # shown to the LLM in the generated system prompt
    parameters: dict[str, ParamDef]  # insertion order defines positional arg mapping
    execute: Callable[..., Any]  # (args: dict) -> Any or awaitable
56
+
57
+
58
@dataclass
class ExecutionResult:
    """Result of executing guest code in the sandbox."""
    output: Any  # value of the guest program's last expression
    stdout: str  # text printed by the guest; empty string if none
    tool_calls: list[dict[str, Any]]  # {"name", "args", "result"} per call, in order
64
+
65
+
66
+ # ---------------------------------------------------------------------------
67
+ # Adapter protocol
68
+ # ---------------------------------------------------------------------------
69
+
70
@dataclass
class AdapterContext:
    """Context passed to custom adapters (see ``Adapter.adapt``)."""
    system: str  # fully built system prompt
    tool_name: str  # always "execute_code"
    tool_description: str  # description of the code-execution tool
    tool_schema: dict[str, Any]  # JSON schema for the tool's input
    handle_tool_call: Callable[[str], ExecutionResult]  # run guest code in the sandbox
78
+
79
+
80
class Adapter:
    """
    Base class for custom SDK adapters.

    Subclass and override :meth:`adapt` to wire Zapcode into any AI SDK
    that is not covered out of the box, e.g.::

        class LangChainAdapter(Adapter):
            name = "langchain"
            def adapt(self, ctx: AdapterContext):
                from langchain_core.tools import StructuredTool
                return StructuredTool.from_function(
                    func=lambda code: ctx.handle_tool_call(code),
                    name=ctx.tool_name,
                    description=ctx.tool_description,
                )

    Whatever ``adapt`` returns is stored under ``custom[name]`` on the
    :class:`ZapcodeAI` result.
    """

    # Key under which this adapter's output appears in ZapcodeAI.custom.
    name: str = ""

    def adapt(self, ctx: AdapterContext) -> Any:
        """Produce an SDK-specific tool/config object. Must be overridden."""
        raise NotImplementedError
100
+
101
+
102
+ # ---------------------------------------------------------------------------
103
+ # System prompt generation
104
+ # ---------------------------------------------------------------------------
105
+
106
+ def _generate_signature(name: str, defn: ToolDefinition) -> str:
107
+ params = ", ".join(
108
+ f"{pname}{'?' if pdef.optional else ''}: {pdef.type}"
109
+ for pname, pdef in defn.parameters.items()
110
+ )
111
+ return f"{name}({params})"
112
+
113
+
114
def _build_system_prompt(
    tools: dict[str, ToolDefinition],
    user_system: str | None = None,
) -> str:
    """Compose the system prompt: optional user text plus tool instructions.

    Each tool is listed as an ``await``-able signature with its description,
    so the LLM knows exactly what it may call from inside the sandbox.
    """
    doc_lines = []
    for name, defn in tools.items():
        doc_lines.append(f"- await {_generate_signature(name, defn)}\n  {defn.description}")
    tool_docs = "\n".join(doc_lines)

    parts: list[str] = []
    # User-supplied system text, if any, goes first.
    if user_system:
        parts.append(user_system)

    parts.append(
        f"""When you need to use tools or compute something, write TypeScript code and pass it to the execute_code tool.
The code runs in a sandboxed interpreter with these functions available (use await):

{tool_docs}

Rules:
- Write ONLY TypeScript code, no markdown fences, no explanation.
- The last expression in your code is the return value.
- You can use variables, loops, conditionals, array methods, etc.
- All tool calls must use `await`.
- If the user's question doesn't need tools, you can compute the answer directly."""
    )

    return "\n\n".join(parts)
142
+
143
+
144
+ # ---------------------------------------------------------------------------
145
+ # Execution engine
146
+ # ---------------------------------------------------------------------------
147
+
148
async def _await_result(value: Awaitable[Any]) -> Any:
    """Drive an arbitrary awaitable to completion (asyncio.run needs a coroutine)."""
    return await value


def _execute_code(
    code: str,
    tool_defs: dict[str, ToolDefinition],
    *,
    memory_limit_bytes: int | None = None,
    time_limit_ms: int | None = None,
) -> ExecutionResult:
    """Run guest TypeScript in a Zapcode sandbox, resolving tool calls.

    The sandbox suspends each time the guest awaits one of the registered
    external functions; the matching ``ToolDefinition.execute`` runs on the
    host, then the snapshot is resumed with its result until the program
    completes.

    Args:
        code: Guest TypeScript source.
        tool_defs: Tool table; keys become the external function names.
        memory_limit_bytes: Optional sandbox memory cap.
        time_limit_ms: Optional sandbox wall-clock cap.

    Raises:
        ValueError: if the guest calls a function not present in ``tool_defs``.
    """
    tool_names = list(tool_defs.keys())
    tool_calls: list[dict[str, Any]] = []

    # Only forward limits the caller actually set, so interpreter defaults apply.
    kwargs: dict[str, Any] = {"external_functions": tool_names}
    if time_limit_ms is not None:
        kwargs["time_limit_ms"] = time_limit_ms
    if memory_limit_bytes is not None:
        kwargs["memory_limit_bytes"] = memory_limit_bytes

    sandbox = Zapcode(code, **kwargs)
    state = sandbox.start()

    while state.get("suspended"):
        fn_name = state["function_name"]
        args = state["args"]

        tool_def = tool_defs.get(fn_name)
        if not tool_def:
            raise ValueError(
                f"Guest code called unknown function '{fn_name}'. "
                f"Available: {', '.join(tool_names)}"
            )

        # Zip the guest's positional args against declared parameter order;
        # surplus positional args are dropped, missing ones simply omitted.
        param_names = list(tool_def.parameters.keys())
        named_args = {
            param_names[i]: args[i]
            for i in range(min(len(param_names), len(args)))
        }

        result = tool_def.execute(named_args)
        # ToolDefinition.execute may return an awaitable (async tool); previously
        # the coroutine object itself would be fed back into the sandbox.
        # NOTE(review): asyncio.run raises if a loop is already running — async
        # callers should resolve awaitables inside execute themselves.
        if inspect.isawaitable(result):
            result = asyncio.run(_await_result(result))
        tool_calls.append({"name": fn_name, "args": args, "result": result})

        snapshot: ZapcodeSnapshot = state["snapshot"]
        state = snapshot.resume(result)

    return ExecutionResult(
        output=state.get("output"),
        stdout=state.get("stdout", ""),
        tool_calls=tool_calls,
    )
196
+
197
+
198
+ # ---------------------------------------------------------------------------
199
+ # Tool schema
200
+ # ---------------------------------------------------------------------------
201
+
202
# Description shown to the LLM for the single "execute_code" tool.
_CODE_TOOL_DESCRIPTION = (
    "Execute TypeScript code in a secure sandbox. "
    "The code can call the available tool functions using await. "
    "The last expression is the return value."
)

# JSON Schema for the tool input: one required "code" string.
_CODE_TOOL_SCHEMA = {
    "type": "object",
    "properties": {
        "code": {
            "type": "string",
            "description": "TypeScript code to execute in the sandbox",
        },
    },
    "required": ["code"],
}
218
+
219
+
220
+ # ---------------------------------------------------------------------------
221
+ # Result object
222
+ # ---------------------------------------------------------------------------
223
+
224
@dataclass
class ZapcodeAI:
    """Result of `zapcode()` — adapters for every major AI SDK.

    Each attribute is ready to pass straight into the corresponding SDK;
    ``handle_tool_call`` is SDK-agnostic and executes guest code directly.
    """

    system: str
    """System prompt instructing the LLM to write TypeScript."""

    anthropic_tools: list[dict[str, Any]]
    """Anthropic SDK tool format. Use with `messages.create(tools=...)`."""

    openai_tools: list[dict[str, Any]]
    """OpenAI SDK tool format. Use with `chat.completions.create(tools=...)`."""

    handle_tool_call: Callable[[str], ExecutionResult]
    """Execute code from a tool call. Works with any SDK."""

    custom: dict[str, Any] = field(default_factory=dict)
    """Output from custom adapters, keyed by adapter name."""
242
+
243
+
244
+ # ---------------------------------------------------------------------------
245
+ # Main entry point
246
+ # ---------------------------------------------------------------------------
247
+
248
def zapcode(
    tools: dict[str, ToolDefinition],
    *,
    system: str | None = None,
    memory_limit_bytes: int | None = None,
    time_limit_ms: int = 10_000,
    adapters: list[Adapter] | None = None,
) -> ZapcodeAI:
    """
    Create an AI-SDK-ready bundle (system prompt + tool schemas) for Zapcode.

    Returns a :class:`ZapcodeAI` providing:
      - ``system``           — prompt telling the LLM to emit TypeScript
      - ``anthropic_tools``  — for ``client.messages.create(tools=...)``
      - ``openai_tools``     — for ``client.chat.completions.create(tools=...)``
      - ``handle_tool_call`` — universal executor for the emitted code
      - ``custom``           — outputs of any custom ``adapters``, keyed by name

    Args:
        tools: Mapping of function name to :class:`ToolDefinition` the guest
            code may ``await``.
        system: Optional user system text, prepended to the generated prompt.
        memory_limit_bytes: Sandbox memory cap (``None`` = interpreter default).
        time_limit_ms: Sandbox wall-clock cap per execution (default 10s).
        adapters: Custom :class:`Adapter` instances to run.

    Example with the Anthropic SDK::

        b = zapcode(
            tools={
                "getWeather": ToolDefinition(
                    description="Get weather for a city",
                    parameters={"city": ParamDef(type="string")},
                    execute=lambda args: get_weather(args["city"]),
                ),
            },
            system="You are a helpful travel assistant.",
        )
        response = client.messages.create(
            model="claude-sonnet-4-20250514",
            system=b.system,
            tools=b.anthropic_tools,
            messages=[{"role": "user", "content": "Weather in Tokyo?"}],
        )
        for block in response.content:
            if block.type == "tool_use":
                print(b.handle_tool_call(block.input["code"]).output)
    """
    prompt = _build_system_prompt(tools, system)

    def handle_tool_call(code: str) -> ExecutionResult:
        # Closure captures the tool table and the configured resource limits.
        return _execute_code(
            code, tools,
            memory_limit_bytes=memory_limit_bytes,
            time_limit_ms=time_limit_ms,
        )

    # The two SDK formats share the tool name and description.
    anthropic_format = [
        {
            "name": "execute_code",
            "description": _CODE_TOOL_DESCRIPTION,
            "input_schema": _CODE_TOOL_SCHEMA,
        }
    ]
    openai_format = [
        {
            "type": "function",
            "function": {
                "name": "execute_code",
                "description": _CODE_TOOL_DESCRIPTION,
                "parameters": _CODE_TOOL_SCHEMA,
            },
        }
    ]

    # Run any user-supplied adapters against a shared context.
    custom_outputs: dict[str, Any] = {}
    if adapters:
        ctx = AdapterContext(
            system=prompt,
            tool_name="execute_code",
            tool_description=_CODE_TOOL_DESCRIPTION,
            tool_schema=_CODE_TOOL_SCHEMA,
            handle_tool_call=handle_tool_call,
        )
        for custom_adapter in adapters:
            custom_outputs[custom_adapter.name] = custom_adapter.adapt(ctx)

    return ZapcodeAI(
        system=prompt,
        anthropic_tools=anthropic_format,
        openai_tools=openai_format,
        handle_tool_call=handle_tool_call,
        custom=custom_outputs,
    )
344
+
345
+
346
def execute(
    code: str,
    tools: dict[str, ToolDefinition],
    *,
    memory_limit_bytes: int | None = None,
    time_limit_ms: int | None = None,
) -> ExecutionResult:
    """Run TypeScript directly in a Zapcode sandbox with tool resolution.

    Lower-level entry point when no AI-SDK integration is needed; see
    ``zapcode()`` for the SDK-oriented API.

    Example::

        from zapcode_ai import execute, ToolDefinition, ParamDef

        result = execute(
            'const w = await getWeather("Tokyo"); w.temp',
            tools={
                "getWeather": ToolDefinition(
                    description="Get weather",
                    parameters={"city": ParamDef(type="string")},
                    execute=lambda args: {"temp": 26, "condition": "Clear"},
                ),
            },
        )
        print(result.output)  # 26
    """
    return _execute_code(
        code,
        tools,
        memory_limit_bytes=memory_limit_bytes,
        time_limit_ms=time_limit_ms,
    )