@supertools-ai/core 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. package/README.md +379 -0
  2. package/dist/__tests__/protocol.test.d.ts +5 -0
  3. package/dist/__tests__/protocol.test.d.ts.map +1 -0
  4. package/dist/__tests__/security.test.d.ts +5 -0
  5. package/dist/__tests__/security.test.d.ts.map +1 -0
  6. package/dist/__tests__/tool.test.d.ts +5 -0
  7. package/dist/__tests__/tool.test.d.ts.map +1 -0
  8. package/dist/errors.d.ts +64 -0
  9. package/dist/errors.d.ts.map +1 -0
  10. package/dist/executor.d.ts +38 -0
  11. package/dist/executor.d.ts.map +1 -0
  12. package/dist/index.d.mts +183 -0
  13. package/dist/index.d.ts +48 -0
  14. package/dist/index.d.ts.map +1 -0
  15. package/dist/index.js +14556 -0
  16. package/dist/index.mjs +937 -0
  17. package/dist/mcp/client.d.ts +48 -0
  18. package/dist/mcp/client.d.ts.map +1 -0
  19. package/dist/mcp/index.d.ts +9 -0
  20. package/dist/mcp/index.d.ts.map +1 -0
  21. package/dist/mcp/schema.d.ts +20 -0
  22. package/dist/mcp/schema.d.ts.map +1 -0
  23. package/dist/mcp/types.d.ts +96 -0
  24. package/dist/mcp/types.d.ts.map +1 -0
  25. package/dist/prompts.d.ts +7 -0
  26. package/dist/prompts.d.ts.map +1 -0
  27. package/dist/providers/anthropic.d.ts +63 -0
  28. package/dist/providers/anthropic.d.ts.map +1 -0
  29. package/dist/relay/client.d.ts +46 -0
  30. package/dist/relay/client.d.ts.map +1 -0
  31. package/dist/relay/index.d.ts +11 -0
  32. package/dist/relay/index.d.ts.map +1 -0
  33. package/dist/relay/protocol.d.ts +84 -0
  34. package/dist/relay/protocol.d.ts.map +1 -0
  35. package/dist/relay/security.d.ts +4 -0
  36. package/dist/relay/security.d.ts.map +1 -0
  37. package/dist/relay/server.d.ts +15 -0
  38. package/dist/relay/server.d.ts.map +1 -0
  39. package/dist/relay/types.d.ts +16 -0
  40. package/dist/relay/types.d.ts.map +1 -0
  41. package/dist/relay/utils/protocol.d.ts +92 -0
  42. package/dist/relay/utils/protocol.d.ts.map +1 -0
  43. package/dist/relay/utils/token.d.ts +4 -0
  44. package/dist/relay/utils/token.d.ts.map +1 -0
  45. package/dist/supertools.d.ts +90 -0
  46. package/dist/supertools.d.ts.map +1 -0
  47. package/dist/tool.d.ts +66 -0
  48. package/dist/tool.d.ts.map +1 -0
  49. package/dist/type-hints.d.ts +3 -0
  50. package/dist/type-hints.d.ts.map +1 -0
  51. package/dist/types.d.ts +85 -0
  52. package/dist/types.d.ts.map +1 -0
  53. package/dist/utils/errors.d.ts +50 -0
  54. package/dist/utils/errors.d.ts.map +1 -0
  55. package/dist/utils/sandbox-pool.d.ts +63 -0
  56. package/dist/utils/sandbox-pool.d.ts.map +1 -0
  57. package/dist/utils/string.d.ts +5 -0
  58. package/dist/utils/string.d.ts.map +1 -0
  59. package/dist/utils/type-hints.d.ts +3 -0
  60. package/dist/utils/type-hints.d.ts.map +1 -0
  61. package/package.json +77 -0
package/README.md ADDED
@@ -0,0 +1,379 @@
1
+ # Supertools
2
+
3
+ <p align="center">
4
+ <img src="assets/banner.svg" alt="Supertools - Let LLMs write code that calls your tools" width="100%">
5
+ </p>
6
+
7
+ > **🚧 Work in Progress** — This project is under active development. The npm package is published as `@supertools-ai/core`. Contributions are welcome, especially for adding support for other AI providers (OpenAI, Vercel AI SDK, etc.)!
8
+
9
+ <p align="center">
10
+ <a href="#quick-start">Quick Start</a> •
11
+ <a href="#how-it-works">How It Works</a> •
12
+ <a href="#api">API</a> •
13
+ <a href="#architecture">Architecture</a> •
14
+ <a href="#roadmap">Roadmap</a>
15
+ </p>
16
+
17
+ ---
18
+
19
+ > Inspired by [Anthropic's Programmatic Tool Calling](https://platform.claude.com/docs/en/agents-and-tools/tool-use/programmatic-tool-calling) — the LLM writes code that orchestrates tools, instead of calling them one by one.
20
+
21
+ ## The Problem
22
+
23
+ Traditional tool calling has limitations:
24
+
25
+ - **Loops require enumeration** — querying 50 states means 50 explicit tool calls
26
+ - **Results stay in context** — all tool outputs consume tokens on every round-trip
27
+ - **Processing needs LLM** — filtering, aggregating, or transforming data requires another LLM call
28
+
29
+ ## The Solution
30
+
31
+ Supertools lets the LLM write code that runs in a sandbox:
32
+
33
+ ```
34
+ User Request → LLM generates code → Sandbox executes → Result
35
+
36
+ for (const state of states) {
37
+ await query_db(state)
38
+ }
39
+ // Process locally
40
+ return topResults
41
+ ```
42
+
43
+ - **Loops are native** — the LLM writes a `for` loop, not 50 tool calls
44
+ - **Processing is free** — filtering/aggregation runs in sandbox, not LLM
45
+ - **Only final result** — intermediate data never hits the LLM context
46
+
47
+ ## Quick Start
48
+
49
+ ```bash
50
+ bun add @supertools-ai/core
51
+ ```
52
+
53
+ ```typescript
54
+ import { supertools, defineTool, z } from '@supertools-ai/core';
55
+ import { Sandbox } from 'e2b';
56
+ import Anthropic from '@anthropic-ai/sdk';
57
+
58
+ // Define your tools with Zod schemas
59
+ const queryDatabase = defineTool({
60
+ name: 'queryDatabase',
61
+ description: 'Execute a SQL query',
62
+ parameters: z.object({
63
+ sql: z.string().describe('The SQL query to execute'),
64
+ }),
65
+ returns: z.array(z.record(z.unknown())), // Array of row objects
66
+ execute: async ({ sql }) => db.query(sql),
67
+ });
68
+
69
+ const sendEmail = defineTool({
70
+ name: 'sendEmail',
71
+ description: 'Send an email',
72
+ parameters: z.object({
73
+ to: z.string(),
74
+ subject: z.string(),
75
+ body: z.string(),
76
+ }),
77
+ returns: z.object({ success: z.boolean(), messageId: z.string() }),
78
+ execute: async ({ to, subject, body }) => mailer.send({ to, subject, body }),
79
+ });
80
+
81
+ // Create sandbox and wrap your SDK client
82
+ const sandbox = await Sandbox.create('supertools-bun');
83
+ const client = supertools(new Anthropic(), {
84
+ tools: [queryDatabase, sendEmail],
85
+ sandbox,
86
+ });
87
+
88
+ // Use exactly like the normal Anthropic SDK
89
+ const response = await client.messages.create({
90
+ model: 'claude-haiku-4-5-20251001',
91
+ max_tokens: 1024,
92
+ messages: [{
93
+ role: 'user',
94
+ content: 'Query sales for all 50 states, find the top 5, and email a report to the CEO'
95
+ }],
96
+ });
97
+
98
+ // Traditional: 2-3 LLM round-trips + tokens for all 50 results in context
99
+ // Supertools: 1 LLM call, loop runs in sandbox, only final result returned
100
+ ```
101
+
102
+ ## How It Works
103
+
104
+ When you ask: *"Query sales for all 50 states, find top 5, email a report"*
105
+
106
+ The LLM generates JavaScript that runs in a secure sandbox:
107
+
108
+ ```javascript
109
+ // All 50 queries execute without LLM round-trips
110
+ const states = ['AL', 'AK', 'AZ', /* ... all 50 */];
111
+ const results = {};
112
+
113
+ for (const state of states) {
114
+ const data = await query_database({
115
+ sql: `SELECT SUM(revenue) FROM sales WHERE state = '${state}'`
116
+ });
117
+ results[state] = data[0].sum;
118
+ }
119
+
120
+ // Process data locally (no tokens consumed)
121
+ const top5 = Object.entries(results)
122
+ .sort((a, b) => b[1] - a[1])
123
+ .slice(0, 5);
124
+
125
+ // Format and send
126
+ const report = top5.map(([state, rev]) => `${state}: $${rev.toLocaleString()}`).join('\n');
127
+ await send_email({
128
+ to: 'ceo@company.com',
129
+ subject: 'Top 5 States Report',
130
+ body: report
131
+ });
132
+
133
+ // Return the result as JSON
134
+ return { topStates: top5, reportSent: true };
135
+ ```
136
+
137
+ **Result:** 51 tool calls execute in the sandbox with 1 LLM call. Traditional approach would need multiple round-trips with all results in context.
138
+
139
+ ## Why Supertools?
140
+
141
+ <p align="center">
142
+ <img src="assets/benchmark.svg" alt="Benchmark Results" width="100%">
143
+ </p>
144
+
145
+ The benchmark compares three approaches on the same model (Claude Sonnet 4.5):
146
+ - **Native**: Traditional tool calling with LLM round-trips
147
+ - **Anthropic Beta**: Anthropic's `code_execution` beta feature
148
+ - **Supertools**: Code generation with E2B sandbox execution
149
+
150
+ > **Note on Anthropic Beta results:** While the `allowed_callers` feature works (tools are called from within the Python code), each tool call still requires a full API round-trip. For N tool calls, you need N+1 API requests — the code execution pauses, returns to your server, you provide the result, and it continues. The only savings are that tool results don't inflate Claude's context. In contrast, Supertools makes 1 API call total — the generated code runs in the sandbox and calls tools via WebSocket without additional API round-trips. This explains the significant performance difference.
151
+
152
+ > **Note:** Supertools returns raw JSON data, not natural language. The LLM generates code but never sees the execution results. This is ideal for data pipelines and batch operations, but for chatbots needing conversational responses, consider traditional tool calling or add a summarization step.
153
+
154
+ ## API
155
+
156
+ ### `supertools(client, config)`
157
+
158
+ Wrap any supported LLM SDK client with programmatic tool calling.
159
+
160
+ ```typescript
161
+ import { supertools, defineTool, z } from '@supertools-ai/core';
162
+ import { Sandbox } from 'e2b';
163
+ import Anthropic from '@anthropic-ai/sdk';
164
+
165
+ const sandbox = await Sandbox.create('supertools-bun');
166
+ const client = supertools(new Anthropic(), {
167
+ // Required
168
+ tools: [defineTool({ name, description, parameters, execute })],
169
+ sandbox, // E2B sandbox instance
170
+
171
+ // Optional
172
+ debug: false, // Enable debug logging
173
+ instructions: '...', // Additional instructions for the LLM
174
+ onEvent: (event) => {
175
+ // Available event types:
176
+ // - 'code_generated': LLM generated the code
177
+ // - 'sandbox_ready': Sandbox connection established
178
+ // - 'tool_call': Tool invoked (includes tool name and args)
179
+ // - 'tool_result': Tool completed (includes result and durationMs)
180
+ // - 'tool_error': Tool execution failed
181
+ // - 'result': Final execution result
182
+ // - 'stdout': Standard output from sandbox
183
+ // - 'stderr': Standard error from sandbox
184
+ // - 'complete': Execution finished (success or error)
185
+ if (event.type === 'tool_call') console.log(`Calling ${event.tool}...`);
186
+ if (event.type === 'tool_result') console.log(`${event.tool} done in ${event.durationMs}ms`);
187
+ if (event.type === 'result') console.log('Result:', event.data);
188
+ },
189
+ });
190
+
191
+ // Use exactly like the original SDK
192
+ const response = await client.messages.create({
193
+ model: 'claude-haiku-4-5-20251001',
194
+ max_tokens: 1024,
195
+ messages: [{ role: 'user', content: 'Your request here' }],
196
+ });
197
+ ```
198
+
199
+ **Supported SDKs:**
200
+ - ✅ Anthropic SDK (`@anthropic-ai/sdk`)
201
+ - ⏳ OpenAI SDK — [contributions welcome](https://github.com/anthropics/supertools)
202
+ - ⏳ Vercel AI SDK — [contributions welcome](https://github.com/anthropics/supertools)
203
+ - ⏳ Mastra AI — [contributions welcome](https://github.com/anthropics/supertools)
204
+
205
+ ### `defineTool(config)`
206
+
207
+ ```typescript
208
+ const tool = defineTool({
209
+ name: 'searchUsers', // Must match /^[a-zA-Z][a-zA-Z0-9_]*$/
210
+ description: 'Search users', // Used in LLM prompt (min 5 chars)
211
+ parameters: z.object({ // Zod schema for inputs
212
+ query: z.string(),
213
+ limit: z.number().optional().default(10),
214
+ }),
215
+ returns: z.array(UserSchema), // Optional: Zod schema for return type (improves LLM accuracy)
216
+ execute: async (params) => { // Your implementation
217
+ return db.users.search(params);
218
+ },
219
+ });
220
+ // Note: Tool names are converted to snake_case in sandbox code
221
+ // e.g., 'searchUsers' becomes 'search_users' when called
222
+
223
+ // Local tools run entirely in the sandbox (no network round-trip)
224
+ // Use for pure computation when all data is already available
225
+ const calculateStats = defineTool({
226
+ name: 'calculateStats',
227
+ description: 'Calculate statistics for numbers',
228
+ parameters: z.object({ values: z.array(z.number()) }),
229
+ returns: z.object({ mean: z.number(), sum: z.number() }),
230
+ local: true, // Runs in sandbox, not on host
231
+ execute: async ({ values }) => ({
232
+ mean: values.reduce((a, b) => a + b, 0) / values.length,
233
+ sum: values.reduce((a, b) => a + b, 0),
234
+ }),
235
+ });
236
+ ```
237
+
238
+ ### Advanced: Low-level Executor
239
+
240
+ For more control, use the executor directly:
241
+
242
+ ```typescript
243
+ import { createExecutor, defineTool } from '@supertools-ai/core';
244
+ import { Sandbox } from 'e2b';
245
+
246
+ // Create your own LLM adapter
247
+ const myAdapter = {
248
+ async generateCode(request: string, systemPrompt: string) {
249
+ // Call your LLM
250
+ return { code: '...', rawResponse: '...' };
251
+ },
252
+ };
253
+
254
+ const sandbox = await Sandbox.create('supertools-bun');
255
+ const executor = createExecutor({
256
+ llm: myAdapter,
257
+ tools: [/* your tools */],
258
+ sandbox,
259
+ });
260
+
261
+ const result = await executor.run('Your natural language request');
262
+ console.log(result.code); // Generated JavaScript
263
+ console.log(result.result.output); // stdout from execution
264
+ ```
265
+
266
+ ## Architecture
267
+
268
+ ```
269
+ ┌───────────────────────────────────────────────────────────────────┐
270
+ │ Your Application │
271
+ │ │
272
+ │ const client = supertools(new Anthropic(), { tools, sandbox }); │
273
+ │ const response = await client.messages.create({...}); │
274
+ └─────────────────────────────────┬─────────────────────────────────┘
275
+
276
+
277
+ ┌────────────────────────────┐
278
+ │ Supertools Wrapper │
279
+ │ (intercepts SDK calls) │
280
+ └──────────────┬─────────────┘
281
+ │ generates JavaScript
282
+
283
+ ┌───────────────────────────────────────────────────────────────────┐
284
+ │ E2B Cloud Sandbox │
285
+ │ ┌─────────────────────────────────────────────────────────────┐ │
286
+ │ │ Generated Code │ │
287
+ │ │ │ │
288
+ │ │ for (const r of regions) { │ │
289
+ │ │ const data = await query_db({ region: r }); │ │
290
+ │ │ results.push(data); │ │
291
+ │ │ } │ │
292
+ │ │ await send_email({ to: 'ceo', body: summary }); │ │
293
+ │ │ return { regions: results, emailSent: true }; │ │
294
+ │ │ │ │
295
+ │ └────────────────────────────┬────────────────────────────────┘ │
296
+ │ │ tool calls via WebSocket │
297
+ │ ┌────────────────────────────▼────────────────────────────────┐ │
298
+ │ │ Relay Server (Bun) │ │
299
+ │ │ WebSocket bridge to host │ │
300
+ │ └────────────────────────────┬────────────────────────────────┘ │
301
+ └───────────────────────────────┼───────────────────────────────────┘
302
+ │ WebSocket (authenticated)
303
+
304
+ ┌────────────────────────────┐
305
+ │ Relay Client │
306
+ │ (runs on your host) │
307
+ └──────────────┬─────────────┘
308
+
309
+
310
+ ┌────────────────────────────┐
311
+ │ Your Tools │
312
+ │ query_db, send_email │
313
+ │ (execute locally) │
314
+ └────────────────────────────┘
315
+ ```
316
+
317
+ **How it works:**
318
+
319
+ 1. You wrap your SDK client with `supertools()`
320
+ 2. When you call `client.messages.create()`, supertools intercepts it
321
+ 3. The LLM generates JavaScript code that calls your tools
322
+ 4. Code runs in an isolated E2B sandbox (secure, no host access)
323
+ 5. Tool calls are relayed back to your machine via WebSocket
324
+ 6. Your tools execute locally with full access to your systems
325
+ 7. Results flow back to the sandbox, code continues executing
326
+ 8. Final output returns in the expected SDK response format
327
+
328
+ > **Note:** The Relay Server runs inside the pre-built `supertools-bun` E2B template. The Relay Client is included in the `@supertools-ai/core` package and runs on your host.
329
+
330
+ **Security:**
331
+ - LLM-generated code runs in isolated cloud containers
332
+ - Your tools run locally — the sandbox never has direct access
333
+ - WebSocket authenticated with cryptographically secure tokens
334
+ - Tokens are single-use and expire with the sandbox
335
+
336
+ ## When to Use
337
+
338
+ **Use Supertools when:**
339
+ - Calling 3+ tools in sequence
340
+ - Processing data (filter/aggregate before returning)
341
+ - Parallel operations (query 50 endpoints at once)
342
+ - Complex logic (loops, conditionals, early exit)
343
+
344
+ **Use traditional tool calling when:**
345
+ - Single tool calls
346
+ - User needs to approve each step
347
+ - Tools have dangerous side effects
348
+
349
+ ## Roadmap
350
+
351
+ **Completed:**
352
+ - [x] Publish npm package (`@supertools-ai/core`)
353
+ - [x] Publish E2B sandbox template for zero-config setup
354
+
355
+ **Providers:**
356
+ - [x] Anthropic SDK
357
+ - [ ] OpenAI SDK
358
+ - [ ] Vercel AI SDK
359
+ - [ ] Mastra AI
360
+
361
+ **Future:**
362
+ - [ ] Alternative sandbox providers
363
+ - [ ] Python SDK (1:1 API parity)
364
+
365
+ ## Requirements
366
+
367
+ - Node.js 18+ or [Bun](https://bun.sh)
368
+ - [E2B](https://e2b.dev) API key (set `E2B_API_KEY` env var)
369
+ - [Anthropic](https://anthropic.com) API key (set `ANTHROPIC_API_KEY` env var)
370
+
371
+ ## License
372
+
373
+ MIT
374
+
375
+ ---
376
+
377
+ <p align="center">
378
+ Secure sandboxing powered by <a href="https://e2b.dev">E2B</a>
379
+ </p>
@@ -0,0 +1,5 @@
1
+ /**
2
+ * Protocol Validation Tests
3
+ */
4
+ export {};
5
+ //# sourceMappingURL=protocol.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"protocol.test.d.ts","sourceRoot":"","sources":["../../src/__tests__/protocol.test.ts"],"names":[],"mappings":"AAAA;;GAEG"}
@@ -0,0 +1,5 @@
1
+ /**
2
+ * Security Module Tests
3
+ */
4
+ export {};
5
+ //# sourceMappingURL=security.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"security.test.d.ts","sourceRoot":"","sources":["../../src/__tests__/security.test.ts"],"names":[],"mappings":"AAAA;;GAEG"}
@@ -0,0 +1,5 @@
1
+ /**
2
+ * Tool Module Tests
3
+ */
4
+ export {};
5
+ //# sourceMappingURL=tool.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"tool.test.d.ts","sourceRoot":"","sources":["../../src/__tests__/tool.test.ts"],"names":[],"mappings":"AAAA;;GAEG"}
@@ -0,0 +1,64 @@
1
+ export type ErrorCode = 'CODE_GENERATION_ERROR' | 'EXECUTION_ERROR' | 'TOOL_ERROR' | 'RELAY_CONNECTION_ERROR' | 'RELAY_TIMEOUT_ERROR' | 'PROTOCOL_ERROR' | 'SANDBOX_ERROR' | 'AUTHENTICATION_ERROR' | 'VALIDATION_ERROR' | 'CONFIGURATION_ERROR';
2
+ export declare class OPTError extends Error {
3
+ readonly code: ErrorCode;
4
+ readonly cause?: Error;
5
+ constructor(message: string, code: ErrorCode, cause?: Error);
6
+ toJSON(): {
7
+ name: string;
8
+ code: ErrorCode;
9
+ message: string;
10
+ cause: string | undefined;
11
+ };
12
+ }
13
+ export declare class CodeGenerationError extends OPTError {
14
+ constructor(message: string, cause?: Error);
15
+ }
16
+ export declare class ExecutionError extends OPTError {
17
+ readonly output?: string;
18
+ constructor(message: string, output?: string, cause?: Error);
19
+ toJSON(): {
20
+ output: string | undefined;
21
+ name: string;
22
+ code: ErrorCode;
23
+ message: string;
24
+ cause: string | undefined;
25
+ };
26
+ }
27
+ export declare class ToolError extends OPTError {
28
+ readonly toolName: string;
29
+ constructor(message: string, toolName: string, cause?: Error);
30
+ toJSON(): {
31
+ toolName: string;
32
+ name: string;
33
+ code: ErrorCode;
34
+ message: string;
35
+ cause: string | undefined;
36
+ };
37
+ }
38
+ export declare class RelayConnectionError extends OPTError {
39
+ constructor(message: string, cause?: Error);
40
+ }
41
+ export declare class RelayTimeoutError extends OPTError {
42
+ constructor(message: string);
43
+ }
44
+ export declare class SandboxError extends OPTError {
45
+ constructor(message: string, cause?: Error);
46
+ }
47
+ export declare class AuthenticationError extends OPTError {
48
+ constructor(message: string);
49
+ }
50
+ export declare class ConfigurationError extends OPTError {
51
+ constructor(message: string);
52
+ }
53
+ export declare class ValidationError extends OPTError {
54
+ readonly field?: string;
55
+ constructor(message: string, field?: string);
56
+ toJSON(): {
57
+ field: string | undefined;
58
+ name: string;
59
+ code: ErrorCode;
60
+ message: string;
61
+ cause: string | undefined;
62
+ };
63
+ }
64
+ //# sourceMappingURL=errors.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"errors.d.ts","sourceRoot":"","sources":["../src/errors.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,SAAS,GACjB,uBAAuB,GACvB,iBAAiB,GACjB,YAAY,GACZ,wBAAwB,GACxB,qBAAqB,GACrB,gBAAgB,GAChB,eAAe,GACf,sBAAsB,GACtB,kBAAkB,GAClB,qBAAqB,CAAC;AAE1B,qBAAa,QAAS,SAAQ,KAAK;IACjC,QAAQ,CAAC,IAAI,EAAE,SAAS,CAAC;IACzB,QAAQ,CAAC,KAAK,CAAC,EAAE,KAAK,CAAC;gBAEX,OAAO,EAAE,MAAM,EAAE,IAAI,EAAE,SAAS,EAAE,KAAK,CAAC,EAAE,KAAK;IAQ3D,MAAM;;;;;;CAQP;AAED,qBAAa,mBAAoB,SAAQ,QAAQ;gBACnC,OAAO,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,KAAK;CAI3C;AAED,qBAAa,cAAe,SAAQ,QAAQ;IAC1C,QAAQ,CAAC,MAAM,CAAC,EAAE,MAAM,CAAC;gBAEb,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,KAAK;IAMlD,MAAM;;;;;;;CAGhB;AAED,qBAAa,SAAU,SAAQ,QAAQ;IACrC,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAC;gBAEd,OAAO,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,KAAK;IAMnD,MAAM;;;;;;;CAGhB;AAED,qBAAa,oBAAqB,SAAQ,QAAQ;gBACpC,OAAO,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,KAAK;CAI3C;AAED,qBAAa,iBAAkB,SAAQ,QAAQ;gBACjC,OAAO,EAAE,MAAM;CAI5B;AAED,qBAAa,YAAa,SAAQ,QAAQ;gBAC5B,OAAO,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,KAAK;CAI3C;AAED,qBAAa,mBAAoB,SAAQ,QAAQ;gBACnC,OAAO,EAAE,MAAM;CAI5B;AAED,qBAAa,kBAAmB,SAAQ,QAAQ;gBAClC,OAAO,EAAE,MAAM;CAI5B;AAED,qBAAa,eAAgB,SAAQ,QAAQ;IAC3C,QAAQ,CAAC,KAAK,CAAC,EAAE,MAAM,CAAC;gBAEZ,OAAO,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,MAAM;IAMlC,MAAM;;;;;;;CAGhB"}
@@ -0,0 +1,38 @@
1
+ /**
2
+ * Programmatic Executor
3
+ *
4
+ * Orchestrates the complete flow:
5
+ * 1. LLM generates JavaScript code
6
+ * 2. Code executes in secure E2B Bun sandbox
7
+ * 3. Tool calls relay back to host via WebSocket
8
+ * 4. Results return to user
9
+ */
10
+ import type { NormalizedTool } from './tool';
11
+ import type { ExecutorConfig, ExecutionResult, ProgrammaticResult, LLMAdapter } from './types';
12
+ export interface CreateExecutorOptions extends ExecutorConfig {
13
+ readonly llm: LLMAdapter;
14
+ }
15
+ export declare function createExecutor(options: CreateExecutorOptions): ProgrammaticExecutor;
16
+ export declare class ProgrammaticExecutor {
17
+ private readonly originalTools;
18
+ private readonly normalizedTools;
19
+ private readonly toolsMap;
20
+ private readonly llm;
21
+ private readonly sandbox;
22
+ private readonly instructions?;
23
+ private readonly debug;
24
+ private readonly onEvent?;
25
+ constructor(options: CreateExecutorOptions);
26
+ private emit;
27
+ run(userRequest: string): Promise<ProgrammaticResult>;
28
+ executeCode(code: string): Promise<ExecutionResult>;
29
+ private executeCodeWithRelay;
30
+ getTools(): readonly NormalizedTool[];
31
+ getToolDocumentation(): string;
32
+ private generateCode;
33
+ private connectRelay;
34
+ private cleanup;
35
+ private log;
36
+ private logTiming;
37
+ }
38
+ //# sourceMappingURL=executor.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"executor.d.ts","sourceRoot":"","sources":["../src/executor.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AASH,OAAO,KAAK,EAAE,cAAc,EAAW,MAAM,QAAQ,CAAC;AAEtD,OAAO,KAAK,EAAE,cAAc,EAAE,eAAe,EAAE,kBAAkB,EAAE,UAAU,EAAkB,MAAM,SAAS,CAAC;AAE/G,MAAM,WAAW,qBAAsB,SAAQ,cAAc;IAC3D,QAAQ,CAAC,GAAG,EAAE,UAAU,CAAC;CAC1B;AAED,wBAAgB,cAAc,CAAC,OAAO,EAAE,qBAAqB,GAAG,oBAAoB,CAEnF;AAED,qBAAa,oBAAoB;IAC/B,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAqB;IACnD,OAAO,CAAC,QAAQ,CAAC,eAAe,CAAmB;IACnD,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAA8B;IACvD,OAAO,CAAC,QAAQ,CAAC,GAAG,CAAa;IACjC,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAU;IAClC,OAAO,CAAC,QAAQ,CAAC,YAAY,CAAC,CAAS;IACvC,OAAO,CAAC,QAAQ,CAAC,KAAK,CAAU;IAChC,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAC,CAAkC;gBAE/C,OAAO,EAAE,qBAAqB;IAW1C,OAAO,CAAC,IAAI;IAIN,GAAG,CAAC,WAAW,EAAE,MAAM,GAAG,OAAO,CAAC,kBAAkB,CAAC;IA6CrD,WAAW,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,eAAe,CAAC;YAW3C,oBAAoB;IA8ClC,QAAQ,IAAI,SAAS,cAAc,EAAE;IAIrC,oBAAoB,IAAI,MAAM;YAIhB,YAAY;YAWZ,YAAY;YAsBZ,OAAO;IAcrB,OAAO,CAAC,GAAG;IAIX,OAAO,CAAC,SAAS;CAMlB"}