@economic/agents 0.0.1-alpha.10 → 0.0.1-alpha.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +366 -49
- package/dist/index.d.mts +72 -509
- package/dist/index.mjs +230 -460
- package/package.json +14 -17
- package/schema/audit_events.sql +15 -0
package/README.md
CHANGED
|
@@ -1,87 +1,404 @@
|
|
|
1
1
|
# @economic/agents
|
|
2
2
|
|
|
3
|
-
Base
|
|
3
|
+
Base class and utilities for building LLM chat agents on Cloudflare's Agents SDK with lazy skill loading, optional message compaction, and built-in audit logging.
|
|
4
4
|
|
|
5
|
-
|
|
5
|
+
```bash
|
|
6
|
+
npm install @economic/agents ai @cloudflare/ai-chat
|
|
7
|
+
```
|
|
6
8
|
|
|
7
|
-
|
|
8
|
-
- **`AIChatAgentBase`** — base class for when you need full control over `streamText`. Implement `getTools()`, `getSkills()`, and your own `onChatMessage` decorated with `@withSkills`. Compaction is **disabled by default**.
|
|
9
|
-
- **`withSkills`** — method decorator used with `AIChatAgentBase`.
|
|
10
|
-
- **`createSkills`** — lower-level factory for wiring lazy skill loading into any agent subclass yourself.
|
|
11
|
-
- **`filterEphemeralMessages`**, **`injectGuidance`** — utilities used internally, exported for custom wiring.
|
|
12
|
-
- **`compactIfNeeded`**, **`compactMessages`**, **`estimateMessagesTokens`**, **`COMPACT_TOKEN_THRESHOLD`** — compaction utilities, exported for use with `AIChatAgentBase` or fully custom agents.
|
|
13
|
-
- Types: `Tool`, `Skill`, `SkillsConfig`, `SkillsResult`, `SkillContext`.
|
|
9
|
+
---
|
|
14
10
|
|
|
15
|
-
|
|
11
|
+
## Overview
|
|
16
12
|
|
|
17
|
-
|
|
13
|
+
`@economic/agents` provides:
|
|
18
14
|
|
|
19
|
-
|
|
15
|
+
- **`AIChatAgent`** — an abstract Cloudflare Durable Object base class. Implement `onChatMessage`, call `this.buildLLMParams()`, and pass the result to `streamText` from the AI SDK.
|
|
16
|
+
- **`buildLLMParams`** — the standalone version of the above, for use outside of `AIChatAgent` or in custom agent implementations.
|
|
20
17
|
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
18
|
+
Skills and compaction are AI SDK concerns — they control what goes to the LLM. The CF layer is responsible for WebSockets, Durable Objects, and message persistence. These are kept separate.
|
|
19
|
+
|
|
20
|
+
---
|
|
21
|
+
|
|
22
|
+
## Quick start
|
|
23
|
+
|
|
24
|
+
```typescript
|
|
25
|
+
import { streamText } from "ai";
|
|
26
|
+
import { openai } from "@ai-sdk/openai";
|
|
27
|
+
import { tool } from "ai";
|
|
28
|
+
import { z } from "zod";
|
|
29
|
+
import { AIChatAgent } from "@economic/agents";
|
|
30
|
+
import type { Skill } from "@economic/agents";
|
|
31
|
+
|
|
32
|
+
const searchSkill: Skill = {
|
|
33
|
+
name: "search",
|
|
34
|
+
description: "Web search tools",
|
|
35
|
+
guidance: "Use search_web for any queries requiring up-to-date information.",
|
|
36
|
+
tools: {
|
|
37
|
+
search_web: tool({
|
|
38
|
+
description: "Search the web",
|
|
39
|
+
inputSchema: z.object({ query: z.string() }),
|
|
40
|
+
execute: async ({ query }) => `Results for: ${query}`,
|
|
41
|
+
}),
|
|
42
|
+
},
|
|
43
|
+
};
|
|
44
|
+
|
|
45
|
+
export class MyAgent extends AIChatAgent<Env> {
|
|
46
|
+
async onChatMessage(onFinish, options) {
|
|
47
|
+
const params = await this.buildLLMParams({
|
|
48
|
+
options,
|
|
49
|
+
onFinish,
|
|
50
|
+
model: openai("gpt-4o"),
|
|
51
|
+
system: "You are a helpful assistant.",
|
|
52
|
+
skills: [searchSkill],
|
|
53
|
+
});
|
|
54
|
+
return streamText(params).toUIMessageStreamResponse();
|
|
55
|
+
}
|
|
56
|
+
}
|
|
25
57
|
```
|
|
26
58
|
|
|
59
|
+
No D1 database needed — skill state is persisted to Durable Object SQLite automatically.
|
|
60
|
+
|
|
27
61
|
---
|
|
28
62
|
|
|
29
|
-
##
|
|
63
|
+
## Prerequisites
|
|
64
|
+
|
|
65
|
+
### Cloudflare environment
|
|
66
|
+
|
|
67
|
+
Your agent class is a Durable Object. Declare it in `wrangler.jsonc`:
|
|
68
|
+
|
|
69
|
+
```jsonc
|
|
70
|
+
{
|
|
71
|
+
"durable_objects": {
|
|
72
|
+
"bindings": [{ "name": "MyAgent", "class_name": "MyAgent" }],
|
|
73
|
+
},
|
|
74
|
+
"migrations": [{ "tag": "v1", "new_sqlite_classes": ["MyAgent"] }],
|
|
75
|
+
}
|
|
76
|
+
```
|
|
30
77
|
|
|
31
|
-
|
|
78
|
+
Then run `wrangler types` to generate typed `Env` bindings.
|
|
79
|
+
|
|
80
|
+
---
|
|
81
|
+
|
|
82
|
+
## `AIChatAgent`
|
|
83
|
+
|
|
84
|
+
Extend this class and implement `onChatMessage`. Call `this.buildLLMParams()` to prepare the call, then pass the result to `streamText` or `generateText`.
|
|
32
85
|
|
|
33
86
|
```typescript
|
|
87
|
+
import { streamText } from "ai";
|
|
34
88
|
import { AIChatAgent } from "@economic/agents";
|
|
35
89
|
|
|
36
|
-
export class
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
getTools() {
|
|
41
|
-
return [myAlwaysOnTool];
|
|
42
|
-
}
|
|
43
|
-
getSkills() {
|
|
44
|
-
return [searchSkill, codeSkill];
|
|
45
|
-
}
|
|
46
|
-
getSystemPrompt() {
|
|
47
|
-
return "You are a helpful assistant.";
|
|
48
|
-
}
|
|
90
|
+
export class ChatAgent extends AIChatAgent<Env> {
|
|
91
|
+
async onChatMessage(onFinish, options) {
|
|
92
|
+
const body = (options?.body ?? {}) as { userTier: "free" | "pro" };
|
|
93
|
+
const model = body.userTier === "pro" ? openai("gpt-4o") : openai("gpt-4o-mini");
|
|
49
94
|
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
95
|
+
const params = await this.buildLLMParams({
|
|
96
|
+
options,
|
|
97
|
+
onFinish,
|
|
98
|
+
model,
|
|
99
|
+
system: "You are a helpful assistant.",
|
|
100
|
+
skills: [searchSkill, calcSkill], // available for on-demand loading
|
|
101
|
+
tools: { alwaysOnTool }, // always active, regardless of loaded skills
|
|
102
|
+
});
|
|
103
|
+
return streamText(params).toUIMessageStreamResponse();
|
|
53
104
|
}
|
|
54
105
|
}
|
|
55
106
|
```
|
|
56
107
|
|
|
57
|
-
|
|
108
|
+
### `this.buildLLMParams(config)`
|
|
109
|
+
|
|
110
|
+
Protected method on `AIChatAgent`. Wraps the standalone `buildLLMParams` function with:
|
|
58
111
|
|
|
59
|
-
|
|
112
|
+
- `messages` pre-filled from `this.messages`
|
|
113
|
+
- `activeSkills` pre-filled from `await this.getLoadedSkills()`
|
|
114
|
+
- `log` injected into `experimental_context` alongside `options.body`
|
|
115
|
+
- Automatic error logging for non-clean finish reasons
|
|
60
116
|
|
|
61
|
-
|
|
117
|
+
Config is everything accepted by the standalone `buildLLMParams` except `messages` and `activeSkills`.
|
|
62
118
|
|
|
63
|
-
|
|
119
|
+
### `getLoadedSkills()`
|
|
120
|
+
|
|
121
|
+
Protected method on `AIChatAgent`. Returns skill names persisted from previous turns (read from DO SQLite). Used internally by `this.buildLLMParams()`.
|
|
122
|
+
|
|
123
|
+
### `persistMessages` (automatic)
|
|
124
|
+
|
|
125
|
+
When `persistMessages` runs at the end of each turn, it:
|
|
126
|
+
|
|
127
|
+
1. Scans `activate_skill` tool results for newly loaded skill state.
|
|
128
|
+
2. Writes the updated skill name list to DO SQLite (no D1 needed).
|
|
129
|
+
3. Logs a turn summary via `log()`.
|
|
130
|
+
4. Strips all `activate_skill` and `list_capabilities` messages from history.
|
|
131
|
+
5. Delegates to the CF base `persistMessages` for message storage and WS broadcast.
|
|
132
|
+
|
|
133
|
+
### `onConnect` (automatic)
|
|
134
|
+
|
|
135
|
+
Replays the full message history to newly connected clients — without this, a page refresh would show an empty UI even though history is in DO SQLite.
|
|
136
|
+
|
|
137
|
+
---
|
|
138
|
+
|
|
139
|
+
## `buildLLMParams` (standalone)
|
|
140
|
+
|
|
141
|
+
The standalone `buildLLMParams` builds the full parameter object for a Vercel AI SDK `streamText` or `generateText` call. Use this directly only if you are not extending `AIChatAgent`, or need fine-grained control.
|
|
64
142
|
|
|
65
143
|
```typescript
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
144
|
+
import { buildLLMParams } from "@economic/agents";
|
|
145
|
+
|
|
146
|
+
const params = await buildLLMParams({
|
|
147
|
+
options, // OnChatMessageOptions — extracts abortSignal and body
|
|
148
|
+
onFinish, // StreamTextOnFinishCallback<ToolSet>
|
|
149
|
+
model, // LanguageModel
|
|
150
|
+
messages: this.messages, // UIMessage[] — converted to ModelMessage[] internally
|
|
151
|
+
activeSkills: await this.getLoadedSkills(),
|
|
152
|
+
system: "You are a helpful assistant.",
|
|
153
|
+
skills: [searchSkill, codeSkill],
|
|
154
|
+
tools: { myAlwaysOnTool },
|
|
155
|
+
compact: { model: openai("gpt-4o-mini"), maxMessages: 30 },
|
|
156
|
+
stopWhen: stepCountIs(20), // defaults to stepCountIs(20)
|
|
157
|
+
});
|
|
158
|
+
|
|
159
|
+
return streamText(params).toUIMessageStreamResponse();
|
|
160
|
+
// or: generateText(params);
|
|
69
161
|
```
|
|
70
162
|
|
|
71
|
-
|
|
163
|
+
| Parameter | Type | Required | Description |
|
|
164
|
+
| -------------- | ----------------------------------------------- | -------- | ------------------------------------------------------------------------------ |
|
|
165
|
+
| `options` | `OnChatMessageOptions \| undefined` | Yes | CF options object. Extracts `abortSignal` and `experimental_context`. |
|
|
166
|
+
| `onFinish` | `StreamTextOnFinishCallback<ToolSet>` | Yes | Called when the stream completes. |
|
|
167
|
+
| `model` | `LanguageModel` | Yes | The language model to use. |
|
|
168
|
+
| `messages` | `UIMessage[]` | Yes | Conversation history. Converted to `ModelMessage[]` internally. |
|
|
169
|
+
| `activeSkills` | `string[]` | No | Names of skills loaded in previous turns. Pass `await this.getLoadedSkills()`. |
|
|
170
|
+
| `skills` | `Skill[]` | No | Skills available for on-demand loading. Wires up meta-tools automatically. |
|
|
171
|
+
| `system` | `string` | No | Base system prompt. |
|
|
172
|
+
| `tools` | `ToolSet` | No | Always-on tools, active every turn regardless of loaded skills. |
|
|
173
|
+
| `compact` | `{ model: LanguageModel; maxMessages: number }` | No | When provided, compacts old messages before sending to the model. |
|
|
174
|
+
| `stopWhen` | `StopCondition` | No | Stop condition. Defaults to `stepCountIs(20)`. |
|
|
175
|
+
|
|
176
|
+
When `skills` are provided, `buildLLMParams`:
|
|
177
|
+
|
|
178
|
+
- Registers `activate_skill` and `list_capabilities` meta-tools.
|
|
179
|
+
- Sets initial `activeTools` (meta + always-on + loaded skill tools).
|
|
180
|
+
- Wires up `prepareStep` to update `activeTools` after each step.
|
|
181
|
+
- Composes `system` with guidance from loaded skills.
|
|
182
|
+
|
|
183
|
+
---
|
|
184
|
+
|
|
185
|
+
## Defining skills
|
|
72
186
|
|
|
73
187
|
```typescript
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
}
|
|
188
|
+
import { tool } from "ai";
|
|
189
|
+
import { z } from "zod";
|
|
190
|
+
import type { Skill } from "@economic/agents";
|
|
191
|
+
|
|
192
|
+
// Skill with guidance — injected into the system prompt when the skill is loaded
|
|
193
|
+
export const calculatorSkill: Skill = {
|
|
194
|
+
name: "calculator",
|
|
195
|
+
description: "Mathematical calculation and expression evaluation",
|
|
196
|
+
guidance:
|
|
197
|
+
"Use the calculate tool for any arithmetic or algebraic expressions. " +
|
|
198
|
+
"Always show the expression you are evaluating.",
|
|
199
|
+
tools: {
|
|
200
|
+
calculate: tool({
|
|
201
|
+
description: "Evaluate a mathematical expression and return the result.",
|
|
202
|
+
inputSchema: z.object({
|
|
203
|
+
expression: z.string().describe('e.g. "2 + 2", "Math.sqrt(144)"'),
|
|
204
|
+
}),
|
|
205
|
+
execute: async ({ expression }) => {
|
|
206
|
+
const result = new Function(`"use strict"; return (${expression})`)();
|
|
207
|
+
return `${expression} = ${result}`;
|
|
208
|
+
},
|
|
209
|
+
}),
|
|
210
|
+
},
|
|
211
|
+
};
|
|
212
|
+
|
|
213
|
+
// Skill without guidance — tools are self-explanatory
|
|
214
|
+
export const datetimeSkill: Skill = {
|
|
215
|
+
name: "datetime",
|
|
216
|
+
description: "Current date and time information in any timezone",
|
|
217
|
+
tools: {
|
|
218
|
+
get_current_datetime: tool({
|
|
219
|
+
description: "Get the current date and time in an optional IANA timezone.",
|
|
220
|
+
inputSchema: z.object({
|
|
221
|
+
timezone: z.string().optional().describe('e.g. "Europe/Copenhagen"'),
|
|
222
|
+
}),
|
|
223
|
+
execute: async ({ timezone = "UTC" }) =>
|
|
224
|
+
new Date().toLocaleString("en-GB", {
|
|
225
|
+
timeZone: timezone,
|
|
226
|
+
dateStyle: "full",
|
|
227
|
+
timeStyle: "long",
|
|
228
|
+
}),
|
|
229
|
+
}),
|
|
230
|
+
},
|
|
231
|
+
};
|
|
77
232
|
```
|
|
78
233
|
|
|
79
|
-
|
|
234
|
+
### `Skill` fields
|
|
235
|
+
|
|
236
|
+
| Field | Type | Required | Description |
|
|
237
|
+
| ------------- | --------- | -------- | ---------------------------------------------------------------------------- |
|
|
238
|
+
| `name` | `string` | Yes | Unique identifier used by `activate_skill` and for DO SQLite persistence. |
|
|
239
|
+
| `description` | `string` | Yes | One-line description shown in the `activate_skill` schema. |
|
|
240
|
+
| `guidance` | `string` | No | Instructions appended to the `system` prompt when this skill is loaded. |
|
|
241
|
+
| `tools` | `ToolSet` | Yes | Record of tool names to `tool()` definitions. Names must be globally unique. |
|
|
242
|
+
|
|
243
|
+
---
|
|
244
|
+
|
|
245
|
+
## Compaction
|
|
246
|
+
|
|
247
|
+
When `compact` is provided to `buildLLMParams`, it compacts `messages` before converting and sending to the model:
|
|
248
|
+
|
|
249
|
+
1. The message list is split into an older window and a recent verbatim tail (`maxMessages`).
|
|
250
|
+
2. A model call generates a concise summary of the older window.
|
|
251
|
+
3. That summary + the verbatim tail is what gets sent to the LLM.
|
|
252
|
+
4. Full history in DO SQLite is unaffected — compaction is in-memory only.
|
|
80
253
|
|
|
81
254
|
```typescript
|
|
82
|
-
|
|
83
|
-
|
|
255
|
+
const params = await this.buildLLMParams({
|
|
256
|
+
options,
|
|
257
|
+
onFinish,
|
|
258
|
+
model: openai("gpt-4o"),
|
|
259
|
+
system: "...",
|
|
260
|
+
compact: {
|
|
261
|
+
model: openai("gpt-4o-mini"), // cheaper model for summarisation
|
|
262
|
+
maxMessages: 30, // keep last 30 messages verbatim
|
|
263
|
+
},
|
|
264
|
+
});
|
|
265
|
+
return streamText(params).toUIMessageStreamResponse();
|
|
266
|
+
```
|
|
267
|
+
|
|
268
|
+
---
|
|
269
|
+
|
|
270
|
+
## Built-in meta tools
|
|
271
|
+
|
|
272
|
+
Two meta tools are automatically registered when `skills` are provided. You do not need to define or wire them.
|
|
273
|
+
|
|
274
|
+
### `activate_skill`
|
|
275
|
+
|
|
276
|
+
Loads one or more skills by name, making their tools available for the rest of the conversation. The LLM calls this when it needs capabilities it does not currently have.
|
|
277
|
+
|
|
278
|
+
- Loading is idempotent — calling for an already-loaded skill is a no-op.
|
|
279
|
+
- The skills available are exactly those passed as `skills` — filter by request body to control access.
|
|
280
|
+
- When skills are successfully loaded, the new state is embedded in the tool result. `persistMessages` extracts it and writes to DO SQLite.
|
|
281
|
+
- All `activate_skill` messages are stripped from history before persistence — state is restored from DO SQLite, not from message history.
|
|
282
|
+
|
|
283
|
+
### `list_capabilities`
|
|
284
|
+
|
|
285
|
+
Returns a summary of active tools, loaded skills, and skills available to load. Always stripped from history before persistence.
|
|
286
|
+
|
|
287
|
+
---
|
|
288
|
+
|
|
289
|
+
## Passing request context to tools
|
|
290
|
+
|
|
291
|
+
Pass arbitrary data via the `body` option of `useAgentChat`. It arrives as `experimental_context` in tool `execute` functions.
|
|
292
|
+
|
|
293
|
+
When using `this.buildLLMParams()`, the context is automatically composed: your body fields plus a `log` function for writing audit events. Use `AgentContext<TBody>` to type it:
|
|
294
|
+
|
|
295
|
+
```typescript
|
|
296
|
+
// types.ts
|
|
297
|
+
import type { AgentContext } from "@economic/agents";
|
|
298
|
+
|
|
299
|
+
interface AgentBody {
|
|
300
|
+
authorization: string;
|
|
301
|
+
userId: string;
|
|
84
302
|
}
|
|
303
|
+
|
|
304
|
+
export type ToolContext = AgentContext<AgentBody>;
|
|
305
|
+
```
|
|
306
|
+
|
|
307
|
+
```typescript
|
|
308
|
+
// Client
|
|
309
|
+
useAgentChat({ body: { authorization: token, userId: "u_123" } });
|
|
310
|
+
|
|
311
|
+
// Tool
|
|
312
|
+
execute: async (args, { experimental_context }) => {
|
|
313
|
+
const ctx = experimental_context as ToolContext;
|
|
314
|
+
await ctx.log("tool called", { userId: ctx.userId });
|
|
315
|
+
const data = await fetchSomething(ctx.authorization);
|
|
316
|
+
return data;
|
|
317
|
+
};
|
|
318
|
+
```
|
|
319
|
+
|
|
320
|
+
`log` is a no-op when `AUDIT_DB` is not bound — so no changes are needed in tools when running without a D1 database.
|
|
321
|
+
|
|
322
|
+
---
|
|
323
|
+
|
|
324
|
+
## Audit logging — D1 setup
|
|
325
|
+
|
|
326
|
+
`AIChatAgent` writes audit events to a Cloudflare D1 database when `AUDIT_DB` is bound on the environment. The table is shared across all agent workers — create it once.
|
|
327
|
+
|
|
328
|
+
### 1. Create the D1 database
|
|
329
|
+
|
|
330
|
+
In the [Cloudflare dashboard](https://dash.cloudflare.com) → **Workers & Pages** → **D1** → **Create database**. Note the database name and ID.
|
|
331
|
+
|
|
332
|
+
### 2. Create the schema
|
|
333
|
+
|
|
334
|
+
Open the database in the D1 dashboard, select **Console**, and run the contents of [`schema/audit_events.sql`](schema/audit_events.sql):
|
|
335
|
+
|
|
336
|
+
```sql
|
|
337
|
+
CREATE TABLE IF NOT EXISTS audit_events (
|
|
338
|
+
id TEXT PRIMARY KEY,
|
|
339
|
+
agent_name TEXT NOT NULL,
|
|
340
|
+
durable_object_id TEXT NOT NULL,
|
|
341
|
+
message TEXT NOT NULL,
|
|
342
|
+
payload TEXT,
|
|
343
|
+
created_at TEXT NOT NULL
|
|
344
|
+
);
|
|
345
|
+
CREATE INDEX IF NOT EXISTS audit_events_do ON audit_events(durable_object_id);
|
|
346
|
+
CREATE INDEX IF NOT EXISTS audit_events_ts ON audit_events(created_at);
|
|
347
|
+
```
|
|
348
|
+
|
|
349
|
+
Safe to re-run — all statements use `IF NOT EXISTS`.
|
|
350
|
+
|
|
351
|
+
### 3. Bind it in `wrangler.jsonc`
|
|
352
|
+
|
|
353
|
+
```jsonc
|
|
354
|
+
"d1_databases": [
|
|
355
|
+
{ "binding": "AUDIT_DB", "database_name": "agents", "database_id": "YOUR_DB_ID" }
|
|
356
|
+
]
|
|
85
357
|
```
|
|
86
358
|
|
|
87
|
-
|
|
359
|
+
Then run `wrangler types` to regenerate the `Env` type.
|
|
360
|
+
|
|
361
|
+
### 4. Seed local development
|
|
362
|
+
|
|
363
|
+
```bash
|
|
364
|
+
npm run db:setup
|
|
365
|
+
```
|
|
366
|
+
|
|
367
|
+
This runs the schema SQL against the local D1 SQLite file (`.wrangler/state/`). Re-running is harmless.
|
|
368
|
+
|
|
369
|
+
If `AUDIT_DB` is not bound, all `log()` calls are silent no-ops — the agent works without it.
|
|
370
|
+
|
|
371
|
+
---
|
|
372
|
+
|
|
373
|
+
## API reference
|
|
374
|
+
|
|
375
|
+
### Classes
|
|
376
|
+
|
|
377
|
+
| Export | Description |
|
|
378
|
+
| ------------- | --------------------------------------------------------------------------------------------------------------------- |
|
|
379
|
+
| `AIChatAgent` | Abstract CF Durable Object base class. Implement `onChatMessage`. Manages skill state, history replay, and audit log. |
|
|
380
|
+
|
|
381
|
+
### Functions
|
|
382
|
+
|
|
383
|
+
| Export | Signature | Description |
|
|
384
|
+
| ---------------- | -------------------------------------- | -------------------------------------------------------------------- |
|
|
385
|
+
| `buildLLMParams` | `async (config) => Promise<LLMParams>` | Builds the full parameter object for `streamText` or `generateText`. |
|
|
386
|
+
|
|
387
|
+
### Types
|
|
388
|
+
|
|
389
|
+
| Export | Description |
|
|
390
|
+
| ---------------------- | ------------------------------------------------------------------------------- |
|
|
391
|
+
| `Skill` | A named group of tools with optional guidance. |
|
|
392
|
+
| `CompactOptions` | `{ model: LanguageModel; maxMessages: number }` |
|
|
393
|
+
| `AgentContext<TBody>` | Request body type merged with `log`. Use as the type of `experimental_context`. |
|
|
394
|
+
| `BuildLLMParamsConfig` | Config type for the standalone `buildLLMParams` function. |
|
|
395
|
+
|
|
396
|
+
---
|
|
397
|
+
|
|
398
|
+
## Development
|
|
399
|
+
|
|
400
|
+
```bash
|
|
401
|
+
npm install # install dependencies
|
|
402
|
+
npm test # run tests
|
|
403
|
+
npm pack # build
|
|
404
|
+
```
|