@economic/agents 0.0.1-alpha.11 → 0.0.1-alpha.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +149 -138
- package/dist/index.d.mts +94 -89
- package/dist/index.mjs +205 -143
- package/package.json +3 -2
- package/schema/audit_events.sql +15 -0
package/README.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# @economic/agents
|
|
2
2
|
|
|
3
|
-
Base class and
|
|
3
|
+
Base class and utilities for building LLM chat agents on Cloudflare's Agents SDK with lazy skill loading, optional message compaction, and built-in audit logging.
|
|
4
4
|
|
|
5
5
|
```bash
|
|
6
6
|
npm install @economic/agents ai @cloudflare/ai-chat
|
|
@@ -12,8 +12,8 @@ npm install @economic/agents ai @cloudflare/ai-chat
|
|
|
12
12
|
|
|
13
13
|
`@economic/agents` provides:
|
|
14
14
|
|
|
15
|
-
- **`AIChatAgent`** — an abstract Cloudflare Durable Object base class. Implement `onChatMessage` and
|
|
16
|
-
- **`
|
|
15
|
+
- **`AIChatAgent`** — an abstract Cloudflare Durable Object base class. Implement `onChatMessage`, call `this.buildLLMParams()`, and pass the result to `streamText` from the AI SDK.
|
|
16
|
+
- **`buildLLMParams`** — the standalone version of the above, for use outside of `AIChatAgent` or in custom agent implementations.
|
|
17
17
|
|
|
18
18
|
Skills and compaction are AI SDK concerns — they control what goes to the LLM. The CF layer is responsible for WebSockets, Durable Objects, and message persistence. These are kept separate.
|
|
19
19
|
|
|
@@ -22,11 +22,12 @@ Skills and compaction are AI SDK concerns — they control what goes to the LLM.
|
|
|
22
22
|
## Quick start
|
|
23
23
|
|
|
24
24
|
```typescript
|
|
25
|
-
import {
|
|
26
|
-
import type { Skill } from "@economic/agents";
|
|
25
|
+
import { streamText } from "ai";
|
|
27
26
|
import { openai } from "@ai-sdk/openai";
|
|
28
|
-
import {
|
|
27
|
+
import { tool } from "ai";
|
|
29
28
|
import { z } from "zod";
|
|
29
|
+
import { AIChatAgent } from "@economic/agents";
|
|
30
|
+
import type { Skill } from "@economic/agents";
|
|
30
31
|
|
|
31
32
|
const searchSkill: Skill = {
|
|
32
33
|
name: "search",
|
|
@@ -43,17 +44,14 @@ const searchSkill: Skill = {
|
|
|
43
44
|
|
|
44
45
|
export class MyAgent extends AIChatAgent<Env> {
|
|
45
46
|
async onChatMessage(onFinish, options) {
|
|
46
|
-
const
|
|
47
|
+
const params = await this.buildLLMParams({
|
|
48
|
+
options,
|
|
49
|
+
onFinish,
|
|
47
50
|
model: openai("gpt-4o"),
|
|
48
|
-
messages: await convertToModelMessages(this.messages),
|
|
49
51
|
system: "You are a helpful assistant.",
|
|
50
52
|
skills: [searchSkill],
|
|
51
|
-
activeSkills: await this.getLoadedSkills(),
|
|
52
|
-
stopWhen: stepCountIs(20),
|
|
53
|
-
abortSignal: options?.abortSignal,
|
|
54
|
-
onFinish,
|
|
55
53
|
});
|
|
56
|
-
return
|
|
54
|
+
return streamText(params).toUIMessageStreamResponse();
|
|
57
55
|
}
|
|
58
56
|
}
|
|
59
57
|
```
|
|
@@ -83,36 +81,44 @@ Run `wrangler types` after to generate typed `Env` bindings.
|
|
|
83
81
|
|
|
84
82
|
## `AIChatAgent`
|
|
85
83
|
|
|
86
|
-
Extend this class and implement `onChatMessage`. Call
|
|
84
|
+
Extend this class and implement `onChatMessage`. Call `this.buildLLMParams()` to prepare the call, then pass the result to `streamText` or `generateText`.
|
|
87
85
|
|
|
88
86
|
```typescript
|
|
89
|
-
import {
|
|
90
|
-
import
|
|
87
|
+
import { streamText } from "ai";
|
|
88
|
+
import { AIChatAgent } from "@economic/agents";
|
|
91
89
|
|
|
92
90
|
export class ChatAgent extends AIChatAgent<Env> {
|
|
93
91
|
async onChatMessage(onFinish, options) {
|
|
94
|
-
const body = (options?.body ?? {}) as
|
|
92
|
+
const body = (options?.body ?? {}) as { userTier: "free" | "pro" };
|
|
95
93
|
const model = body.userTier === "pro" ? openai("gpt-4o") : openai("gpt-4o-mini");
|
|
96
94
|
|
|
97
|
-
const
|
|
95
|
+
const params = await this.buildLLMParams({
|
|
96
|
+
options,
|
|
97
|
+
onFinish,
|
|
98
98
|
model,
|
|
99
|
-
messages: await convertToModelMessages(this.messages),
|
|
100
99
|
system: "You are a helpful assistant.",
|
|
101
100
|
skills: [searchSkill, calcSkill], // available for on-demand loading
|
|
102
|
-
|
|
103
|
-
tools: { alwaysOnTool }, // always active
|
|
104
|
-
stopWhen: stepCountIs(20),
|
|
105
|
-
abortSignal: options?.abortSignal,
|
|
106
|
-
onFinish,
|
|
101
|
+
tools: { alwaysOnTool }, // always active, regardless of loaded skills
|
|
107
102
|
});
|
|
108
|
-
return
|
|
103
|
+
return streamText(params).toUIMessageStreamResponse();
|
|
109
104
|
}
|
|
110
105
|
}
|
|
111
106
|
```
|
|
112
107
|
|
|
108
|
+
### `this.buildLLMParams(config)`
|
|
109
|
+
|
|
110
|
+
Protected method on `AIChatAgent`. Wraps the standalone `buildLLMParams` function with:
|
|
111
|
+
|
|
112
|
+
- `messages` pre-filled from `this.messages`
|
|
113
|
+
- `activeSkills` pre-filled from `await this.getLoadedSkills()`
|
|
114
|
+
- `log` injected into `experimental_context` alongside `options.body`
|
|
115
|
+
- Automatic error logging for non-clean finish reasons
|
|
116
|
+
|
|
117
|
+
Config is everything accepted by the standalone `buildLLMParams` except `messages` and `activeSkills`.
|
|
118
|
+
|
|
113
119
|
### `getLoadedSkills()`
|
|
114
120
|
|
|
115
|
-
Protected method on `AIChatAgent`. Returns skill names persisted from previous turns (read from DO SQLite).
|
|
121
|
+
Protected method on `AIChatAgent`. Returns skill names persisted from previous turns (read from DO SQLite). Used internally by `this.buildLLMParams()`.
|
|
116
122
|
|
|
117
123
|
### `persistMessages` (automatic)
|
|
118
124
|
|
|
@@ -120,8 +126,9 @@ When `persistMessages` runs at the end of each turn, it:
|
|
|
120
126
|
|
|
121
127
|
1. Scans `activate_skill` tool results for newly loaded skill state.
|
|
122
128
|
2. Writes the updated skill name list to DO SQLite (no D1 needed).
|
|
123
|
-
3.
|
|
124
|
-
4.
|
|
129
|
+
3. Logs a turn summary via `log()`.
|
|
130
|
+
4. Strips all `activate_skill` and `list_capabilities` messages from history.
|
|
131
|
+
5. Delegates to the CF base `persistMessages` for message storage and WS broadcast.
|
|
125
132
|
|
|
126
133
|
### `onConnect` (automatic)
|
|
127
134
|
|
|
@@ -129,64 +136,49 @@ Replays the full message history to newly connected clients — without this, a
|
|
|
129
136
|
|
|
130
137
|
---
|
|
131
138
|
|
|
132
|
-
## `
|
|
133
|
-
|
|
134
|
-
Drop-in replacement for `streamText` from `ai` with three extra params:
|
|
139
|
+
## `buildLLMParams` (standalone)
|
|
135
140
|
|
|
136
|
-
|
|
137
|
-
| -------------- | ----------------------------------------------- | ------------------------------------------------------------------------------ |
|
|
138
|
-
| `messages` | `UIMessage[]` | Converted to `ModelMessage[]` internally. Pass `this.messages`. |
|
|
139
|
-
| `skills` | `Skill[]` | Skills available for on-demand loading. Wires up meta-tools automatically. |
|
|
140
|
-
| `activeSkills` | `string[]` | Names of skills loaded in previous turns. Pass `await this.getLoadedSkills()`. |
|
|
141
|
-
| `compact` | `{ model: LanguageModel; maxMessages: number }` | When provided, compacts old messages before sending to the model. |
|
|
142
|
-
|
|
143
|
-
When `skills` are provided the wrapper:
|
|
144
|
-
|
|
145
|
-
- Registers `activate_skill` and `list_capabilities` meta-tools
|
|
146
|
-
- Sets initial `activeTools` (meta + always-on + loaded skill tools)
|
|
147
|
-
- Wires up `prepareStep` to update `activeTools` after each step
|
|
148
|
-
- Composes `system` with guidance from loaded skills
|
|
149
|
-
- Merges any `activeTools` / `prepareStep` you also pass (additive)
|
|
141
|
+
The standalone `buildLLMParams` builds the full parameter object for a Vercel AI SDK `streamText` or `generateText` call. Use this directly only if you are not extending `AIChatAgent`, or need fine-grained control.
|
|
150
142
|
|
|
151
143
|
```typescript
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
144
|
+
import { buildLLMParams } from "@economic/agents";
|
|
145
|
+
|
|
146
|
+
const params = await buildLLMParams({
|
|
147
|
+
options, // OnChatMessageOptions — extracts abortSignal and body
|
|
148
|
+
onFinish, // StreamTextOnFinishCallback<ToolSet>
|
|
149
|
+
model, // LanguageModel
|
|
150
|
+
messages: this.messages, // UIMessage[] — converted to ModelMessage[] internally
|
|
151
|
+
activeSkills: await this.getLoadedSkills(),
|
|
156
152
|
system: "You are a helpful assistant.",
|
|
157
153
|
skills: [searchSkill, codeSkill],
|
|
158
|
-
activeSkills: await this.getLoadedSkills(),
|
|
159
154
|
tools: { myAlwaysOnTool },
|
|
160
155
|
compact: { model: openai("gpt-4o-mini"), maxMessages: 30 },
|
|
161
|
-
|
|
156
|
+
stopWhen: stepCountIs(20), // defaults to stepCountIs(20)
|
|
162
157
|
});
|
|
163
|
-
return stream.toUIMessageStreamResponse();
|
|
164
158
|
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
model: openai("gpt-4o"),
|
|
168
|
-
messages: await convertToModelMessages(this.messages),
|
|
169
|
-
tools: { myTool },
|
|
170
|
-
activeTools: ["myTool"],
|
|
171
|
-
onFinish,
|
|
172
|
-
});
|
|
173
|
-
return stream.toUIMessageStreamResponse();
|
|
159
|
+
return streamText(params).toUIMessageStreamResponse();
|
|
160
|
+
// or: generateText(params);
|
|
174
161
|
```
|
|
175
162
|
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
163
|
+
| Parameter | Type | Required | Description |
|
|
164
|
+
| -------------- | ----------------------------------------------- | -------- | ------------------------------------------------------------------------------ |
|
|
165
|
+
| `options` | `OnChatMessageOptions \| undefined` | Yes | CF options object. Extracts `abortSignal` and `experimental_context`. |
|
|
166
|
+
| `onFinish` | `StreamTextOnFinishCallback<ToolSet>` | Yes | Called when the stream completes. |
|
|
167
|
+
| `model` | `LanguageModel` | Yes | The language model to use. |
|
|
168
|
+
| `messages` | `UIMessage[]` | Yes | Conversation history. Converted to `ModelMessage[]` internally. |
|
|
169
|
+
| `activeSkills` | `string[]` | No | Names of skills loaded in previous turns. Pass `await this.getLoadedSkills()`. |
|
|
170
|
+
| `skills` | `Skill[]` | No | Skills available for on-demand loading. Wires up meta-tools automatically. |
|
|
171
|
+
| `system` | `string` | No | Base system prompt. |
|
|
172
|
+
| `tools` | `ToolSet` | No | Always-on tools, active every turn regardless of loaded skills. |
|
|
173
|
+
| `compact` | `{ model: LanguageModel; maxMessages: number }` | No | When provided, compacts old messages before sending to the model. |
|
|
174
|
+
| `stopWhen` | `StopCondition` | No | Stop condition. Defaults to `stepCountIs(20)`. |
|
|
175
|
+
|
|
176
|
+
When `skills` are provided, `buildLLMParams`:
|
|
177
|
+
|
|
178
|
+
- Registers `activate_skill` and `list_capabilities` meta-tools.
|
|
179
|
+
- Sets initial `activeTools` (meta + always-on + loaded skill tools).
|
|
180
|
+
- Wires up `prepareStep` to update `activeTools` after each step.
|
|
181
|
+
- Composes `system` with guidance from loaded skills.
|
|
190
182
|
|
|
191
183
|
---
|
|
192
184
|
|
|
@@ -252,7 +244,7 @@ export const datetimeSkill: Skill = {
|
|
|
252
244
|
|
|
253
245
|
## Compaction
|
|
254
246
|
|
|
255
|
-
When `compact` is provided to `
|
|
247
|
+
When `compact` is provided to `buildLLMParams`, it compacts `messages` before converting and sending to the model:
|
|
256
248
|
|
|
257
249
|
1. The message list is split into an older window and a recent verbatim tail (`maxMessages`).
|
|
258
250
|
2. A model call generates a concise summary of the older window.
|
|
@@ -260,17 +252,17 @@ When `compact` is provided to `streamText` / `generateText`, the wrapper compact
|
|
|
260
252
|
4. Full history in DO SQLite is unaffected — compaction is in-memory only.
|
|
261
253
|
|
|
262
254
|
```typescript
|
|
263
|
-
const
|
|
255
|
+
const params = await this.buildLLMParams({
|
|
256
|
+
options,
|
|
257
|
+
onFinish,
|
|
264
258
|
model: openai("gpt-4o"),
|
|
265
|
-
messages: await convertToModelMessages(this.messages),
|
|
266
259
|
system: "...",
|
|
267
260
|
compact: {
|
|
268
261
|
model: openai("gpt-4o-mini"), // cheaper model for summarisation
|
|
269
262
|
maxMessages: 30, // keep last 30 messages verbatim
|
|
270
263
|
},
|
|
271
|
-
onFinish,
|
|
272
264
|
});
|
|
273
|
-
return
|
|
265
|
+
return streamText(params).toUIMessageStreamResponse();
|
|
274
266
|
```
|
|
275
267
|
|
|
276
268
|
---
|
|
@@ -296,7 +288,21 @@ Returns a summary of active tools, loaded skills, and skills available to load.
|
|
|
296
288
|
|
|
297
289
|
## Passing request context to tools
|
|
298
290
|
|
|
299
|
-
Pass arbitrary data via the `body` option of `useAgentChat`. It arrives as `experimental_context` in tool `execute` functions
|
|
291
|
+
Pass arbitrary data via the `body` option of `useAgentChat`. It arrives as `experimental_context` in tool `execute` functions.
|
|
292
|
+
|
|
293
|
+
When using `this.buildLLMParams()`, the context is automatically composed: your body fields plus a `log` function for writing audit events. Use `AgentContext<TBody>` to type it:
|
|
294
|
+
|
|
295
|
+
```typescript
|
|
296
|
+
// types.ts
|
|
297
|
+
import type { AgentContext } from "@economic/agents";
|
|
298
|
+
|
|
299
|
+
interface AgentBody {
|
|
300
|
+
authorization: string;
|
|
301
|
+
userId: string;
|
|
302
|
+
}
|
|
303
|
+
|
|
304
|
+
export type ToolContext = AgentContext<AgentBody>;
|
|
305
|
+
```
|
|
300
306
|
|
|
301
307
|
```typescript
|
|
302
308
|
// Client
|
|
@@ -304,90 +310,95 @@ useAgentChat({ body: { authorization: token, userId: "u_123" } });
|
|
|
304
310
|
|
|
305
311
|
// Tool
|
|
306
312
|
execute: async (args, { experimental_context }) => {
|
|
307
|
-
const
|
|
313
|
+
const ctx = experimental_context as ToolContext;
|
|
314
|
+
await ctx.log("tool called", { userId: ctx.userId });
|
|
315
|
+
const data = await fetchSomething(ctx.authorization);
|
|
316
|
+
return data;
|
|
308
317
|
};
|
|
309
|
-
|
|
310
|
-
// In onChatMessage — forward body to tools
|
|
311
|
-
const stream = await streamText({
|
|
312
|
-
...
|
|
313
|
-
experimental_context: options?.body,
|
|
314
|
-
});
|
|
315
|
-
return stream.toUIMessageStreamResponse();
|
|
316
318
|
```
|
|
317
319
|
|
|
320
|
+
`log` is a no-op when `AUDIT_DB` is not bound — so no changes are needed in tools when running without a D1 database.
|
|
321
|
+
|
|
318
322
|
---
|
|
319
323
|
|
|
320
|
-
##
|
|
324
|
+
## Audit logging — D1 setup
|
|
321
325
|
|
|
322
|
-
`
|
|
326
|
+
`AIChatAgent` writes audit events to a Cloudflare D1 database when `AUDIT_DB` is bound on the environment. The table is shared across all agent workers — create it once.
|
|
323
327
|
|
|
324
|
-
|
|
325
|
-
import { createSkills, filterEphemeralMessages } from "@economic/agents";
|
|
328
|
+
### 1. Create the D1 database
|
|
326
329
|
|
|
327
|
-
|
|
328
|
-
tools: alwaysOnTools,
|
|
329
|
-
skills: permittedSkills,
|
|
330
|
-
initialLoadedSkills: await getLoadedSkills(), // from storage
|
|
331
|
-
systemPrompt: "You are a helpful assistant.",
|
|
332
|
-
});
|
|
330
|
+
In the [Cloudflare dashboard](https://dash.cloudflare.com) → **Workers & Pages** → **D1** → **Create database**. Note the database name and ID.
|
|
333
331
|
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
332
|
+
### 2. Create the schema
|
|
333
|
+
|
|
334
|
+
Open the database in the D1 dashboard, select **Console**, and run the contents of [`schema/audit_events.sql`](schema/audit_events.sql):
|
|
335
|
+
|
|
336
|
+
```sql
|
|
337
|
+
CREATE TABLE IF NOT EXISTS audit_events (
|
|
338
|
+
id TEXT PRIMARY KEY,
|
|
339
|
+
agent_name TEXT NOT NULL,
|
|
340
|
+
durable_object_id TEXT NOT NULL,
|
|
341
|
+
message TEXT NOT NULL,
|
|
342
|
+
payload TEXT,
|
|
343
|
+
created_at TEXT NOT NULL
|
|
344
|
+
);
|
|
345
|
+
CREATE INDEX IF NOT EXISTS audit_events_do ON audit_events(durable_object_id);
|
|
346
|
+
CREATE INDEX IF NOT EXISTS audit_events_ts ON audit_events(created_at);
|
|
347
|
+
```
|
|
348
|
+
|
|
349
|
+
Safe to re-run — all statements use `IF NOT EXISTS`.
|
|
350
|
+
|
|
351
|
+
### 3. Bind it in `wrangler.jsonc`
|
|
352
|
+
|
|
353
|
+
```jsonc
|
|
354
|
+
"d1_databases": [
|
|
355
|
+
{ "binding": "AUDIT_DB", "database_name": "agents", "database_id": "YOUR_DB_ID" }
|
|
356
|
+
]
|
|
342
357
|
```
|
|
343
358
|
|
|
359
|
+
Then run `wrangler types` to regenerate the `Env` type.
|
|
360
|
+
|
|
361
|
+
### 4. Seed local development
|
|
362
|
+
|
|
363
|
+
```bash
|
|
364
|
+
npm run db:setup
|
|
365
|
+
```
|
|
366
|
+
|
|
367
|
+
This runs the schema SQL against the local D1 SQLite file (`.wrangler/state/`). Re-running is harmless.
|
|
368
|
+
|
|
369
|
+
If `AUDIT_DB` is not bound, all `log()` calls are silent no-ops — the agent works without it.
|
|
370
|
+
|
|
344
371
|
---
|
|
345
372
|
|
|
346
373
|
## API reference
|
|
347
374
|
|
|
348
375
|
### Classes
|
|
349
376
|
|
|
350
|
-
| Export | Description
|
|
351
|
-
| ------------- |
|
|
352
|
-
| `AIChatAgent` | Abstract CF Durable Object base class. Implement `onChatMessage`. Manages skill state
|
|
377
|
+
| Export | Description |
|
|
378
|
+
| ------------- | --------------------------------------------------------------------------------------------------------------------- |
|
|
379
|
+
| `AIChatAgent` | Abstract CF Durable Object base class. Implement `onChatMessage`. Manages skill state, history replay, and audit log. |
|
|
353
380
|
|
|
354
381
|
### Functions
|
|
355
382
|
|
|
356
|
-
| Export
|
|
357
|
-
|
|
|
358
|
-
| `
|
|
359
|
-
| `generateText` | `async (params: GenerateTextParams) => GenerateTextResult` | Wraps AI SDK `generateText`; same extra params as `streamText`. |
|
|
360
|
-
| `createSkills` | `(config: SkillsConfig) => SkillsResult` | Lower-level factory for building the skill loading system. |
|
|
361
|
-
| `filterEphemeralMessages` | `(messages: UIMessage[]) => UIMessage[]` | Strips all `activate_skill` and `list_capabilities` tool calls. |
|
|
362
|
-
| `injectGuidance` | `(messages: ModelMessage[], guidance: string, prev?: string) => ModelMessage[]` | Inserts guidance just before the last user message. **Deprecated** — use `system` in the wrappers instead. |
|
|
363
|
-
| `compactIfNeeded` | `(messages, model, tailSize) => Promise<UIMessage[]>` | Compacts if token estimate exceeds threshold; no-op if model is `undefined`. |
|
|
364
|
-
| `compactMessages` | `(messages, model, tailSize) => Promise<UIMessage[]>` | Summarises the older window and returns `[summaryMsg, ...verbatimTail]`. |
|
|
365
|
-
| `estimateMessagesTokens` | `(messages: UIMessage[]) => number` | Character-count heuristic (÷ 3.5) over text, reasoning, and tool parts. |
|
|
366
|
-
|
|
367
|
-
### Constants
|
|
368
|
-
|
|
369
|
-
| Export | Value | Description |
|
|
370
|
-
| ------------------------- | --------- | ---------------------------------------------------- |
|
|
371
|
-
| `COMPACT_TOKEN_THRESHOLD` | `140_000` | Token count above which compaction is triggered. |
|
|
372
|
-
| `SKILL_STATE_SENTINEL` | `string` | Delimiter used to embed skill state in tool results. |
|
|
383
|
+
| Export | Signature | Description |
|
|
384
|
+
| ---------------- | -------------------------------------- | -------------------------------------------------------------------- |
|
|
385
|
+
| `buildLLMParams` | `async (config) => Promise<LLMParams>` | Builds the full parameter object for `streamText` or `generateText`. |
|
|
373
386
|
|
|
374
387
|
### Types
|
|
375
388
|
|
|
376
|
-
| Export
|
|
377
|
-
|
|
|
378
|
-
| `Skill`
|
|
379
|
-
| `
|
|
380
|
-
| `
|
|
381
|
-
| `
|
|
382
|
-
| `GenerateTextParams` | Params for the `generateText` wrapper. |
|
|
383
|
-
| `CompactOptions` | `{ model: LanguageModel; maxMessages: number }` |
|
|
389
|
+
| Export | Description |
|
|
390
|
+
| ---------------------- | ------------------------------------------------------------------------------- |
|
|
391
|
+
| `Skill` | A named group of tools with optional guidance. |
|
|
392
|
+
| `CompactOptions` | `{ model: LanguageModel; maxMessages: number }` |
|
|
393
|
+
| `AgentContext<TBody>` | Request body type merged with `log`. Use as the type of `experimental_context`. |
|
|
394
|
+
| `BuildLLMParamsConfig` | Config type for the standalone `buildLLMParams` function. |
|
|
384
395
|
|
|
385
396
|
---
|
|
386
397
|
|
|
387
398
|
## Development
|
|
388
399
|
|
|
389
400
|
```bash
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
401
|
+
npm install # install dependencies
|
|
402
|
+
npm test # run tests
|
|
403
|
+
npm pack # build
|
|
393
404
|
```
|
package/dist/index.d.mts
CHANGED
|
@@ -1,49 +1,7 @@
|
|
|
1
|
-
import { AIChatAgent as AIChatAgent$1 } from "@cloudflare/ai-chat";
|
|
2
|
-
import { LanguageModel, ToolSet, UIMessage, generateText
|
|
1
|
+
import { AIChatAgent as AIChatAgent$1, OnChatMessageOptions } from "@cloudflare/ai-chat";
|
|
2
|
+
import { LanguageModel, ToolSet, UIMessage, generateText, streamText } from "ai";
|
|
3
3
|
|
|
4
|
-
//#region src/
|
|
5
|
-
/**
|
|
6
|
-
* Base class for Cloudflare Agents SDK chat agents with lazy skill loading.
|
|
7
|
-
*
|
|
8
|
-
* Handles CF infrastructure concerns only: DO SQLite persistence for loaded
|
|
9
|
-
* skill state, stripping skill meta-tool messages before persistence, and
|
|
10
|
-
* history replay to newly connected clients.
|
|
11
|
-
*
|
|
12
|
-
* Skill loading, compaction, and LLM communication are delegated to the
|
|
13
|
-
* `streamText` / `generateText` wrappers from `@economic/agents`, which you
|
|
14
|
-
* call directly inside `onChatMessage`.
|
|
15
|
-
*/
|
|
16
|
-
declare abstract class AIChatAgent<Env extends Cloudflare.Env = Cloudflare.Env> extends AIChatAgent$1<Env> {
|
|
17
|
-
/**
|
|
18
|
-
* Skill names persisted from previous turns, read from DO SQLite.
|
|
19
|
-
* Pass as `activeSkills` to `streamText` or `generateText`.
|
|
20
|
-
* Returns an empty array if no skills have been loaded yet.
|
|
21
|
-
*/
|
|
22
|
-
protected getLoadedSkills(): Promise<string[]>;
|
|
23
|
-
/**
|
|
24
|
-
* Extracts skill state from activate_skill results, persists to DO SQLite,
|
|
25
|
-
* then strips all skill meta-tool messages before delegating to super.
|
|
26
|
-
*
|
|
27
|
-
* Three things happen here:
|
|
28
|
-
*
|
|
29
|
-
* 1. Scan activate_skill tool results for the SKILL_STATE_SENTINEL. When
|
|
30
|
-
* found, the embedded JSON array of loaded skill names is written to DO
|
|
31
|
-
* SQLite (`this.sql`). This replaces D1 — no extra binding needed.
|
|
32
|
-
*
|
|
33
|
-
* 2. All activate_skill and list_capabilities messages are stripped entirely.
|
|
34
|
-
* Skill state is restored from DO SQLite at turn start via getLoadedSkills(),
|
|
35
|
-
* so these messages are not needed for future turns.
|
|
36
|
-
*
|
|
37
|
-
* 3. super.persistMessages writes the cleaned message list to DO SQLite and
|
|
38
|
-
* broadcasts to connected clients.
|
|
39
|
-
*/
|
|
40
|
-
persistMessages(messages: UIMessage[], excludeBroadcastIds?: string[], options?: {
|
|
41
|
-
_deleteStaleRows?: boolean;
|
|
42
|
-
}): Promise<void>;
|
|
43
|
-
private ensureSkillTableExists;
|
|
44
|
-
}
|
|
45
|
-
//#endregion
|
|
46
|
-
//#region src/ai/skills.d.ts
|
|
4
|
+
//#region src/features/skills/index.d.ts
|
|
47
5
|
/**
|
|
48
6
|
* A named group of related tools that can be loaded together on demand.
|
|
49
7
|
*
|
|
@@ -63,63 +21,110 @@ interface Skill {
|
|
|
63
21
|
guidance?: string;
|
|
64
22
|
tools: ToolSet;
|
|
65
23
|
}
|
|
66
|
-
type SkillsCallSettings = {
|
|
67
|
-
/**
|
|
68
|
-
* Skills available for on-demand loading this turn. When provided, the wrapper
|
|
69
|
-
* automatically wires up activate_skill, list_capabilities, activeTools, and
|
|
70
|
-
* prepareStep. Skills are additive — any activeTools you also pass are merged.
|
|
71
|
-
*/
|
|
72
|
-
skills?: Skill[];
|
|
73
|
-
/**
|
|
74
|
-
* Names of skills loaded in previous turns. Pass `await this.getLoadedSkills()`
|
|
75
|
-
* from your AIChatAgent subclass.
|
|
76
|
-
*/
|
|
77
|
-
activeSkills?: string[];
|
|
78
|
-
};
|
|
79
24
|
//#endregion
|
|
80
|
-
//#region src/
|
|
25
|
+
//#region src/features/compaction/index.d.ts
|
|
81
26
|
type CompactOptions = {
|
|
82
27
|
/** Model used to generate the compaction summary */model: LanguageModel; /** Number of recent messages to keep verbatim; older messages are summarised */
|
|
83
28
|
maxMessages: number;
|
|
84
29
|
};
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
30
|
+
//#endregion
|
|
31
|
+
//#region src/llm.d.ts
|
|
32
|
+
type LLMParams = Parameters<typeof streamText>[0] & Parameters<typeof generateText>[0];
|
|
33
|
+
type BuildLLMParamsConfig = Omit<LLMParams, "messages" | "experimental_context" | "abortSignal"> & {
|
|
34
|
+
/** CF options object — extracts `abortSignal` and `experimental_context` (from `body`). */options: OnChatMessageOptions | undefined; /** Conversation history (`this.messages`). Converted to `ModelMessage[]` internally. */
|
|
35
|
+
messages: UIMessage[]; /** Skill names loaded in previous turns. Pass `await this.getLoadedSkills()`. */
|
|
36
|
+
activeSkills?: string[]; /** Skills available for on-demand loading this turn. */
|
|
37
|
+
skills?: Skill[]; /** When provided, compacts old messages before sending to the model. */
|
|
90
38
|
compact?: CompactOptions;
|
|
91
39
|
};
|
|
92
|
-
//#endregion
|
|
93
|
-
//#region src/ai/wrappers.d.ts
|
|
94
|
-
type AiStreamTextParams = Parameters<typeof streamText$1>[0];
|
|
95
|
-
type AiGenerateTextParams = Parameters<typeof generateText$1>[0];
|
|
96
|
-
type StreamTextParams = AiStreamTextParams & SkillsCallSettings & CompactionCallSettings;
|
|
97
|
-
type GenerateTextParams = AiGenerateTextParams & SkillsCallSettings & CompactionCallSettings;
|
|
98
40
|
/**
|
|
99
|
-
*
|
|
100
|
-
* lazy skill loading and message compaction.
|
|
41
|
+
* Builds the parameter object for a Vercel AI SDK `streamText` or `generateText` call.
|
|
101
42
|
*
|
|
102
|
-
*
|
|
43
|
+
* Handles message conversion, optional compaction, skill wiring (`activate_skill`,
|
|
44
|
+
* `list_capabilities`, `prepareStep`), and context/abort signal extraction from
|
|
45
|
+
* the Cloudflare Agents SDK `options` object.
|
|
103
46
|
*
|
|
104
|
-
*
|
|
105
|
-
*
|
|
106
|
-
*
|
|
107
|
-
*
|
|
108
|
-
*
|
|
47
|
+
* The returned object can be spread directly into `streamText` or `generateText`:
|
|
48
|
+
*
|
|
49
|
+
* ```typescript
|
|
50
|
+
* const params = await buildLLMParams({ ... });
|
|
51
|
+
* return streamText(params).toUIMessageStreamResponse();
|
|
52
|
+
* ```
|
|
109
53
|
*/
|
|
110
|
-
declare function
|
|
54
|
+
declare function buildLLMParams(config: BuildLLMParamsConfig): Promise<LLMParams>;
|
|
55
|
+
//#endregion
|
|
56
|
+
//#region src/agents/AIChatAgent.d.ts
|
|
111
57
|
/**
|
|
112
|
-
*
|
|
113
|
-
*
|
|
58
|
+
* Base class for Cloudflare Agents SDK chat agents with lazy skill loading
|
|
59
|
+
* and built-in audit logging.
|
|
114
60
|
*
|
|
115
|
-
*
|
|
61
|
+
* Handles CF infrastructure concerns only: DO SQLite persistence for loaded
|
|
62
|
+
* skill state, stripping skill meta-tool messages before persistence, history
|
|
63
|
+
* replay to newly connected clients, and writing audit events to D1.
|
|
116
64
|
*
|
|
117
|
-
*
|
|
118
|
-
*
|
|
119
|
-
* @param params.activeSkills - Skill names loaded in previous turns. Pass `await this.getLoadedSkills()`.
|
|
120
|
-
* @param params.compact - When provided, compacts messages before the model call. Older messages
|
|
121
|
-
* are summarised into a single system message using the given model.
|
|
65
|
+
* Skill loading, compaction, and LLM communication are delegated to
|
|
66
|
+
* `buildLLMParams` from `@economic/agents`, which you call inside `onChatMessage`.
|
|
122
67
|
*/
|
|
123
|
-
declare
|
|
68
|
+
declare abstract class AIChatAgent<Env extends Cloudflare.Env = Cloudflare.Env> extends AIChatAgent$1<Env> {
|
|
69
|
+
/**
|
|
70
|
+
* Writes an audit event to D1 if `AUDIT_DB` is bound on the environment,
|
|
71
|
+
* otherwise silently does nothing.
|
|
72
|
+
*
|
|
73
|
+
* Called automatically after every turn (from `persistMessages`) and on
|
|
74
|
+
* non-clean finish reasons (from `buildLLMParams`). Also available via
|
|
75
|
+
* `experimental_context.log` in tool `execute` functions.
|
|
76
|
+
*/
|
|
77
|
+
protected log(message: string, payload?: Record<string, unknown>): Promise<void>;
|
|
78
|
+
/**
|
|
79
|
+
* Builds the parameter object for a `streamText` or `generateText` call,
|
|
80
|
+
* pre-filling `messages` and `activeSkills` from this agent instance.
|
|
81
|
+
* Injects `log` into `experimental_context` and logs non-clean finish reasons.
|
|
82
|
+
*
|
|
83
|
+
* ```typescript
|
|
84
|
+
* const params = await this.buildLLMParams({ options, onFinish, model, system: "..." });
|
|
85
|
+
* return streamText(params).toUIMessageStreamResponse();
|
|
86
|
+
* ```
|
|
87
|
+
*/
|
|
88
|
+
protected buildLLMParams(config: Omit<BuildLLMParamsConfig, "messages" | "activeSkills">): ReturnType<typeof buildLLMParams>;
|
|
89
|
+
/**
|
|
90
|
+
* Skill names persisted from previous turns, read from DO SQLite.
|
|
91
|
+
* Returns an empty array if no skills have been loaded yet.
|
|
92
|
+
*/
|
|
93
|
+
protected getLoadedSkills(): Promise<string[]>;
|
|
94
|
+
/**
|
|
95
|
+
* Extracts skill state from activate_skill results, persists to DO SQLite,
|
|
96
|
+
* logs a turn summary, then strips all skill meta-tool messages before
|
|
97
|
+
* delegating to super.
|
|
98
|
+
*
|
|
99
|
+
* 1. Scans activate_skill tool results for SKILL_STATE_SENTINEL. When found,
|
|
100
|
+
* the embedded JSON array of loaded skill names is written to DO SQLite.
|
|
101
|
+
*
|
|
102
|
+
* 2. Logs a turn summary via `log()`. Best-effort: fire-and-forget.
|
|
103
|
+
*
|
|
104
|
+
* 3. Strips all activate_skill and list_capabilities messages from history.
|
|
105
|
+
*
|
|
106
|
+
* 4. Delegates to super.persistMessages for message storage and WS broadcast.
|
|
107
|
+
*/
|
|
108
|
+
persistMessages(messages: UIMessage[], excludeBroadcastIds?: string[], options?: {
|
|
109
|
+
_deleteStaleRows?: boolean;
|
|
110
|
+
}): Promise<void>;
|
|
111
|
+
private ensureSkillTableExists;
|
|
112
|
+
}
|
|
113
|
+
//#endregion
|
|
114
|
+
//#region src/types.d.ts
|
|
115
|
+
/**
|
|
116
|
+
* The context object available throughout an agent's lifetime — passed via
|
|
117
|
+
* `experimental_context` to tool `execute` functions. Contains the typed
|
|
118
|
+
* request body merged with platform capabilities like `log`.
|
|
119
|
+
*
|
|
120
|
+
* Define your own body shape and compose:
|
|
121
|
+
* ```typescript
|
|
122
|
+
* interface MyBody { userId: string; userTier: "free" | "pro" }
|
|
123
|
+
* type MyContext = AgentContext<MyBody>;
|
|
124
|
+
* ```
|
|
125
|
+
*/
|
|
126
|
+
type AgentContext<TBody = Record<string, unknown>> = TBody & {
|
|
127
|
+
log: (message: string, payload?: Record<string, unknown>) => void | Promise<void>;
|
|
128
|
+
};
|
|
124
129
|
//#endregion
|
|
125
|
-
export { AIChatAgent, type
|
|
130
|
+
export { AIChatAgent, type AgentContext, type BuildLLMParamsConfig, type CompactOptions, type Skill, buildLLMParams };
|
package/dist/index.mjs
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import { AIChatAgent as AIChatAgent$1 } from "@cloudflare/ai-chat";
|
|
2
|
-
import {
|
|
3
|
-
//#region src/
|
|
2
|
+
import { convertToModelMessages, generateText, jsonSchema, stepCountIs, tool } from "ai";
|
|
3
|
+
//#region src/features/skills/index.ts
|
|
4
4
|
const ACTIVATE_SKILL = "activate_skill";
|
|
5
5
|
const LIST_CAPABILITIES = "list_capabilities";
|
|
6
6
|
function buildActivateSkillDescription(skills) {
|
|
@@ -122,94 +122,6 @@ function createSkills(config) {
|
|
|
122
122
|
};
|
|
123
123
|
}
|
|
124
124
|
const ALREADY_LOADED_OUTPUT = "All requested skills were already loaded.";
|
|
125
|
-
//#endregion
|
|
126
|
-
//#region src/agents/AIChatAgent.ts
|
|
127
|
-
/**
|
|
128
|
-
* Base class for Cloudflare Agents SDK chat agents with lazy skill loading.
|
|
129
|
-
*
|
|
130
|
-
* Handles CF infrastructure concerns only: DO SQLite persistence for loaded
|
|
131
|
-
* skill state, stripping skill meta-tool messages before persistence, and
|
|
132
|
-
* history replay to newly connected clients.
|
|
133
|
-
*
|
|
134
|
-
* Skill loading, compaction, and LLM communication are delegated to the
|
|
135
|
-
* `streamText` / `generateText` wrappers from `@economic/agents`, which you
|
|
136
|
-
* call directly inside `onChatMessage`.
|
|
137
|
-
*/
|
|
138
|
-
var AIChatAgent = class extends AIChatAgent$1 {
|
|
139
|
-
/**
|
|
140
|
-
* Skill names persisted from previous turns, read from DO SQLite.
|
|
141
|
-
* Pass as `activeSkills` to `streamText` or `generateText`.
|
|
142
|
-
* Returns an empty array if no skills have been loaded yet.
|
|
143
|
-
*/
|
|
144
|
-
async getLoadedSkills() {
|
|
145
|
-
try {
|
|
146
|
-
this.ensureSkillTableExists();
|
|
147
|
-
const rows = this.sql`SELECT active_skills FROM skill_state WHERE id = 1`;
|
|
148
|
-
if (rows.length === 0) return [];
|
|
149
|
-
return JSON.parse(rows[0].active_skills);
|
|
150
|
-
} catch {
|
|
151
|
-
return [];
|
|
152
|
-
}
|
|
153
|
-
}
|
|
154
|
-
/**
|
|
155
|
-
* Extracts skill state from activate_skill results, persists to DO SQLite,
|
|
156
|
-
* then strips all skill meta-tool messages before delegating to super.
|
|
157
|
-
*
|
|
158
|
-
* Three things happen here:
|
|
159
|
-
*
|
|
160
|
-
* 1. Scan activate_skill tool results for the SKILL_STATE_SENTINEL. When
|
|
161
|
-
* found, the embedded JSON array of loaded skill names is written to DO
|
|
162
|
-
* SQLite (`this.sql`). This replaces D1 — no extra binding needed.
|
|
163
|
-
*
|
|
164
|
-
* 2. All activate_skill and list_capabilities messages are stripped entirely.
|
|
165
|
-
* Skill state is restored from DO SQLite at turn start via getLoadedSkills(),
|
|
166
|
-
* so these messages are not needed for future turns.
|
|
167
|
-
*
|
|
168
|
-
* 3. super.persistMessages writes the cleaned message list to DO SQLite and
|
|
169
|
-
* broadcasts to connected clients.
|
|
170
|
-
*/
|
|
171
|
-
async persistMessages(messages, excludeBroadcastIds = [], options) {
|
|
172
|
-
let latestSkillState;
|
|
173
|
-
for (const msg of messages) {
|
|
174
|
-
if (msg.role !== "assistant" || !msg.parts) continue;
|
|
175
|
-
for (const part of msg.parts) {
|
|
176
|
-
if (!("toolCallId" in part)) continue;
|
|
177
|
-
const { type, output } = part;
|
|
178
|
-
if (type !== `tool-activate_skill` || typeof output !== "string") continue;
|
|
179
|
-
const sentinelIdx = output.indexOf(SKILL_STATE_SENTINEL);
|
|
180
|
-
if (sentinelIdx !== -1) try {
|
|
181
|
-
const stateJson = output.slice(sentinelIdx + 18);
|
|
182
|
-
latestSkillState = JSON.parse(stateJson);
|
|
183
|
-
} catch {}
|
|
184
|
-
}
|
|
185
|
-
}
|
|
186
|
-
if (latestSkillState !== void 0) {
|
|
187
|
-
this.ensureSkillTableExists();
|
|
188
|
-
this.sql`INSERT OR REPLACE INTO skill_state(id, active_skills) VALUES(1, ${JSON.stringify(latestSkillState)})`;
|
|
189
|
-
}
|
|
190
|
-
const filtered = stripSkillMessages(messages);
|
|
191
|
-
return super.persistMessages(filtered, excludeBroadcastIds, options);
|
|
192
|
-
}
|
|
193
|
-
ensureSkillTableExists() {
|
|
194
|
-
this.sql`CREATE TABLE IF NOT EXISTS skill_state (id INTEGER PRIMARY KEY, active_skills TEXT NOT NULL DEFAULT '[]')`;
|
|
195
|
-
}
|
|
196
|
-
};
|
|
197
|
-
function stripSkillMessages(messages) {
|
|
198
|
-
return messages.flatMap((msg) => {
|
|
199
|
-
if (msg.role !== "assistant" || !msg.parts?.length) return [msg];
|
|
200
|
-
const filtered = msg.parts.filter((part) => {
|
|
201
|
-
if (!("toolCallId" in part)) return true;
|
|
202
|
-
const { type } = part;
|
|
203
|
-
return type !== `tool-activate_skill` && type !== `tool-list_capabilities`;
|
|
204
|
-
});
|
|
205
|
-
if (filtered.length === 0) return [];
|
|
206
|
-
if (filtered.length === msg.parts.length) return [msg];
|
|
207
|
-
return [{
|
|
208
|
-
...msg,
|
|
209
|
-
parts: filtered
|
|
210
|
-
}];
|
|
211
|
-
});
|
|
212
|
-
}
|
|
213
125
|
const TOOL_RESULT_PREVIEW_CHARS = 200;
|
|
214
126
|
const SUMMARY_MAX_TOKENS = 4e3;
|
|
215
127
|
/**
|
|
@@ -277,7 +189,7 @@ ${formatMessagesForSummary(recentMessages)}
|
|
|
277
189
|
|
|
278
190
|
Write a concise summary:`;
|
|
279
191
|
try {
|
|
280
|
-
const { text } = await generateText
|
|
192
|
+
const { text } = await generateText({
|
|
281
193
|
model,
|
|
282
194
|
messages: [{
|
|
283
195
|
role: "user",
|
|
@@ -314,71 +226,221 @@ async function compactIfNeeded(messages, model, tailSize) {
|
|
|
314
226
|
return compactMessages(messages, model, tailSize);
|
|
315
227
|
}
|
|
316
228
|
//#endregion
|
|
317
|
-
//#region src/
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
229
|
+
//#region src/llm.ts
|
|
230
|
+
/**
 * Builds the parameter object for a Vercel AI SDK `streamText` or `generateText` call.
 *
 * Handles message conversion, optional compaction, skill wiring (`activate_skill`,
 * `list_capabilities`, `prepareStep`), and context/abort signal extraction from
 * the Cloudflare Agents SDK `options` object.
 *
 * The returned object can be spread directly into `streamText` or `generateText`:
 *
 * ```typescript
 * const params = await buildLLMParams({ ... });
 * return streamText(params).toUIMessageStreamResponse();
 * ```
 */
async function buildLLMParams(config) {
	const { options, messages, activeSkills = [], skills, compact, ...passthrough } = config;
	// Convert UI messages to model messages, then optionally compact older history.
	const modelMessages = await convertToModelMessages(messages);
	let finalMessages = modelMessages;
	if (compact) finalMessages = await compactIfNeeded(modelMessages, compact.model, compact.maxMessages);
	const params = {
		...passthrough,
		messages: finalMessages,
		// Typed request body flows to tool `execute` functions via experimental_context.
		experimental_context: options?.body,
		abortSignal: options?.abortSignal,
		// Default step cap; consumers may override via `stopWhen`.
		stopWhen: passthrough.stopWhen ?? stepCountIs(20)
	};
	// Without skills there is nothing more to wire up.
	if (!skills?.length) return params;
	const skillCtx = createSkills({
		tools: passthrough.tools ?? {},
		skills,
		initialLoadedSkills: activeSkills,
		systemPrompt: typeof passthrough.system === "string" ? passthrough.system : void 0
	});
	// Per-step hook: let the skills layer decide which tools/system prompt are active.
	const prepareStep = async (stepOptions) => {
		const result = await skillCtx.prepareStep(stepOptions) ?? {};
		return {
			activeTools: result.activeTools ?? [],
			system: result.system
		};
	};
	return {
		...params,
		system: skillCtx.getSystem() || passthrough.system,
		tools: skillCtx.tools,
		activeTools: skillCtx.activeTools,
		prepareStep
	};
}
|
|
277
|
+
//#endregion
|
|
278
|
+
//#region src/features/audit/index.ts
|
|
353
279
|
/**
|
|
354
|
-
*
|
|
355
|
-
* lazy skill loading and message compaction.
|
|
280
|
+
* Inserts a single audit event row into the shared `audit_events` D1 table.
|
|
356
281
|
*
|
|
357
|
-
*
|
|
282
|
+
* Called by `AIChatAgent.log()`. Not intended for direct use.
|
|
283
|
+
*/
|
|
284
|
+
async function insertAuditEvent(db, agentName, durableObjectId, message, payload) {
|
|
285
|
+
await db.prepare(`INSERT INTO audit_events (id, agent_name, durable_object_id, message, payload, created_at)
|
|
286
|
+
VALUES (?, ?, ?, ?, ?, ?)`).bind(crypto.randomUUID(), agentName, durableObjectId, message, payload ? JSON.stringify(payload) : null, (/* @__PURE__ */ new Date()).toISOString()).run();
|
|
287
|
+
}
|
|
288
|
+
/**
 * Builds the payload for a "turn completed" audit event from the final message list.
 *
 * Extracts the last user and assistant message texts (truncated to 200 chars),
 * all non-meta tool call names used this turn, and the current loaded skill set.
 */
function buildTurnSummary(messages, loadedSkills) {
	// Set preserves insertion order, so output matches first-use order of each tool.
	const seenTools = new Set();
	for (const message of messages) {
		if (message.role !== "assistant") continue;
		for (const part of message.parts ?? []) {
			if (!("toolCallId" in part) || !part.type.startsWith("tool-")) continue;
			const toolName = part.type.slice("tool-".length);
			// Skill meta-tools are infrastructure, not part of the turn's work.
			if (toolName === "activate_skill" || toolName === "list_capabilities") continue;
			seenTools.add(toolName);
		}
	}
	// Single backward scan finds the most recent user and assistant messages.
	let lastUser;
	let lastAssistant;
	for (let i = messages.length - 1; i >= 0 && (!lastUser || !lastAssistant); i--) {
		const candidate = messages[i];
		if (!lastUser && candidate.role === "user") lastUser = candidate;
		if (!lastAssistant && candidate.role === "assistant") lastAssistant = candidate;
	}
	return {
		userMessage: extractMessageText(lastUser).slice(0, 200),
		toolCalls: [...seenTools],
		loadedSkills,
		assistantMessage: extractMessageText(lastAssistant).slice(0, 200)
	};
}
|
|
315
|
+
/**
 * Concatenates the text of all `text` parts of a UI message, space-joined
 * and trimmed. Returns an empty string for a missing message or missing parts.
 */
function extractMessageText(msg) {
	const parts = msg?.parts;
	if (!parts) return "";
	const fragments = [];
	for (const part of parts) {
		if (part.type === "text") fragments.push(part.text);
	}
	return fragments.join(" ").trim();
}
|
|
319
|
+
//#endregion
|
|
320
|
+
//#region src/agents/AIChatAgent.ts
|
|
368
321
|
/**
 * Base class for Cloudflare Agents SDK chat agents with lazy skill loading
 * and built-in audit logging.
 *
 * Handles CF infrastructure concerns only: DO SQLite persistence for loaded
 * skill state, stripping skill meta-tool messages before persistence, history
 * replay to newly connected clients, and writing audit events to D1.
 *
 * Skill loading, compaction, and LLM communication are delegated to
 * `buildLLMParams` from `@economic/agents`, which you call inside `onChatMessage`.
 */
var AIChatAgent = class extends AIChatAgent$1 {
	/**
	 * Writes an audit event to D1 if `AUDIT_DB` is bound on the environment,
	 * otherwise silently does nothing.
	 *
	 * Called automatically after every turn (from `persistMessages`) and on
	 * non-clean finish reasons (from `buildLLMParams`). Also available via
	 * `experimental_context.log` in tool `execute` functions.
	 *
	 * @param message - short event label, e.g. "turn completed".
	 * @param payload - optional structured payload, JSON-serialized into the row.
	 */
	async log(message, payload) {
		const db = this.env.AUDIT_DB;
		if (!db) return;
		await insertAuditEvent(db, this.constructor.name, this.ctx.id.toString(), message, payload);
	}
	/**
	 * Builds the parameter object for a `streamText` or `generateText` call,
	 * pre-filling `messages` and `activeSkills` from this agent instance.
	 * Injects `log` into `experimental_context` and logs non-clean finish reasons.
	 *
	 * ```typescript
	 * const params = await this.buildLLMParams({ options, onFinish, model, system: "..." });
	 * return streamText(params).toUIMessageStreamResponse();
	 * ```
	 */
	async buildLLMParams(config) {
		const onFinishWithErrorLogging = async (result) => {
			// Anything other than a clean stop or a tool-call handoff is worth auditing.
			if (result.finishReason !== "stop" && result.finishReason !== "tool-calls") await this.log("turn error", { finishReason: result.finishReason });
			return config.onFinish?.(result);
		};
		return {
			...await buildLLMParams({
				...config,
				onFinish: onFinishWithErrorLogging,
				messages: this.messages,
				activeSkills: await this.getLoadedSkills()
			}),
			// Re-assemble experimental_context so tools get both the request body and `log`.
			experimental_context: {
				...config.options?.body,
				log: this.log.bind(this)
			}
		};
	}
	/**
	 * Skill names persisted from previous turns, read from DO SQLite.
	 * Returns an empty array if no skills have been loaded yet.
	 */
	async getLoadedSkills() {
		try {
			this.ensureSkillTableExists();
			const rows = this.sql`SELECT active_skills FROM skill_state WHERE id = 1`;
			if (rows.length === 0) return [];
			return JSON.parse(rows[0].active_skills);
		} catch {
			// Best-effort: a missing or corrupt table simply means no skills are loaded.
			return [];
		}
	}
	/**
	 * Extracts skill state from activate_skill results, persists to DO SQLite,
	 * logs a turn summary, then strips all skill meta-tool messages before
	 * delegating to super.
	 *
	 * 1. Scans activate_skill tool results for SKILL_STATE_SENTINEL. When found,
	 * the embedded JSON array of loaded skill names is written to DO SQLite.
	 *
	 * 2. Logs a turn summary via `log()`. Best-effort: fire-and-forget.
	 *
	 * 3. Strips all activate_skill and list_capabilities messages from history.
	 *
	 * 4. Delegates to super.persistMessages for message storage and WS broadcast.
	 */
	async persistMessages(messages, excludeBroadcastIds = [], options) {
		let latestSkillState;
		for (const msg of messages) {
			if (msg.role !== "assistant" || !msg.parts) continue;
			for (const part of msg.parts) {
				if (!("toolCallId" in part)) continue;
				const { type, output } = part;
				if (type !== `tool-activate_skill` || typeof output !== "string") continue;
				const sentinelIdx = output.indexOf(SKILL_STATE_SENTINEL);
				if (sentinelIdx !== -1) try {
					// Derive the offset from the sentinel itself (was a hard-coded `+ 18`),
					// so the slice cannot drift if the sentinel text ever changes.
					const stateJson = output.slice(sentinelIdx + SKILL_STATE_SENTINEL.length);
					latestSkillState = JSON.parse(stateJson);
				} catch {}
			}
		}
		if (latestSkillState !== void 0) {
			this.ensureSkillTableExists();
			this.sql`INSERT OR REPLACE INTO skill_state(id, active_skills) VALUES(1, ${JSON.stringify(latestSkillState)})`;
		}
		// Fire-and-forget, but swallow rejections: a failed audit write must not
		// become an unhandled promise rejection or block message persistence.
		void this.log("turn completed", buildTurnSummary(messages, latestSkillState ?? [])).catch(() => {});
		const filtered = stripSkillMessages(messages);
		return super.persistMessages(filtered, excludeBroadcastIds, options);
	}
	// Idempotent: CREATE TABLE IF NOT EXISTS makes repeat calls safe.
	ensureSkillTableExists() {
		this.sql`CREATE TABLE IF NOT EXISTS skill_state (id INTEGER PRIMARY KEY, active_skills TEXT NOT NULL DEFAULT '[]')`;
	}
};
|
|
429
|
+
/**
 * Removes activate_skill and list_capabilities tool parts from assistant
 * messages. Messages that consisted only of skill meta-tool parts are dropped
 * entirely; untouched messages are passed through by reference.
 */
function stripSkillMessages(messages) {
	const cleaned = [];
	for (const message of messages) {
		// Only assistant messages with parts can carry skill meta-tool calls.
		if (message.role !== "assistant" || !message.parts?.length) {
			cleaned.push(message);
			continue;
		}
		const keptParts = message.parts.filter((part) => {
			if (!("toolCallId" in part)) return true;
			return part.type !== "tool-activate_skill" && part.type !== "tool-list_capabilities";
		});
		// Nothing left: the whole message was skill infrastructure — drop it.
		if (keptParts.length === 0) continue;
		// Unchanged: keep the original object so identity/broadcast logic is unaffected.
		cleaned.push(keptParts.length === message.parts.length ? message : { ...message, parts: keptParts });
	}
	return cleaned;
}
|
|
383
445
|
//#endregion
|
|
384
|
-
export { AIChatAgent,
|
|
446
|
+
export { AIChatAgent, buildLLMParams };
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@economic/agents",
|
|
3
|
-
"version": "0.0.1-alpha.
|
|
3
|
+
"version": "0.0.1-alpha.12",
|
|
4
4
|
"description": "Base class and utilities for building LLM chat agents on Cloudflare's Agents SDK with lazy skill loading, message compaction, and built-in audit logging.",
|
|
5
5
|
"homepage": "https://github.com/author/library#readme",
|
|
6
6
|
"bugs": {
|
|
@@ -13,7 +13,8 @@
|
|
|
13
13
|
"url": "git+https://github.com/author/library.git"
|
|
14
14
|
},
|
|
15
15
|
"files": [
|
|
16
|
-
"dist"
|
|
16
|
+
"dist",
|
|
17
|
+
"schema"
|
|
17
18
|
],
|
|
18
19
|
"type": "module",
|
|
19
20
|
"exports": {
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
-- Audit events table for @economic/agents.
-- This is a shared global table — create it once in the Cloudflare D1 portal.
-- Safe to re-run: all statements use IF NOT EXISTS.
--
-- Rows are written by insertAuditEvent() in dist/index.mjs; the column
-- comments below describe the values it binds.

CREATE TABLE IF NOT EXISTS audit_events (
id TEXT PRIMARY KEY,             -- UUID generated per event (crypto.randomUUID)
agent_name TEXT NOT NULL,        -- agent class name (constructor.name of the DO)
durable_object_id TEXT NOT NULL, -- stringified id of the emitting DO instance
message TEXT NOT NULL,           -- short event label, e.g. "turn completed"
payload TEXT, -- JSON, nullable  -- JSON-serialized event payload when provided
created_at TEXT NOT NULL         -- ISO 8601 timestamp (Date.toISOString)
);

-- Supports per-conversation lookups (one DO instance == one conversation).
CREATE INDEX IF NOT EXISTS audit_events_do ON audit_events(durable_object_id);
-- Supports time-ordered scans across all agents.
CREATE INDEX IF NOT EXISTS audit_events_ts ON audit_events(created_at);
|