@economic/agents 0.0.1-alpha.17 → 0.0.1-alpha.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -13,6 +13,7 @@ npm install @economic/agents ai @cloudflare/ai-chat
13
13
  `@economic/agents` provides:
14
14
 
15
15
  - **`AIChatAgent`** — an abstract Cloudflare Durable Object base class. Implement `onChatMessage`, call `this.buildLLMParams()`, and pass the result to `streamText` from the AI SDK.
16
+ - **`guard`** — optional TypeScript 5+ method decorator for `onChatMessage`. Runs your guard function with `options.body`; returning a `Response` short-circuits the handler (e.g. for auth), while returning nothing lets it continue.
16
17
  - **`buildLLMParams`** — the standalone version of the above, for use outside of `AIChatAgent` or in custom agent implementations.
17
18
 
18
19
  Skills and compaction are AI SDK concerns — they control what goes to the LLM. The CF layer is responsible for WebSockets, Durable Objects, and message persistence. These are kept separate.
@@ -121,6 +122,43 @@ Protected method on `AIChatAgent`. Wraps the standalone `buildLLMParams` functio
121
122
 
122
123
  Config is everything accepted by the standalone `buildLLMParams` except `messages`, `activeSkills`, and `fastModel`.
123
124
 
125
+ ### `guard`
126
+
127
+ Method decorator (TypeScript 5+ stage-3) for handlers shaped like `onChatMessage(onFinish, options?)`. Before your method runs, it calls your `GuardFn` with `options?.body` (the same custom body the client sends via `useAgentChat` / `body` on the chat request).
128
+
129
+ - Return **`undefined` / nothing** — the decorated method runs as usual.
130
+ - Return a **`Response`** — that response is returned immediately; `onChatMessage` is not called.
131
+
132
+ All policy (tokens, tiers, rate limits) lives in the guard function; the decorator only forwards `body` and branches on whether a `Response` was returned.
133
+
134
+ ```typescript
135
+ import { streamText } from "ai";
136
+ import { openai } from "@ai-sdk/openai";
137
+ import { AIChatAgent, guard, type GuardFn } from "@economic/agents";
138
+
139
+ const requireToken: GuardFn = async (body) => {
140
+ const token = body?.token;
141
+ if (typeof token !== "string" || !(await isValidToken(token))) {
142
+ return new Response("Unauthorized", { status: 401 });
143
+ }
144
+ };
145
+
146
+ export class ChatAgent extends AIChatAgent<Env> {
147
+ protected fastModel = openai("gpt-4o-mini");
148
+
149
+ @guard(requireToken)
150
+ async onChatMessage(onFinish, options) {
151
+ const params = await this.buildLLMParams({
152
+ options,
153
+ onFinish,
154
+ model: openai("gpt-4o"),
155
+ system: "You are a helpful assistant.",
156
+ });
157
+ return streamText(params).toUIMessageStreamResponse();
158
+ }
159
+ }
160
+ ```
161
+
124
162
  ### `fastModel` property
125
163
 
126
164
  Override `fastModel` on your subclass to enable automatic compaction and future background conversation summarization:
@@ -503,17 +541,19 @@ If `userId` is not set on the request body, the upsert is skipped and a `console
503
541
 
504
542
  ### Functions
505
543
 
506
- | Export | Signature | Description |
507
- | ---------------- | -------------------------------------- | -------------------------------------------------------------------- |
508
- | `buildLLMParams` | `async (config) => Promise<LLMParams>` | Builds the full parameter object for `streamText` or `generateText`. |
544
+ | Export | Signature | Description |
545
+ | ---------------- | -------------------------------------- | ------------------------------------------------------------------------------------------------------ |
546
+ | `guard` | `(guardFn: GuardFn)` | Method decorator: runs `guardFn` with `options.body`; a returned `Response` short-circuits the method. |
547
+ | `buildLLMParams` | `async (config) => Promise<LLMParams>` | Builds the full parameter object for `streamText` or `generateText`. |
509
548
 
510
549
  ### Types
511
550
 
512
- | Export | Description |
513
- | ---------------------- | ------------------------------------------------------------------------------- |
514
- | `Skill` | A named group of tools with optional guidance. |
515
- | `AgentContext<TBody>` | Request body type merged with `log`. Use as the type of `experimental_context`. |
516
- | `BuildLLMParamsConfig` | Config type for the standalone `buildLLMParams` function. |
551
+ | Export | Description |
552
+ | ---------------------- | ---------------------------------------------------------------------------------------------------------------- |
553
+ | `GuardFn` | `(body) => Response \| void \| Promise<...>`. Receives chat request `body`; return `Response` to block the turn. |
554
+ | `Skill` | A named group of tools with optional guidance. |
555
+ | `AgentContext<TBody>` | Request body type merged with `log`. Use as the type of `experimental_context`. |
556
+ | `BuildLLMParamsConfig` | Config type for the standalone `buildLLMParams` function. |
517
557
 
518
558
  ---
519
559
 
package/dist/index.d.mts CHANGED
@@ -113,28 +113,17 @@ declare abstract class AIChatAgent<Env extends Cloudflare.Env = Cloudflare.Env>
113
113
  */
114
114
  protected log(message: string, payload?: Record<string, unknown>): Promise<void>;
115
115
  /**
116
- * Records this conversation in the `conversations` D1 table and resets
117
- * the idle summarization timer. Called automatically from `persistMessages`
118
- * after every turn.
119
- *
120
- * After each upsert, any pending `generateSummary` schedule is cancelled
121
- * and a new one is set for 30 minutes from now. If the user sends another
122
- * message before the timer fires, the schedule is cancelled and reset again
123
- * (debounce). When the conversation goes idle, `generateSummary` fires and
124
- * writes the LLM-generated title and summary to D1.
116
+ * Records this conversation in the `conversations` D1 table and triggers
117
+ * LLM-based title/summary generation when appropriate. Called automatically
118
+ * from `persistMessages` after every turn.
119
+ *
120
+ * On the first turn (no existing row), awaits `generateTitleAndSummary` and
121
+ * inserts the row with title and summary already populated. On subsequent
122
+ * turns, upserts the timestamp and fire-and-forgets a summary refresh every
123
+ * `SUMMARY_CONTEXT_MESSAGES` messages (when the context window fully turns
124
+ * over). Neither path blocks the response to the client.
125
125
  */
126
126
  private recordConversation;
127
- /**
128
- * Generates a title and summary for the conversation after 30 minutes of
129
- * inactivity. Invoked automatically by the Cloudflare Agents SDK scheduler
130
- * — do not call this directly.
131
- *
132
- * Delegates to `generateConversationSummary` in `features/conversations`,
133
- * which fetches the previous summary, slices to the last
134
- * `SUMMARY_CONTEXT_MESSAGES` messages, calls `fastModel` with a structured
135
- * output schema, and writes the result back to D1.
136
- */
137
- generateSummary(): Promise<void>;
138
127
  /**
139
128
  * Builds the parameter object for a `streamText` or `generateText` call,
140
129
  * pre-filling `messages`, `activeSkills`, and `fastModel` from this agent instance.
@@ -183,13 +172,46 @@ declare abstract class AIChatAgent<Env extends Cloudflare.Env = Cloudflare.Env>
183
172
  }): Promise<void>;
184
173
  }
185
174
  //#endregion
186
- //#region src/features/compaction/index.d.ts
175
+ //#region src/decorators/guard.d.ts
176
+ /**
177
+ * Function run before a guarded handler (e.g. `onChatMessage`). Receives the
178
+ * custom request body from chat options (`options.body`, where `options` has
179
+ * the `OnChatMessageOptions` shape from `@cloudflare/ai-chat`).
180
+ *
181
+ * Return a {@link Response} to short-circuit and skip the decorated method.
182
+ * Return nothing (or `undefined`) to allow the method to run.
183
+ *
184
+ * Typical uses include auth, rate limits, or feature flags — all logic lives here;
185
+ * the `guard` decorator only forwards `body` and handles the return shape.
186
+ */
187
+ type GuardFn = (body: Record<string, unknown> | undefined) => Response | void | Promise<Response | void>;
187
188
  /**
188
- * Number of recent messages to keep verbatim when compaction runs.
189
- * Older messages beyond this count are summarised into a single system message.
190
- * Used as the default when `maxMessagesBeforeCompaction` is not provided to `buildLLMParams`.
189
+ * Method decorator (TypeScript 5+ stage-3) that runs `guardFn` with the second
190
+ * argument's `body` (the chat request body). If `guardFn` returns a
191
+ * {@link Response}, that value is returned and the original method is not called.
192
+ *
193
+ * Intended for `onChatMessage(onFinish, options?)` on subclasses of
194
+ * `AIChatAgent`; `options` is read as `{ body?: Record<string, unknown> }`.
195
+ *
196
+ * @param guardFn - Called with `options?.body` before the method body.
197
+ *
198
+ * @example
199
+ * ```ts
200
+ * const requireToken: GuardFn = async (body) => {
201
+ * if (!await isValidToken(body?.token)) {
202
+ * return new Response("Unauthorized", { status: 401 });
203
+ * }
204
+ * };
205
+ *
206
+ * class MyAgent extends AIChatAgent<Env> {
207
+ * @guard(requireToken)
208
+ * async onChatMessage(onFinish, options) {
209
+ * // ...
210
+ * }
211
+ * }
212
+ * ```
191
213
  */
192
- declare const DEFAULT_MAX_MESSAGES_BEFORE_COMPACTION = 15;
214
+ declare function guard(guardFn: GuardFn): (target: Function, _context: ClassMethodDecoratorContext) => (this: unknown, ...args: unknown[]) => Promise<unknown>;
193
215
  //#endregion
194
216
  //#region src/types.d.ts
195
217
  /**
@@ -207,4 +229,4 @@ type AgentContext<TBody = Record<string, unknown>> = TBody & {
207
229
  log: (message: string, payload?: Record<string, unknown>) => void | Promise<void>;
208
230
  };
209
231
  //#endregion
210
- export { AIChatAgent, type AgentContext, type BuildLLMParamsConfig, DEFAULT_MAX_MESSAGES_BEFORE_COMPACTION, type Skill, buildLLMParams };
232
+ export { AIChatAgent, type AgentContext, type BuildLLMParamsConfig, type GuardFn, type Skill, buildLLMParams, guard };
package/dist/index.mjs CHANGED
@@ -174,14 +174,6 @@ function filterEphemeralMessages(messages) {
174
174
  }];
175
175
  });
176
176
  }
177
- //#endregion
178
- //#region src/features/compaction/index.ts
179
- /**
180
- * Number of recent messages to keep verbatim when compaction runs.
181
- * Older messages beyond this count are summarised into a single system message.
182
- * Used as the default when `maxMessagesBeforeCompaction` is not provided to `buildLLMParams`.
183
- */
184
- const DEFAULT_MAX_MESSAGES_BEFORE_COMPACTION = 15;
185
177
  const TOOL_RESULT_PREVIEW_CHARS = 200;
186
178
  const SUMMARY_MAX_TOKENS = 4e3;
187
179
  /**
@@ -3724,21 +3716,19 @@ function superRefine(fn) {
3724
3716
  * Records a conversation row in the `conversations` D1 table.
3725
3717
  *
3726
3718
  * Called by `AIChatAgent` after every turn. On first call for a given
3727
- * `durableObjectId` the row is inserted with `created_at` set to now.
3719
+ * `durableObjectId` the row is inserted with `created_at` set to now,
3720
+ * and with the provided `title` and `summary` if supplied.
3728
3721
  * On subsequent calls only `user_id` and `updated_at` are refreshed —
3729
- * `created_at`, `title`, and `summary` are never overwritten.
3730
- *
3731
- * After upserting, `AIChatAgent` cancels any pending `generateSummary`
3732
- * schedule and resets it to fire 30 minutes from now, debouncing the
3733
- * idle timer on every turn.
3722
+ * `created_at`, `title`, and `summary` are never overwritten, preserving
3723
+ * any user edits.
3734
3724
  */
3735
- async function recordConversation(db, durableObjectId, userId) {
3725
+ async function recordConversation(db, durableObjectId, userId, title, summary) {
3736
3726
  const now = (/* @__PURE__ */ new Date()).toISOString();
3737
3727
  await db.prepare(`INSERT INTO conversations (durable_object_id, user_id, title, summary, created_at, updated_at)
3738
- VALUES (?, ?, NULL, NULL, ?, ?)
3728
+ VALUES (?, ?, ?, ?, ?, ?)
3739
3729
  ON CONFLICT(durable_object_id) DO UPDATE SET
3740
3730
  user_id = excluded.user_id,
3741
- updated_at = excluded.updated_at`).bind(durableObjectId, userId, now, now).run();
3731
+ updated_at = excluded.updated_at`).bind(durableObjectId, userId, title ?? null, summary ?? null, now, now).run();
3742
3732
  }
3743
3733
  /**
3744
3734
  * Returns the current `title` and `summary` for a conversation row,
@@ -3754,19 +3744,19 @@ async function updateConversationSummary(db, durableObjectId, title, summary) {
3754
3744
  await db.prepare(`UPDATE conversations SET title = ?, summary = ? WHERE durable_object_id = ?`).bind(title, summary, durableObjectId).run();
3755
3745
  }
3756
3746
  /**
3757
- * Generates a title and summary for a conversation using the provided model
3758
- * and writes the result back to D1.
3747
+ * Generates a title and summary for a conversation using the provided model.
3748
+ * Returns the result without writing to D1.
3759
3749
  *
3760
- * Fetches any existing summary first so the model can detect direction changes.
3761
- * Only the last `SUMMARY_CONTEXT_MESSAGES` messages are passed to keep the
3762
- * prompt bounded regardless of total conversation length.
3750
+ * Pass `existingSummary` so the model can detect direction changes when
3751
+ * updating an existing summary. Omit it (or pass undefined) for the initial
3752
+ * generation.
3763
3753
  *
3764
- * Called by `AIChatAgent.generateSummary()` after the idle timer fires.
3754
+ * Only the last `SUMMARY_CONTEXT_MESSAGES` messages are used to keep the
3755
+ * prompt bounded regardless of total conversation length.
3765
3756
  */
3766
- async function generateConversationSummary(db, durableObjectId, messages, model) {
3767
- const existing = await getConversationSummary(db, durableObjectId);
3757
+ async function generateTitleAndSummary(messages, model, existingSummary) {
3768
3758
  const recentMessages = await convertToModelMessages(messages.slice(-30));
3769
- const previousContext = existing?.summary ? `Previous summary: ${existing.summary}\n\nMost recent messages:` : "Conversation:";
3759
+ const previousContext = existingSummary ? `Previous summary: ${existingSummary}\n\nMost recent messages:` : "Conversation:";
3770
3760
  const { output } = await generateText({
3771
3761
  model,
3772
3762
  output: Output.object({ schema: object({
@@ -3775,7 +3765,22 @@ async function generateConversationSummary(db, durableObjectId, messages, model)
3775
3765
  }) }),
3776
3766
  prompt: `${previousContext}\n\n${formatMessagesForSummary(recentMessages)}`
3777
3767
  });
3778
- await updateConversationSummary(db, durableObjectId, output.title, output.summary);
3768
+ return output;
3769
+ }
3770
+ /**
3771
+ * Generates a title and summary for a conversation using the provided model
3772
+ * and writes the result back to D1.
3773
+ *
3774
+ * Fetches any existing summary first so the model can detect direction changes.
3775
+ * Only the last `SUMMARY_CONTEXT_MESSAGES` messages are passed to keep the
3776
+ * prompt bounded regardless of total conversation length.
3777
+ *
3778
+ * Called by `AIChatAgent` every `SUMMARY_CONTEXT_MESSAGES` messages after
3779
+ * the first turn.
3780
+ */
3781
+ async function generateConversationSummary(db, durableObjectId, messages, model) {
3782
+ const { title, summary } = await generateTitleAndSummary(messages, model, (await getConversationSummary(db, durableObjectId))?.summary ?? void 0);
3783
+ await updateConversationSummary(db, durableObjectId, title, summary);
3779
3784
  }
3780
3785
  //#endregion
3781
3786
  //#region src/agents/AIChatAgent.ts
@@ -3806,7 +3811,7 @@ var AIChatAgent = class extends AIChatAgent$1 {
3806
3811
  const db = this.env.AGENT_DB;
3807
3812
  if (!db) return null;
3808
3813
  if (!this._userId) {
3809
- console.error("[AIChatAgent] D1 write skipped: userId not set on request body");
3814
+ console.error("[AIChatAgent] Logging & conversation tracking skipped: userId not set on request body");
3810
3815
  return null;
3811
3816
  }
3812
3817
  return {
@@ -3828,39 +3833,31 @@ var AIChatAgent = class extends AIChatAgent$1 {
3828
3833
  await insertAuditEvent(context.db, this.ctx.id.toString(), context.userId, message, payload);
3829
3834
  }
3830
3835
  /**
3831
- * Records this conversation in the `conversations` D1 table and resets
3832
- * the idle summarization timer. Called automatically from `persistMessages`
3833
- * after every turn.
3834
- *
3835
- * After each upsert, any pending `generateSummary` schedule is cancelled
3836
- * and a new one is set for 30 minutes from now. If the user sends another
3837
- * message before the timer fires, the schedule is cancelled and reset again
3838
- * (debounce). When the conversation goes idle, `generateSummary` fires and
3839
- * writes the LLM-generated title and summary to D1.
3840
- */
3841
- async recordConversation() {
3842
- const context = this.resolveD1Context();
3843
- if (!context) return;
3844
- await recordConversation(context.db, this.ctx.id.toString(), context.userId);
3845
- const existing = this.getSchedules({ type: "delayed" }).find((s) => s.callback === "generateSummary");
3846
- if (existing) await this.cancelSchedule(existing.id);
3847
- await this.schedule(1800, "generateSummary", {});
3848
- }
3849
- /**
3850
- * Generates a title and summary for the conversation after 30 minutes of
3851
- * inactivity. Invoked automatically by the Cloudflare Agents SDK scheduler
3852
- * — do not call this directly.
3836
+ * Records this conversation in the `conversations` D1 table and triggers
3837
+ * LLM-based title/summary generation when appropriate. Called automatically
3838
+ * from `persistMessages` after every turn.
3853
3839
  *
3854
- * Delegates to `generateConversationSummary` in `features/conversations`,
3855
- * which fetches the previous summary, slices to the last
3856
- * `SUMMARY_CONTEXT_MESSAGES` messages, calls `fastModel` with a structured
3857
- * output schema, and writes the result back to D1.
3840
+ * On the first turn (no existing row), awaits `generateTitleAndSummary` and
3841
+ * inserts the row with title and summary already populated. On subsequent
3842
+ * turns, upserts the timestamp and fire-and-forgets a summary refresh every
3843
+ * `SUMMARY_CONTEXT_MESSAGES` messages (when the context window fully turns
3844
+ * over). Neither path blocks the response to the client.
3858
3845
  */
3859
- async generateSummary() {
3846
+ async recordConversation(messageCount) {
3860
3847
  const context = this.resolveD1Context();
3861
3848
  if (!context) return;
3862
- await generateConversationSummary(context.db, this.ctx.id.toString(), this.messages, this.fastModel);
3863
- this.log("conversation summary generated");
3849
+ const durableObjectId = this.ctx.id.toString();
3850
+ if (!await getConversationSummary(context.db, durableObjectId)) {
3851
+ const { title, summary } = await generateTitleAndSummary(this.messages, this.fastModel);
3852
+ await recordConversation(context.db, durableObjectId, context.userId, title, summary);
3853
+ this.log("conversation summary generated");
3854
+ } else {
3855
+ await recordConversation(context.db, durableObjectId, context.userId);
3856
+ if (messageCount % 30 === 0) {
3857
+ generateConversationSummary(context.db, durableObjectId, this.messages, this.fastModel);
3858
+ this.log("conversation summary updated");
3859
+ }
3860
+ }
3864
3861
  }
3865
3862
  /**
3866
3863
  * Builds the parameter object for a `streamText` or `generateText` call,
@@ -3913,7 +3910,7 @@ var AIChatAgent = class extends AIChatAgent$1 {
3913
3910
  * Returns an empty array if no skills have been loaded yet.
3914
3911
  */
3915
3912
  async getLoadedSkills() {
3916
- return getStoredSkills(this.sql);
3913
+ return getStoredSkills(this.sql.bind(this));
3917
3914
  }
3918
3915
  /**
3919
3916
  * Extracts skill state from activate_skill results, persists to DO SQLite,
@@ -3944,12 +3941,50 @@ var AIChatAgent = class extends AIChatAgent$1 {
3944
3941
  } catch {}
3945
3942
  }
3946
3943
  }
3947
- if (latestSkillState !== void 0) saveStoredSkills(this.sql, latestSkillState);
3944
+ if (latestSkillState !== void 0) saveStoredSkills(this.sql.bind(this), latestSkillState);
3948
3945
  this.log("turn completed", buildTurnSummary(messages, latestSkillState ?? []));
3949
- this.recordConversation();
3946
+ this.recordConversation(messages.length);
3950
3947
  const filtered = filterEphemeralMessages(messages);
3951
3948
  return super.persistMessages(filtered, excludeBroadcastIds, options);
3952
3949
  }
3953
3950
  };
3954
3951
  //#endregion
3955
- export { AIChatAgent, DEFAULT_MAX_MESSAGES_BEFORE_COMPACTION, buildLLMParams };
3952
+ //#region src/decorators/guard.ts
3953
+ /**
3954
+ * Method decorator (TypeScript 5+ stage-3) that runs `guardFn` with the second
3955
+ * argument's `body` (the chat request body). If `guardFn` returns a
3956
+ * {@link Response}, that value is returned and the original method is not called.
3957
+ *
3958
+ * Intended for `onChatMessage(onFinish, options?)` on subclasses of
3959
+ * `AIChatAgent`; `options` is read as `{ body?: Record<string, unknown> }`.
3960
+ *
3961
+ * @param guardFn - Called with `options?.body` before the method body.
3962
+ *
3963
+ * @example
3964
+ * ```ts
3965
+ * const requireToken: GuardFn = async (body) => {
3966
+ * if (!await isValidToken(body?.token)) {
3967
+ * return new Response("Unauthorized", { status: 401 });
3968
+ * }
3969
+ * };
3970
+ *
3971
+ * class MyAgent extends AIChatAgent<Env> {
3972
+ * @guard(requireToken)
3973
+ * async onChatMessage(onFinish, options) {
3974
+ * // ...
3975
+ * }
3976
+ * }
3977
+ * ```
3978
+ */
3979
+ function guard(guardFn) {
3980
+ return function(target, _context) {
3981
+ return async function(...args) {
3982
+ const options = args[1];
3983
+ const result = await guardFn(options?.body);
3984
+ if (result instanceof Response) return result;
3985
+ return target.apply(this, args);
3986
+ };
3987
+ };
3988
+ }
3989
+ //#endregion
3990
+ export { AIChatAgent, buildLLMParams, guard };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@economic/agents",
3
- "version": "0.0.1-alpha.17",
3
+ "version": "0.0.1-alpha.19",
4
4
  "description": "A starter for creating a TypeScript package.",
5
5
  "homepage": "https://github.com/author/library#readme",
6
6
  "bugs": {