@economic/agents 0.0.1-alpha.4 → 0.0.1-alpha.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -105,6 +105,14 @@ interface SkillsConfig {
105
105
  tools: ToolSet;
106
106
  /** All available skills that can be loaded on demand */
107
107
  skills: Skill[];
108
+ /**
109
+ * The base system prompt for the agent. When provided, createSkills uses
110
+ * this to compose the full system string (base + guidance) returned by
111
+ * getSystem() and from prepareStep. Guidance is kept in the system
112
+ * parameter rather than the messages array — Anthropic/Gemini only allow
113
+ * system messages at the start of the conversation.
114
+ */
115
+ systemPrompt?: string;
108
116
  /**
109
117
  * Skill names that were loaded in previous turns, read from D1 at turn
110
118
  * start. Seeds the in-memory loadedSkills set so prior state is restored
@@ -128,16 +136,17 @@ interface SkillsConfig {
128
136
  /**
129
137
  * Skill context injected by the @withSkills decorator.
130
138
  *
131
- * Spread the context fields directly into streamText messages already has
132
- * guidance injected at the correct position:
139
+ * Spread the skill fields into streamText and pass headers via
140
+ * experimental_context so tools can read them from their second execute arg:
133
141
  *
134
142
  * ```typescript
135
- * const { messages, ...skillArgs } = ctx;
143
+ * const { messages, headers, guidance, ...skillArgs } = ctx;
136
144
  * return streamText({
137
145
  * model: this.getModel(),
138
146
  * system: "Your base prompt — static, never includes guidance",
139
147
  * messages,
140
148
  * ...skillArgs,
149
+ * experimental_context: { headers },
141
150
  * onFinish,
142
151
  * stopWhen: stepCountIs(20),
143
152
  * }).toUIMessageStreamResponse();
@@ -149,16 +158,25 @@ interface SkillContext {
149
158
  /** Currently active tool names — spread into streamText */
150
159
  activeTools: string[];
151
160
  /**
152
- * Updates active tools and the guidance system message before each LLM step.
153
- * Spread into streamText.
161
+ * Updates activeTools before each LLM step. Spread into streamText.
154
162
  */
155
163
  prepareStep: ai.PrepareStepFunction;
156
164
  /**
157
- * Conversation messages read from D1 with current skill guidance already
158
- * injected just before the last message (the current user turn). Pass
159
- * directly as the `messages` param of streamText.
165
+ * Guidance text from all currently-loaded skills. Compose your system
166
+ * prompt as: `${myBase}${guidance ? '\n\n' + guidance : ''}`
167
+ */
168
+ guidance: string;
169
+ /**
170
+ * Plain conversation history from DO SQLite — no guidance injected.
171
+ * Pass directly as the `messages` param of streamText.
160
172
  */
161
173
  messages: ai.ModelMessage[];
174
+ /**
175
+ * Query-parameter values captured from the WebSocket connection URL,
176
+ * filtered to the names listed in passthroughRequestHeaders. Pass to tools via
177
+ * experimental_context: `streamText({ experimental_context: { headers }, ... })`
178
+ */
179
+ headers: Record<string, string>;
162
180
  }
163
181
  /**
164
182
  * The object returned by createSkills().
@@ -169,6 +187,12 @@ interface SkillsResult {
169
187
  getLoadedGuidance(): string;
170
188
  /** Current loaded skill names */
171
189
  getLoadedSkills(): string[];
190
+ /**
191
+ * Full system string: base system prompt concatenated with guidance from
192
+ * all currently-loaded skills. Only meaningful when `systemPrompt` was
193
+ * passed to createSkills. Use as the `system` param of streamText.
194
+ */
195
+ getSystem(): string;
172
196
  }
173
197
  //#endregion
174
198
  //#region src/agents/chat/AIChatAgentBase.d.ts
@@ -227,6 +251,21 @@ declare abstract class AIChatAgentBase<Env extends Cloudflare.Env = Cloudflare.E
227
251
  * tail is maxPersistedMessages - 1 recent messages. Raise or lower per agent.
228
252
  */
229
253
  maxPersistedMessages: number;
254
+ /**
255
+ * Query parameter names to read from the WebSocket connection URL and
256
+ * forward to tools via experimental_context.
257
+ *
258
+ * Browsers cannot set custom headers on WebSocket upgrade requests, so
259
+ * auth tokens and other metadata must be passed as query parameters instead.
260
+ *
261
+ * ```typescript
262
+ * passthroughRequestHeaders = ['authorization', 'x-user-id'];
263
+ * ```
264
+ *
265
+ * Values are read from the URL at connect time and stored in _requestHeaders
266
+ * for the lifetime of the Durable Object instance.
267
+ */
268
+ passthroughRequestHeaders: string[];
230
269
  /** Tools that are always active regardless of loaded skills */
231
270
  abstract getTools(): ToolSet;
232
271
  /** All skills available for on-demand loading */
@@ -259,6 +298,8 @@ declare abstract class AIChatAgentBase<Env extends Cloudflare.Env = Cloudflare.E
259
298
  * skill when activate_skill is called. Defaults to allow-all.
260
299
  */
261
300
  protected filterSkill(_skillName: string): Promise<boolean>;
301
+ /** @internal Captured values, keyed by the names listed in passthroughRequestHeaders. */
302
+ protected _requestHeaders: Record<string, string>;
262
303
  /**
263
304
  * Buffered skill state from the current turn.
264
305
  *
@@ -331,13 +372,14 @@ declare abstract class AIChatAgentBase<Env extends Cloudflare.Env = Cloudflare.E
331
372
  /**
332
373
  * Called by the @withSkills decorator at the start of each turn.
333
374
  *
334
- * Reads loaded skill state from D1, seeds createSkills, injects guidance,
335
- * and returns a SkillContext ready to use in a streamText call.
375
+ * Reads loaded skill state from D1, seeds createSkills, and returns a
376
+ * SkillContext ready to use in a streamText call.
336
377
  *
337
- * The returned `messages` already has guidance injected just before the
338
- * current user turn pass it directly as the `messages` param of streamText.
339
- * Guidance is never stored in DO SQLite, so loaded_skills in D1 is the
340
- * single source of truth for which skills are active.
378
+ * Guidance is exposed as `ctx.guidance` — compose your system prompt as:
379
+ * `${myBase}${ctx.guidance ? '\n\n' + ctx.guidance : ''}`
380
+ *
381
+ * Messages are plain (no guidance injected). Guidance stays out of the
382
+ * messages array — Anthropic/Gemini only allow system messages at position 0.
341
383
  */
342
384
  protected _prepareSkillContext(): Promise<SkillContext>;
343
385
  }
@@ -358,10 +400,11 @@ declare abstract class AIChatAgentBase<Env extends Cloudflare.Env = Cloudflare.E
358
400
  * ctx: SkillContext,
359
401
  * options?: OnChatMessageOptions,
360
402
  * ) {
361
- * const { messages, ...skillArgs } = ctx;
403
+ * const { messages, guidance, ...skillArgs } = ctx;
404
+ * const base = "Your base prompt";
362
405
  * return streamText({
363
406
  * model: this.getModel(),
364
- * system: "Your base prompt — static, never includes guidance",
407
+ * system: guidance ? `${base}\n\n${guidance}` : base,
365
408
  * messages,
366
409
  * ...skillArgs,
367
410
  * onFinish,
@@ -390,13 +433,30 @@ declare function withSkills(fn: WithSkillsFn, _context: ClassMethodDecoratorCont
390
433
  * ```typescript
391
434
  * export class MyAgent extends AIChatAgent {
392
435
  * getModel() { return openai("gpt-4o"); }
393
- * getTools() { return []; }
436
+ * getTools() { return tools; }
394
437
  * getSkills() { return [searchSkill, codeSkill]; }
395
438
  * getSystemPrompt() { return "You are a helpful assistant."; }
396
439
  * getDB() { return this.env.AGENT_DB; }
397
440
  * }
398
441
  * ```
399
442
  *
443
+ * ## Passing auth headers to tools
444
+ *
445
+ * Set `passthroughRequestHeaders` to capture values from the WebSocket connection
446
+ * URL's query parameters. They are forwarded automatically to every tool via `experimental_context`:
447
+ *
448
+ * ```typescript
449
+ * passthroughRequestHeaders = ['authorization', 'x-user-id'];
450
+ * ```
451
+ *
452
+ * Tools receive them as the second `execute` argument:
453
+ *
454
+ * ```typescript
455
+ * execute: async (args, { experimental_context }) => {
456
+ * const { authorization } = experimental_context?.headers ?? {};
457
+ * }
458
+ * ```
459
+ *
400
460
  * If you need full control over the `streamText` call (custom model options,
401
461
  * streaming transforms, varying the model per request, etc.) use
402
462
  * `AIChatAgentBase` with the `@withSkills` decorator instead.
package/dist/index.mjs CHANGED
@@ -80,7 +80,11 @@ function createSkills(config) {
80
80
  function getLoadedGuidance() {
81
81
  return [...loadedSkills].map((name) => skillMap.get(name)?.guidance).filter((g) => Boolean(g)).join("\n\n");
82
82
  }
83
- let previousGuidance = getLoadedGuidance();
83
+ function getSystem() {
84
+ const guidance = getLoadedGuidance();
85
+ if (!config.systemPrompt) return guidance;
86
+ return guidance ? `${config.systemPrompt}\n\n${guidance}` : config.systemPrompt;
87
+ }
84
88
  allTools[ACTIVATE_SKILL] = tool({
85
89
  description: buildActivateSkillDescription(skills),
86
90
  inputSchema: jsonSchema({
@@ -136,13 +140,10 @@ function createSkills(config) {
136
140
  ].join("\n");
137
141
  }
138
142
  });
139
- const prepareStep = async ({ messages }) => {
140
- const guidance = getLoadedGuidance();
141
- const updatedMessages = injectGuidance(messages, guidance, previousGuidance);
142
- previousGuidance = guidance;
143
+ const prepareStep = async () => {
143
144
  return {
144
145
  activeTools: getActiveToolNames(),
145
- messages: updatedMessages
146
+ ...config.systemPrompt !== void 0 && { system: getSystem() }
146
147
  };
147
148
  };
148
149
  return {
@@ -150,6 +151,7 @@ function createSkills(config) {
150
151
  activeTools: getActiveToolNames(),
151
152
  prepareStep,
152
153
  getLoadedGuidance,
154
+ getSystem,
153
155
  getLoadedSkills() {
154
156
  return [...loadedSkills];
155
157
  }
@@ -237,13 +239,14 @@ function filterEphemeralMessages(messages, guidanceToStrip) {
237
239
  function injectGuidance(messages, guidance, previousGuidance) {
238
240
  if (!guidance) return messages;
239
241
  const base = previousGuidance ? messages.filter((m) => !(m.role === "system" && m.content === previousGuidance)) : messages;
242
+ const insertAt = base.findLastIndex((m) => m.role === "user");
240
243
  return [
241
- ...base.slice(0, -1),
244
+ ...base.slice(0, insertAt),
242
245
  {
243
246
  role: "system",
244
247
  content: guidance
245
248
  },
246
- base.at(-1)
249
+ ...base.slice(insertAt)
247
250
  ];
248
251
  }
249
252
  //#endregion
@@ -444,6 +447,21 @@ var AIChatAgentBase = class extends AIChatAgent$1 {
444
447
  */
445
448
  maxPersistedMessages = 50;
446
449
  /**
450
+ * Query parameter names to read from the WebSocket connection URL and
451
+ * forward to tools via experimental_context.
452
+ *
453
+ * Browsers cannot set custom headers on WebSocket upgrade requests, so
454
+ * auth tokens and other metadata must be passed as query parameters instead.
455
+ *
456
+ * ```typescript
457
+ * passthroughRequestHeaders = ['authorization', 'x-user-id'];
458
+ * ```
459
+ *
460
+ * Values are read from the URL at connect time and stored in _requestHeaders
461
+ * for the lifetime of the Durable Object instance.
462
+ */
463
+ passthroughRequestHeaders = [];
464
+ /**
447
465
  * Return a LanguageModel to use for compaction summarisation.
448
466
  *
449
467
  * Return undefined (default) to disable compaction — messages are kept up
@@ -473,6 +491,8 @@ var AIChatAgentBase = class extends AIChatAgent$1 {
473
491
  async filterSkill(_skillName) {
474
492
  return true;
475
493
  }
494
+ /** @internal Captured values, keyed by the names listed in passthroughRequestHeaders. */
495
+ _requestHeaders = {};
476
496
  /**
477
497
  * Buffered skill state from the current turn.
478
498
  *
@@ -516,6 +536,14 @@ var AIChatAgentBase = class extends AIChatAgent$1 {
516
536
  * that case and replays in-progress chunks via its own protocol.
517
537
  */
518
538
  async onConnect(connection, ctx) {
539
+ if (this.passthroughRequestHeaders.length > 0) {
540
+ this._requestHeaders = {};
541
+ const params = new URL(ctx.request.url).searchParams;
542
+ for (const name of this.passthroughRequestHeaders) {
543
+ const value = params.get(name);
544
+ if (value !== null) this._requestHeaders[name] = value;
545
+ }
546
+ }
519
547
  await super.onConnect(connection, ctx);
520
548
  if (!this._activeStreamId && this.messages.length > 0) connection.send(JSON.stringify({
521
549
  type: "cf_agent_chat_messages",
@@ -565,13 +593,14 @@ var AIChatAgentBase = class extends AIChatAgent$1 {
565
593
  /**
566
594
  * Called by the @withSkills decorator at the start of each turn.
567
595
  *
568
- * Reads loaded skill state from D1, seeds createSkills, injects guidance,
569
- * and returns a SkillContext ready to use in a streamText call.
596
+ * Reads loaded skill state from D1, seeds createSkills, and returns a
597
+ * SkillContext ready to use in a streamText call.
570
598
  *
571
- * The returned `messages` already has guidance injected just before the
572
- * current user turn pass it directly as the `messages` param of streamText.
573
- * Guidance is never stored in DO SQLite, so loaded_skills in D1 is the
574
- * single source of truth for which skills are active.
599
+ * Guidance is exposed as `ctx.guidance` — compose your system prompt as:
600
+ * `${myBase}${ctx.guidance ? '\n\n' + ctx.guidance : ''}`
601
+ *
602
+ * Messages are plain (no guidance injected). Guidance stays out of the
603
+ * messages array — Anthropic/Gemini only allow system messages at position 0.
575
604
  */
576
605
  async _prepareSkillContext() {
577
606
  const loadedSkills = await this._readSkillState();
@@ -584,13 +613,13 @@ var AIChatAgentBase = class extends AIChatAgent$1 {
584
613
  },
585
614
  filterSkill: (name) => this.filterSkill(name)
586
615
  });
587
- const guidance = skills.getLoadedGuidance();
588
- const messages = injectGuidance(await convertToModelMessages(this.messages), guidance);
589
616
  return {
590
617
  tools: skills.tools,
591
618
  activeTools: skills.activeTools,
592
619
  prepareStep: skills.prepareStep,
593
- messages
620
+ guidance: skills.getLoadedGuidance(),
621
+ messages: await convertToModelMessages(this.messages),
622
+ headers: this._requestHeaders
594
623
  };
595
624
  }
596
625
  };
@@ -619,13 +648,30 @@ function withSkills(fn, _context) {
619
648
  * ```typescript
620
649
  * export class MyAgent extends AIChatAgent {
621
650
  * getModel() { return openai("gpt-4o"); }
622
- * getTools() { return []; }
651
+ * getTools() { return tools; }
623
652
  * getSkills() { return [searchSkill, codeSkill]; }
624
653
  * getSystemPrompt() { return "You are a helpful assistant."; }
625
654
  * getDB() { return this.env.AGENT_DB; }
626
655
  * }
627
656
  * ```
628
657
  *
658
+ * ## Passing auth headers to tools
659
+ *
660
+ * Set `passthroughRequestHeaders` to capture headers from the WebSocket upgrade
661
+ * request. They are forwarded automatically to every tool via `experimental_context`:
662
+ *
663
+ * ```typescript
664
+ * passthroughRequestHeaders = ['authorization', 'x-user-id'];
665
+ * ```
666
+ *
667
+ * Tools receive them as the second `execute` argument:
668
+ *
669
+ * ```typescript
670
+ * execute: async (args, { experimental_context }) => {
671
+ * const { authorization } = experimental_context?.headers ?? {};
672
+ * }
673
+ * ```
674
+ *
629
675
  * If you need full control over the `streamText` call (custom model options,
630
676
  * streaming transforms, varying the model per request, etc.) use
631
677
  * `AIChatAgentBase` with the `@withSkills` decorator instead.
@@ -648,21 +694,21 @@ var AIChatAgent = class extends AIChatAgentBase {
648
694
  const skills = createSkills({
649
695
  tools: this.getTools(),
650
696
  skills: this.getSkills(),
697
+ systemPrompt: this.getSystemPrompt(),
651
698
  initialLoadedSkills: loadedSkills,
652
699
  onSkillsChanged: async (updated) => {
653
700
  this._pendingSkills = updated;
654
701
  },
655
702
  filterSkill: (name) => this.filterSkill(name)
656
703
  });
657
- const guidance = skills.getLoadedGuidance();
658
- const messages = injectGuidance(await convertToModelMessages(this.messages), guidance);
659
704
  return streamText({
660
705
  model: this.getModel(),
661
- system: this.getSystemPrompt(),
662
- messages,
706
+ system: skills.getSystem(),
707
+ messages: await convertToModelMessages(this.messages),
663
708
  tools: skills.tools,
664
709
  activeTools: skills.activeTools,
665
710
  prepareStep: skills.prepareStep,
711
+ experimental_context: { headers: this._requestHeaders },
666
712
  stopWhen: stepCountIs(20),
667
713
  abortSignal: options?.abortSignal,
668
714
  onFinish
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@economic/agents",
3
- "version": "0.0.1-alpha.4",
3
+ "version": "0.0.1-alpha.6",
4
4
  "description": "A starter for creating a TypeScript package.",
5
5
  "homepage": "https://github.com/author/library#readme",
6
6
  "bugs": {