@economic/agents 0.0.1-alpha.3 → 0.0.1-alpha.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -75,27 +75,15 @@ declare global {
75
75
  }
76
76
  //#endregion
77
77
  //#region src/features/skills/types.d.ts
78
- /**
79
- * A single tool with a name, JSON Schema parameters, and an execute function.
80
- *
81
- * Tools are defined in this SDK-agnostic format and converted to the
82
- * target SDK's format by the adapter layer (createSkills).
83
- */
84
- interface Tool {
85
- name: string;
86
- description: string;
87
- /** JSON Schema object describing the tool's input parameters */
88
- parameters: Record<string, unknown>;
89
- execute(args: Record<string, unknown>, options: {
90
- toolCallId: string;
91
- }): Promise<string>;
92
- }
93
78
  /**
94
79
  * A named group of related tools that can be loaded together on demand.
95
80
  *
96
81
  * The agent starts with only its always-on tools active. When the LLM calls
97
82
  * activate_skill with a skill name, that skill's tools become available for
98
83
  * the rest of the conversation.
84
+ *
85
+ * Define tools using the AI SDK's `tool()` helper — Zod schemas are supported
86
+ * natively via the `parameters` field.
99
87
  */
100
88
  interface Skill {
101
89
  name: string;
@@ -107,16 +95,24 @@ interface Skill {
107
95
  * skill that is loaded, keeping the `system` prompt static and cacheable.
108
96
  */
109
97
  guidance?: string;
110
- tools: Tool[];
98
+ tools: ToolSet;
111
99
  }
112
100
  /**
113
101
  * Configuration passed to createSkills().
114
102
  */
115
103
  interface SkillsConfig {
116
104
  /** Tools that are always active regardless of loaded skills */
117
- tools: Tool[];
105
+ tools: ToolSet;
118
106
  /** All available skills that can be loaded on demand */
119
107
  skills: Skill[];
108
+ /**
109
+ * The base system prompt for the agent. When provided, createSkills uses
110
+ * this to compose the full system string (base + guidance) returned by
111
+ * getSystem() and from prepareStep. Guidance is kept in the system
112
+ * parameter rather than the messages array — Anthropic/Gemini only allow
113
+ * system messages at the start of the conversation.
114
+ */
115
+ systemPrompt?: string;
120
116
  /**
121
117
  * Skill names that were loaded in previous turns, read from D1 at turn
122
118
  * start. Seeds the in-memory loadedSkills set so prior state is restored
@@ -161,14 +157,17 @@ interface SkillContext {
161
157
  /** Currently active tool names — spread into streamText */
162
158
  activeTools: string[];
163
159
  /**
164
- * Updates active tools and the guidance system message before each LLM step.
165
- * Spread into streamText.
160
+ * Updates activeTools before each LLM step. Spread into streamText.
166
161
  */
167
162
  prepareStep: ai.PrepareStepFunction;
168
163
  /**
169
- * Conversation messages read from D1 with current skill guidance already
170
- * injected just before the last message (the current user turn). Pass
171
- * directly as the `messages` param of streamText.
164
+ * Guidance text from all currently-loaded skills. Compose your system
165
+ * prompt as: `${myBase}${guidance ? '\n\n' + guidance : ''}`
166
+ */
167
+ guidance: string;
168
+ /**
169
+ * Plain conversation history from DO SQLite — no guidance injected.
170
+ * Pass directly as the `messages` param of streamText.
172
171
  */
173
172
  messages: ai.ModelMessage[];
174
173
  }
@@ -181,6 +180,12 @@ interface SkillsResult {
181
180
  getLoadedGuidance(): string;
182
181
  /** Current loaded skill names */
183
182
  getLoadedSkills(): string[];
183
+ /**
184
+ * Full system string: base system prompt concatenated with guidance from
185
+ * all currently-loaded skills. Only meaningful when `systemPrompt` was
186
+ * passed to createSkills. Use as the `system` param of streamText.
187
+ */
188
+ getSystem(): string;
184
189
  }
185
190
  //#endregion
186
191
  //#region src/agents/chat/AIChatAgentBase.d.ts
@@ -240,7 +245,7 @@ declare abstract class AIChatAgentBase<Env extends Cloudflare.Env = Cloudflare.E
240
245
  */
241
246
  maxPersistedMessages: number;
242
247
  /** Tools that are always active regardless of loaded skills */
243
- abstract getTools(): Tool[];
248
+ abstract getTools(): ToolSet;
244
249
  /** All skills available for on-demand loading */
245
250
  abstract getSkills(): Skill[];
246
251
  /**
@@ -343,13 +348,14 @@ declare abstract class AIChatAgentBase<Env extends Cloudflare.Env = Cloudflare.E
343
348
  /**
344
349
  * Called by the @withSkills decorator at the start of each turn.
345
350
  *
346
- * Reads loaded skill state from D1, seeds createSkills, injects guidance,
347
- * and returns a SkillContext ready to use in a streamText call.
351
+ * Reads loaded skill state from D1, seeds createSkills, and returns a
352
+ * SkillContext ready to use in a streamText call.
353
+ *
354
+ * Guidance is exposed as `ctx.guidance` — compose your system prompt as:
355
+ * `${myBase}${ctx.guidance ? '\n\n' + ctx.guidance : ''}`
348
356
  *
349
- * The returned `messages` already has guidance injected just before the
350
- * current user turn — pass it directly as the `messages` param of streamText.
351
- * Guidance is never stored in DO SQLite, so loaded_skills in D1 is the
352
- * single source of truth for which skills are active.
357
+ * Messages are plain (no guidance injected). Guidance stays out of the
358
+ * messages array — Anthropic/Gemini only allow system messages at position 0.
353
359
  */
354
360
  protected _prepareSkillContext(): Promise<SkillContext>;
355
361
  }
@@ -370,10 +376,11 @@ declare abstract class AIChatAgentBase<Env extends Cloudflare.Env = Cloudflare.E
370
376
  * ctx: SkillContext,
371
377
  * options?: OnChatMessageOptions,
372
378
  * ) {
373
- * const { messages, ...skillArgs } = ctx;
379
+ * const { messages, guidance, ...skillArgs } = ctx;
380
+ * const base = "Your base prompt";
374
381
  * return streamText({
375
382
  * model: this.getModel(),
376
- * system: "Your base prompt — static, never includes guidance",
383
+ * system: guidance ? `${base}\n\n${guidance}` : base,
377
384
  * messages,
378
385
  * ...skillArgs,
379
386
  * onFinish,
@@ -417,7 +424,7 @@ declare abstract class AIChatAgent<Env extends Cloudflare.Env = Cloudflare.Env>
417
424
  /** Return the Vercel AI SDK LanguageModel to use for this agent */
418
425
  abstract getModel(): LanguageModel;
419
426
  /** Tools that are always active regardless of loaded skills */
420
- abstract getTools(): Tool[];
427
+ abstract getTools(): ToolSet;
421
428
  /** All skills available for on-demand loading */
422
429
  abstract getSkills(): Skill[];
423
430
  /**
@@ -558,4 +565,4 @@ declare function compactMessages(messages: UIMessage[], model: LanguageModel, ta
558
565
  */
559
566
  declare function compactIfNeeded(messages: UIMessage[], model: LanguageModel | undefined, tailSize: number): Promise<UIMessage[]>;
560
567
  //#endregion
561
- export { AIChatAgent, AIChatAgentBase, COMPACT_TOKEN_THRESHOLD, type Skill, type SkillContext, type SkillsConfig, type SkillsResult, type Tool, compactIfNeeded, compactMessages, createSkills, estimateMessagesTokens, filterEphemeralMessages, injectGuidance, withSkills };
568
+ export { AIChatAgent, AIChatAgentBase, COMPACT_TOKEN_THRESHOLD, type Skill, type SkillContext, type SkillsConfig, type SkillsResult, compactIfNeeded, compactMessages, createSkills, estimateMessagesTokens, filterEphemeralMessages, injectGuidance, withSkills };
package/dist/index.mjs CHANGED
@@ -62,35 +62,29 @@ function createSkills(config) {
62
62
  const loadedSkills = new Set(config.initialLoadedSkills ?? []);
63
63
  const skillMap = new Map(skills.map((s) => [s.name, s]));
64
64
  const allTools = {};
65
- const registeredNames = /* @__PURE__ */ new Set();
66
- function registerTool(t) {
67
- if (registeredNames.has(t.name)) return;
68
- allTools[t.name] = tool({
69
- description: t.description,
70
- inputSchema: jsonSchema(t.parameters),
71
- execute: async (args, options) => t.execute(args, { toolCallId: options.toolCallId })
72
- });
73
- registeredNames.add(t.name);
74
- }
75
- for (const t of alwaysOnTools) registerTool(t);
76
- for (const skill of skills) for (const t of skill.tools) registerTool(t);
65
+ Object.assign(allTools, alwaysOnTools);
66
+ for (const skill of skills) Object.assign(allTools, skill.tools);
77
67
  function getActiveToolNames() {
78
68
  const names = [
79
69
  ACTIVATE_SKILL,
80
70
  LIST_CAPABILITIES,
81
- ...alwaysOnTools.map((t) => t.name)
71
+ ...Object.keys(alwaysOnTools)
82
72
  ];
83
73
  for (const skillName of loadedSkills) {
84
74
  const skill = skillMap.get(skillName);
85
75
  if (!skill) continue;
86
- for (const t of skill.tools) if (!names.includes(t.name)) names.push(t.name);
76
+ for (const toolName of Object.keys(skill.tools)) if (!names.includes(toolName)) names.push(toolName);
87
77
  }
88
78
  return names;
89
79
  }
90
80
  function getLoadedGuidance() {
91
81
  return [...loadedSkills].map((name) => skillMap.get(name)?.guidance).filter((g) => Boolean(g)).join("\n\n");
92
82
  }
93
- let previousGuidance = getLoadedGuidance();
83
+ function getSystem() {
84
+ const guidance = getLoadedGuidance();
85
+ if (!config.systemPrompt) return guidance;
86
+ return guidance ? `${config.systemPrompt}\n\n${guidance}` : config.systemPrompt;
87
+ }
94
88
  allTools[ACTIVATE_SKILL] = tool({
95
89
  description: buildActivateSkillDescription(skills),
96
90
  inputSchema: jsonSchema({
@@ -146,13 +140,10 @@ function createSkills(config) {
146
140
  ].join("\n");
147
141
  }
148
142
  });
149
- const prepareStep = async ({ messages }) => {
150
- const guidance = getLoadedGuidance();
151
- const updatedMessages = injectGuidance(messages, guidance, previousGuidance);
152
- previousGuidance = guidance;
143
+ const prepareStep = async () => {
153
144
  return {
154
145
  activeTools: getActiveToolNames(),
155
- messages: updatedMessages
146
+ ...config.systemPrompt !== void 0 && { system: getSystem() }
156
147
  };
157
148
  };
158
149
  return {
@@ -160,6 +151,7 @@ function createSkills(config) {
160
151
  activeTools: getActiveToolNames(),
161
152
  prepareStep,
162
153
  getLoadedGuidance,
154
+ getSystem,
163
155
  getLoadedSkills() {
164
156
  return [...loadedSkills];
165
157
  }
@@ -247,13 +239,14 @@ function filterEphemeralMessages(messages, guidanceToStrip) {
247
239
  function injectGuidance(messages, guidance, previousGuidance) {
248
240
  if (!guidance) return messages;
249
241
  const base = previousGuidance ? messages.filter((m) => !(m.role === "system" && m.content === previousGuidance)) : messages;
242
+ const insertAt = base.findLastIndex((m) => m.role === "user");
250
243
  return [
251
- ...base.slice(0, -1),
244
+ ...base.slice(0, insertAt),
252
245
  {
253
246
  role: "system",
254
247
  content: guidance
255
248
  },
256
- base.at(-1)
249
+ ...base.slice(insertAt)
257
250
  ];
258
251
  }
259
252
  //#endregion
@@ -575,13 +568,14 @@ var AIChatAgentBase = class extends AIChatAgent$1 {
575
568
  /**
576
569
  * Called by the @withSkills decorator at the start of each turn.
577
570
  *
578
- * Reads loaded skill state from D1, seeds createSkills, injects guidance,
579
- * and returns a SkillContext ready to use in a streamText call.
571
+ * Reads loaded skill state from D1, seeds createSkills, and returns a
572
+ * SkillContext ready to use in a streamText call.
573
+ *
574
+ * Guidance is exposed as `ctx.guidance` — compose your system prompt as:
575
+ * `${myBase}${ctx.guidance ? '\n\n' + ctx.guidance : ''}`
580
576
  *
581
- * The returned `messages` already has guidance injected just before the
582
- * current user turn — pass it directly as the `messages` param of streamText.
583
- * Guidance is never stored in DO SQLite, so loaded_skills in D1 is the
584
- * single source of truth for which skills are active.
577
+ * Messages are plain (no guidance injected). Guidance stays out of the
578
+ * messages array — Anthropic/Gemini only allow system messages at position 0.
585
579
  */
586
580
  async _prepareSkillContext() {
587
581
  const loadedSkills = await this._readSkillState();
@@ -594,13 +588,12 @@ var AIChatAgentBase = class extends AIChatAgent$1 {
594
588
  },
595
589
  filterSkill: (name) => this.filterSkill(name)
596
590
  });
597
- const guidance = skills.getLoadedGuidance();
598
- const messages = injectGuidance(await convertToModelMessages(this.messages), guidance);
599
591
  return {
600
592
  tools: skills.tools,
601
593
  activeTools: skills.activeTools,
602
594
  prepareStep: skills.prepareStep,
603
- messages
595
+ guidance: skills.getLoadedGuidance(),
596
+ messages: await convertToModelMessages(this.messages)
604
597
  };
605
598
  }
606
599
  };
@@ -658,18 +651,17 @@ var AIChatAgent = class extends AIChatAgentBase {
658
651
  const skills = createSkills({
659
652
  tools: this.getTools(),
660
653
  skills: this.getSkills(),
654
+ systemPrompt: this.getSystemPrompt(),
661
655
  initialLoadedSkills: loadedSkills,
662
656
  onSkillsChanged: async (updated) => {
663
657
  this._pendingSkills = updated;
664
658
  },
665
659
  filterSkill: (name) => this.filterSkill(name)
666
660
  });
667
- const guidance = skills.getLoadedGuidance();
668
- const messages = injectGuidance(await convertToModelMessages(this.messages), guidance);
669
661
  return streamText({
670
662
  model: this.getModel(),
671
- system: this.getSystemPrompt(),
672
- messages,
663
+ system: skills.getSystem(),
664
+ messages: await convertToModelMessages(this.messages),
673
665
  tools: skills.tools,
674
666
  activeTools: skills.activeTools,
675
667
  prepareStep: skills.prepareStep,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@economic/agents",
3
- "version": "0.0.1-alpha.3",
3
+ "version": "0.0.1-alpha.5",
4
4
  "description": "A starter for creating a TypeScript package.",
5
5
  "homepage": "https://github.com/author/library#readme",
6
6
  "bugs": {