@clawpify/skills 1.0.4 → 1.0.5

This diff shows the differences between publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/src/agent.ts CHANGED
@@ -1,5 +1,132 @@
1
1
  import Anthropic from "@anthropic-ai/sdk";
2
2
  import { ShopifyClient } from "./shopify";
3
+ import type { MemoryStore } from "./memory";
4
+
5
+ // ---------------------------------------------------------------------------
6
+ // Types
7
+ // ---------------------------------------------------------------------------
8
+
9
+ /** Accumulated token usage and estimated cost for a single chat() call. */
10
+ export interface TokenUsage {
11
+ inputTokens: number;
12
+ outputTokens: number;
13
+ cacheCreationInputTokens: number;
14
+ cacheReadInputTokens: number;
15
+ /** Estimated USD cost based on the configured pricing. */
16
+ totalCost: number;
17
+ }
18
+
19
+ /** Per-model pricing in USD per million tokens. */
20
+ export interface ModelPricing {
21
+ inputPerMillion: number;
22
+ outputPerMillion: number;
23
+ cacheWritePerMillion: number;
24
+ cacheReadPerMillion: number;
25
+ }
26
+
27
+ /** A user-provided tool + handler that extends the agent's capabilities. */
28
+ export interface AgentPlugin {
29
+ tool: Anthropic.Tool;
30
+ handler: (input: Record<string, any>) => Promise<string>;
31
+ }
32
+
33
+ /** Lifecycle hooks for observability and custom behavior. */
34
+ export interface AgentHooks {
35
+ /** Fires before a chat request is processed. */
36
+ onRequest?: (
37
+ message: string,
38
+ history: Anthropic.MessageParam[]
39
+ ) => void | Promise<void>;
40
+
41
+ /** Fires before each Anthropic API call (including retries in the loop). */
42
+ onApiCall?: (params: {
43
+ model: string;
44
+ messages: Anthropic.MessageParam[];
45
+ tools: Anthropic.Tool[];
46
+ }) => void | Promise<void>;
47
+
48
+ /** Fires before a tool handler is executed. */
49
+ onToolCall?: (
50
+ toolName: string,
51
+ input: Record<string, any>
52
+ ) => void | Promise<void>;
53
+
54
+ /** Fires after a tool handler returns. */
55
+ onToolResult?: (
56
+ toolName: string,
57
+ result: string,
58
+ isError: boolean
59
+ ) => void | Promise<void>;
60
+
61
+ /** Fires after the final text response is assembled. */
62
+ onResponse?: (
63
+ response: string,
64
+ usage: TokenUsage
65
+ ) => void | Promise<void>;
66
+
67
+ /** Fires when an error is caught during the chat loop. */
68
+ onError?: (error: Error) => void | Promise<void>;
69
+ }
70
+
71
+ /** Extended thinking configuration. */
72
+ export interface ThinkingConfig {
73
+ /** Token budget for thinking. Must be >= 1024. */
74
+ budgetTokens: number;
75
+ }
76
+
77
+ /** Full configuration accepted by the ShopifyAgent constructor. */
78
+ export interface AgentConfig {
79
+ shopify: ShopifyClient;
80
+ skillContent: string;
81
+ model?: string;
82
+ /** Override the default system instruction sent to the model. */
83
+ systemInstruction?: string;
84
+ /** Override default Claude Sonnet pricing. */
85
+ pricing?: ModelPricing;
86
+ /** Register plugins at construction time. */
87
+ plugins?: AgentPlugin[];
88
+ /** Lifecycle hooks for observability. */
89
+ hooks?: AgentHooks;
90
+ /** Enable extended thinking so the model plans before executing. */
91
+ thinking?: ThinkingConfig;
92
+ /** Maximum tool-use loop iterations before the agent stops. Defaults to 20. */
93
+ maxIterations?: number;
94
+ /** Persistent memory store for conversation history across sessions. */
95
+ memory?: MemoryStore;
96
+ }
97
+
98
+ /** Return value of a single chat() invocation. */
99
+ export interface ChatResult {
100
+ response: string;
101
+ history: Anthropic.MessageParam[];
102
+ usage: TokenUsage;
103
+ /** Concatenated thinking text when extended thinking is enabled. */
104
+ thinking?: string;
105
+ /** Number of tool-use loop iterations used in this call. */
106
+ iterationsUsed: number;
107
+ }
108
+
109
+ /** Events yielded by chatStream(). */
110
+ export type StreamEvent =
111
+ | { type: "thinking"; text: string }
112
+ | { type: "text"; text: string }
113
+ | { type: "tool_call"; name: string; input: Record<string, any> }
114
+ | { type: "tool_result"; name: string; result: string; isError: boolean }
115
+ | {
116
+ type: "done";
117
+ response: string;
118
+ thinking?: string;
119
+ usage: TokenUsage;
120
+ iterationsUsed: number;
121
+ history: Anthropic.MessageParam[];
122
+ };
123
+
124
+ // ---------------------------------------------------------------------------
125
+ // Constants
126
+ // ---------------------------------------------------------------------------
127
+
128
+ export const DEFAULT_SYSTEM_INSTRUCTION =
129
+ "You're Clawpify. You help run a Shopify store. The merchant texts you to get stuff done";
3
130
 
4
131
  const SHOPIFY_GRAPHQL_TOOL: Anthropic.Tool = {
5
132
  name: "shopify_graphql",
@@ -21,113 +148,526 @@ const SHOPIFY_GRAPHQL_TOOL: Anthropic.Tool = {
21
148
  },
22
149
  };
23
150
 
151
+ /** Default pricing for claude-sonnet-4-5 (USD per million tokens). */
152
+ const DEFAULT_PRICING: ModelPricing = {
153
+ inputPerMillion: 3,
154
+ outputPerMillion: 15,
155
+ cacheWritePerMillion: 3.75,
156
+ cacheReadPerMillion: 0.3,
157
+ };
158
+
159
+ const DEFAULT_MAX_ITERATIONS = 20;
160
+
161
+ // ---------------------------------------------------------------------------
162
+ // Helpers
163
+ // ---------------------------------------------------------------------------
164
+
165
+ /** Safely invoke an async hook, swallowing errors so they never break the main flow. */
166
+ async function safeHook<T extends (...args: any[]) => void | Promise<void>>(
167
+ hook: T | undefined,
168
+ ...args: Parameters<T>
169
+ ): Promise<void> {
170
+ if (!hook) return;
171
+ try {
172
+ await hook(...args);
173
+ } catch {
174
+ // hooks must never break the agent loop
175
+ }
176
+ }
177
+
178
+ function computeCost(usage: TokenUsage, pricing: ModelPricing): number {
179
+ return (
180
+ (usage.inputTokens * pricing.inputPerMillion) / 1_000_000 +
181
+ (usage.outputTokens * pricing.outputPerMillion) / 1_000_000 +
182
+ (usage.cacheCreationInputTokens * pricing.cacheWritePerMillion) /
183
+ 1_000_000 +
184
+ (usage.cacheReadInputTokens * pricing.cacheReadPerMillion) / 1_000_000
185
+ );
186
+ }
187
+
188
+ function emptyUsage(): TokenUsage {
189
+ return {
190
+ inputTokens: 0,
191
+ outputTokens: 0,
192
+ cacheCreationInputTokens: 0,
193
+ cacheReadInputTokens: 0,
194
+ totalCost: 0,
195
+ };
196
+ }
197
+
198
+ function accumulateUsage(
199
+ total: TokenUsage,
200
+ raw: Anthropic.Usage,
201
+ pricing: ModelPricing
202
+ ): void {
203
+ total.inputTokens += raw.input_tokens;
204
+ total.outputTokens += raw.output_tokens;
205
+ total.cacheCreationInputTokens += raw.cache_creation_input_tokens ?? 0;
206
+ total.cacheReadInputTokens += raw.cache_read_input_tokens ?? 0;
207
+ total.totalCost = computeCost(total, pricing);
208
+ }
209
+
210
+ /** Extract text and thinking content from an Anthropic Message. */
211
+ function extractContent(response: Anthropic.Message): {
212
+ text: string;
213
+ thinking: string;
214
+ } {
215
+ let text = "";
216
+ let thinking = "";
217
+ for (const block of response.content) {
218
+ if (block.type === "text") {
219
+ text += (text ? "\n" : "") + block.text;
220
+ } else if (block.type === "thinking") {
221
+ thinking +=
222
+ (thinking ? "\n" : "") + (block as Anthropic.ThinkingBlock).thinking;
223
+ }
224
+ }
225
+ return { text, thinking };
226
+ }
227
+
228
+ // ---------------------------------------------------------------------------
229
+ // Agent
230
+ // ---------------------------------------------------------------------------
231
+
24
232
  export class ShopifyAgent {
25
233
  private anthropic: Anthropic;
26
234
  private shopify: ShopifyClient;
27
235
  private skillContent: string;
236
+ private systemInstruction: string;
28
237
  private model: string;
238
+ private pricing: ModelPricing;
239
+ private plugins: Map<string, AgentPlugin> = new Map();
240
+ private hooks: AgentHooks;
241
+ private thinkingConfig: ThinkingConfig | undefined;
242
+ private maxIterations: number;
243
+ private memory: MemoryStore | undefined;
29
244
 
30
- constructor(config: {
31
- shopify: ShopifyClient;
32
- skillContent: string;
33
- model?: string;
34
- }) {
245
+ constructor(config: AgentConfig) {
35
246
  this.anthropic = new Anthropic();
36
247
  this.shopify = config.shopify;
37
248
  this.skillContent = config.skillContent;
249
+ this.systemInstruction =
250
+ config.systemInstruction ?? DEFAULT_SYSTEM_INSTRUCTION;
38
251
  this.model = config.model ?? "claude-sonnet-4-5";
252
+ this.pricing = config.pricing ?? DEFAULT_PRICING;
253
+ this.hooks = config.hooks ?? {};
254
+ this.thinkingConfig = config.thinking;
255
+ this.maxIterations = config.maxIterations ?? DEFAULT_MAX_ITERATIONS;
256
+ this.memory = config.memory;
257
+
258
+ // Register initial plugins
259
+ if (config.plugins) {
260
+ for (const plugin of config.plugins) {
261
+ this.registerPlugin(plugin);
262
+ }
263
+ }
264
+ }
265
+
266
+ /** Register a plugin at runtime. Throws if a tool with the same name already exists. */
267
+ registerPlugin(plugin: AgentPlugin): void {
268
+ const name = plugin.tool.name;
269
+ if (name === "shopify_graphql") {
270
+ throw new Error(
271
+ `Cannot register plugin with reserved tool name "shopify_graphql"`
272
+ );
273
+ }
274
+ if (this.plugins.has(name)) {
275
+ throw new Error(`Plugin with tool name "${name}" is already registered`);
276
+ }
277
+ this.plugins.set(name, plugin);
278
+ }
279
+
280
+ // -------------------------------------------------------------------------
281
+ // Private helpers
282
+ // -------------------------------------------------------------------------
283
+
284
+ /** Build the system prompt array with prompt caching. */
285
+ private buildSystemPrompt(): Anthropic.TextBlockParam[] {
286
+ return [
287
+ { type: "text", text: this.systemInstruction },
288
+ {
289
+ type: "text",
290
+ text: this.skillContent,
291
+ cache_control: { type: "ephemeral" },
292
+ },
293
+ ];
294
+ }
295
+
296
+ /** Build the tools list with cache_control on the last tool. */
297
+ private buildTools(): Anthropic.Tool[] {
298
+ const allTools: Anthropic.Tool[] = [
299
+ SHOPIFY_GRAPHQL_TOOL,
300
+ ...[...this.plugins.values()].map((p) => p.tool),
301
+ ];
302
+ if (allTools.length > 0) {
303
+ const last = allTools[allTools.length - 1];
304
+ allTools[allTools.length - 1] = Object.assign({}, last, {
305
+ cache_control: { type: "ephemeral" } as const,
306
+ });
307
+ }
308
+ return allTools;
309
+ }
310
+
311
+ /** Compute max_tokens accounting for thinking budget. */
312
+ private getMaxTokens(): number {
313
+ return this.thinkingConfig
314
+ ? this.thinkingConfig.budgetTokens + 4096
315
+ : 4096;
316
+ }
317
+
318
+ /** Build optional thinking param for the API call. */
319
+ private getThinkingParam():
320
+ | { thinking: Anthropic.ThinkingConfigParam }
321
+ | {} {
322
+ if (!this.thinkingConfig) return {};
323
+ return {
324
+ thinking: {
325
+ type: "enabled" as const,
326
+ budget_tokens: this.thinkingConfig.budgetTokens,
327
+ },
328
+ };
329
+ }
330
+
331
+ /** Execute a single tool call by name. Returns content and error flag. */
332
+ private async executeTool(
333
+ name: string,
334
+ input: Record<string, any>
335
+ ): Promise<{ content: string; isError: boolean }> {
336
+ await safeHook(this.hooks.onToolCall, name, input);
337
+
338
+ if (name === "shopify_graphql") {
339
+ try {
340
+ const result = await this.shopify.graphql(input.query, input.variables);
341
+ const content = JSON.stringify(result, null, 2);
342
+ await safeHook(this.hooks.onToolResult, name, content, false);
343
+ return { content, isError: false };
344
+ } catch (error) {
345
+ const content = `Error: ${error instanceof Error ? error.message : String(error)}`;
346
+ await safeHook(this.hooks.onToolResult, name, content, true);
347
+ return { content, isError: true };
348
+ }
349
+ }
350
+
351
+ if (this.plugins.has(name)) {
352
+ const plugin = this.plugins.get(name)!;
353
+ try {
354
+ const content = await plugin.handler(input);
355
+ await safeHook(this.hooks.onToolResult, name, content, false);
356
+ return { content, isError: false };
357
+ } catch (error) {
358
+ const content = `Error: ${error instanceof Error ? error.message : String(error)}`;
359
+ await safeHook(this.hooks.onToolResult, name, content, true);
360
+ return { content, isError: true };
361
+ }
362
+ }
363
+
364
+ // Unknown tool
365
+ const content = `Error: Unknown tool "${name}"`;
366
+ await safeHook(this.hooks.onToolResult, name, content, true);
367
+ return { content, isError: true };
39
368
  }
40
369
 
370
+ /** Process all tool_use blocks from a response, returning tool results. */
371
+ private async processToolCalls(
372
+ response: Anthropic.Message
373
+ ): Promise<Anthropic.ToolResultBlockParam[]> {
374
+ const toolUseBlocks = response.content.filter(
375
+ (block): block is Anthropic.ToolUseBlock => block.type === "tool_use"
376
+ );
377
+
378
+ const toolResults: Anthropic.ToolResultBlockParam[] = [];
379
+ for (const toolUse of toolUseBlocks) {
380
+ const input = toolUse.input as Record<string, any>;
381
+ const { content, isError } = await this.executeTool(toolUse.name, input);
382
+ toolResults.push({
383
+ type: "tool_result",
384
+ tool_use_id: toolUse.id,
385
+ content,
386
+ ...(isError ? { is_error: true } : {}),
387
+ });
388
+ }
389
+ return toolResults;
390
+ }
391
+
392
+ // -------------------------------------------------------------------------
393
+ // Public methods
394
+ // -------------------------------------------------------------------------
395
+
396
+ /**
397
+ * Send a message and get a complete response (non-streaming).
398
+ * Runs the agentic tool-use loop until the model produces a final text response.
399
+ */
41
400
  async chat(
42
401
  userMessage: string,
43
402
  conversationHistory: Anthropic.MessageParam[] = []
44
- ): Promise<{ response: string; history: Anthropic.MessageParam[] }> {
45
- const systemPrompt = `You are a helpful Shopify assistant that can query and manage a Shopify store using the GraphQL Admin API.
403
+ ): Promise<ChatResult> {
404
+ const usage = emptyUsage();
405
+ let iterationsUsed = 0;
406
+ let allThinking = "";
46
407
 
47
- ${this.skillContent}
408
+ const systemPrompt = this.buildSystemPrompt();
409
+ const allTools = this.buildTools();
410
+ const maxTokens = this.getMaxTokens();
411
+ const thinkingParam = this.getThinkingParam();
48
412
 
49
- When the user asks about products, orders, customers, or any Shopify data, use the shopify_graphql tool to fetch or modify the data. Always explain what you're doing and present results clearly.`;
413
+ const messages: Anthropic.MessageParam[] = [
414
+ ...conversationHistory,
415
+ { role: "user", content: userMessage },
416
+ ];
417
+
418
+ await safeHook(this.hooks.onRequest, userMessage, conversationHistory);
419
+
420
+ try {
421
+ await safeHook(this.hooks.onApiCall, {
422
+ model: this.model,
423
+ messages,
424
+ tools: allTools,
425
+ });
426
+
427
+ let response = await this.anthropic.messages.create({
428
+ model: this.model,
429
+ max_tokens: maxTokens,
430
+ system: systemPrompt,
431
+ tools: allTools,
432
+ messages,
433
+ ...thinkingParam,
434
+ });
435
+
436
+ accumulateUsage(usage, response.usage, this.pricing);
437
+
438
+ // Collect thinking from the first response
439
+ const firstContent = extractContent(response);
440
+ if (firstContent.thinking) allThinking += firstContent.thinking;
441
+
442
+ // Agentic loop
443
+ while (response.stop_reason === "tool_use") {
444
+ iterationsUsed++;
445
+
446
+ if (iterationsUsed > this.maxIterations) {
447
+ const err = new Error(
448
+ `Agent exceeded maximum iterations (${this.maxIterations})`
449
+ );
450
+ await safeHook(this.hooks.onError, err);
451
+ break;
452
+ }
453
+
454
+ // Push assistant content and process tool calls
455
+ messages.push({
456
+ role: "assistant",
457
+ content: response.content as Anthropic.ContentBlockParam[],
458
+ });
459
+ const toolResults = await this.processToolCalls(response);
460
+ messages.push({ role: "user", content: toolResults });
461
+
462
+ await safeHook(this.hooks.onApiCall, {
463
+ model: this.model,
464
+ messages,
465
+ tools: allTools,
466
+ });
467
+
468
+ response = await this.anthropic.messages.create({
469
+ model: this.model,
470
+ max_tokens: maxTokens,
471
+ system: systemPrompt,
472
+ tools: allTools,
473
+ messages,
474
+ ...thinkingParam,
475
+ });
476
+
477
+ accumulateUsage(usage, response.usage, this.pricing);
478
+
479
+ // Collect thinking from subsequent responses
480
+ const loopContent = extractContent(response);
481
+ if (loopContent.thinking) {
482
+ allThinking +=
483
+ (allThinking ? "\n" : "") + loopContent.thinking;
484
+ }
485
+ }
486
+
487
+ // Extract final text response
488
+ const { text: finalResponse } = extractContent(response);
489
+
490
+ // Build updated history
491
+ const updatedHistory: Anthropic.MessageParam[] = [
492
+ ...messages,
493
+ { role: "assistant", content: response.content },
494
+ ];
495
+
496
+ await safeHook(this.hooks.onResponse, finalResponse, usage);
497
+
498
+ return {
499
+ response: finalResponse,
500
+ history: updatedHistory,
501
+ usage,
502
+ thinking: allThinking || undefined,
503
+ iterationsUsed,
504
+ };
505
+ } catch (error) {
506
+ const err = error instanceof Error ? error : new Error(String(error));
507
+ await safeHook(this.hooks.onError, err);
508
+ throw err;
509
+ }
510
+ }
511
+
512
+ /**
513
+ * Send a message and stream the response as typed events.
514
+ * Same agentic loop as chat() but yields incremental text/thinking deltas.
515
+ */
516
+ async *chatStream(
517
+ userMessage: string,
518
+ conversationHistory: Anthropic.MessageParam[] = []
519
+ ): AsyncGenerator<StreamEvent> {
520
+ const usage = emptyUsage();
521
+ let iterationsUsed = 0;
522
+ let allThinking = "";
523
+ let finalResponse = "";
524
+
525
+ const systemPrompt = this.buildSystemPrompt();
526
+ const allTools = this.buildTools();
527
+ const maxTokens = this.getMaxTokens();
528
+ const thinkingParam = this.getThinkingParam();
50
529
 
51
530
  const messages: Anthropic.MessageParam[] = [
52
531
  ...conversationHistory,
53
532
  { role: "user", content: userMessage },
54
533
  ];
55
534
 
56
- let response = await this.anthropic.messages.create({
57
- model: this.model,
58
- max_tokens: 4096,
59
- system: systemPrompt,
60
- tools: [SHOPIFY_GRAPHQL_TOOL],
61
- messages,
62
- });
63
-
64
- const assistantMessages: Anthropic.ContentBlockParam[] = [];
65
-
66
- // Agentic loop: keep processing tool calls until we get a final response
67
- while (response.stop_reason === "tool_use") {
68
- const toolUseBlocks = response.content.filter(
69
- (block: Anthropic.ContentBlock): block is Anthropic.ToolUseBlock =>
70
- block.type === "tool_use"
71
- );
535
+ await safeHook(this.hooks.onRequest, userMessage, conversationHistory);
536
+
537
+ try {
538
+ let stopReason: string | null = null;
539
+
540
+ do {
541
+ await safeHook(this.hooks.onApiCall, {
542
+ model: this.model,
543
+ messages,
544
+ tools: allTools,
545
+ });
72
546
 
73
- assistantMessages.push(...(response.content as Anthropic.ContentBlockParam[]));
547
+ const stream = this.anthropic.messages.stream({
548
+ model: this.model,
549
+ max_tokens: maxTokens,
550
+ system: systemPrompt,
551
+ tools: allTools,
552
+ messages,
553
+ ...thinkingParam,
554
+ });
74
555
 
75
- const toolResults: Anthropic.ToolResultBlockParam[] = [];
556
+ // Yield incremental deltas
557
+ for await (const event of stream) {
558
+ if (event.type === "content_block_delta") {
559
+ const delta = event.delta as { type: string; [key: string]: any };
560
+ if (delta.type === "text_delta") {
561
+ yield { type: "text", text: delta.text };
562
+ } else if (delta.type === "thinking_delta") {
563
+ yield { type: "thinking", text: delta.thinking };
564
+ }
565
+ }
566
+ }
567
+
568
+ const response = await stream.finalMessage();
569
+ accumulateUsage(usage, response.usage, this.pricing);
570
+ stopReason = response.stop_reason;
76
571
 
77
- for (const toolUse of toolUseBlocks) {
78
- if (toolUse.name === "shopify_graphql") {
79
- const input = toolUse.input as {
80
- query: string;
81
- variables?: Record<string, any>;
82
- };
572
+ // Collect thinking and text from this response
573
+ const content = extractContent(response);
574
+ if (content.thinking) {
575
+ allThinking += (allThinking ? "\n" : "") + content.thinking;
576
+ }
577
+ if (content.text) {
578
+ finalResponse = content.text;
579
+ }
83
580
 
84
- try {
85
- const result = await this.shopify.graphql(
86
- input.query,
87
- input.variables
581
+ if (stopReason === "tool_use") {
582
+ iterationsUsed++;
583
+
584
+ if (iterationsUsed > this.maxIterations) {
585
+ const err = new Error(
586
+ `Agent exceeded maximum iterations (${this.maxIterations})`
88
587
  );
89
- toolResults.push({
588
+ await safeHook(this.hooks.onError, err);
589
+ break;
590
+ }
591
+
592
+ // Push assistant content
593
+ messages.push({
594
+ role: "assistant",
595
+ content: response.content as Anthropic.ContentBlockParam[],
596
+ });
597
+
598
+ // Execute tools and yield events
599
+ const toolUseBlocks = response.content.filter(
600
+ (block): block is Anthropic.ToolUseBlock =>
601
+ block.type === "tool_use"
602
+ );
603
+
604
+ const toolResults: Anthropic.ToolResultBlockParam[] = [];
605
+ for (const toolUse of toolUseBlocks) {
606
+ const input = toolUse.input as Record<string, any>;
607
+ yield { type: "tool_call", name: toolUse.name, input };
608
+
609
+ const result = await this.executeTool(toolUse.name, input);
610
+ yield {
90
611
  type: "tool_result",
91
- tool_use_id: toolUse.id,
92
- content: JSON.stringify(result, null, 2),
93
- });
94
- } catch (error) {
612
+ name: toolUse.name,
613
+ result: result.content,
614
+ isError: result.isError,
615
+ };
616
+
95
617
  toolResults.push({
96
618
  type: "tool_result",
97
619
  tool_use_id: toolUse.id,
98
- content: `Error: ${error instanceof Error ? error.message : String(error)}`,
99
- is_error: true,
620
+ content: result.content,
621
+ ...(result.isError ? { is_error: true } : {}),
100
622
  });
101
623
  }
624
+
625
+ messages.push({ role: "user", content: toolResults });
626
+ } else {
627
+ // Final response — add to history
628
+ messages.push({
629
+ role: "assistant",
630
+ content: response.content as Anthropic.ContentBlockParam[],
631
+ });
102
632
  }
103
- }
633
+ } while (stopReason === "tool_use");
104
634
 
105
- messages.push({ role: "assistant", content: assistantMessages.slice() });
106
- messages.push({ role: "user", content: toolResults });
107
- assistantMessages.length = 0;
635
+ await safeHook(this.hooks.onResponse, finalResponse, usage);
108
636
 
109
- response = await this.anthropic.messages.create({
110
- model: this.model,
111
- max_tokens: 4096,
112
- system: systemPrompt,
113
- tools: [SHOPIFY_GRAPHQL_TOOL],
114
- messages,
115
- });
637
+ yield {
638
+ type: "done",
639
+ response: finalResponse,
640
+ thinking: allThinking || undefined,
641
+ usage,
642
+ iterationsUsed,
643
+ history: messages,
644
+ };
645
+ } catch (error) {
646
+ const err = error instanceof Error ? error : new Error(String(error));
647
+ await safeHook(this.hooks.onError, err);
648
+ throw err;
116
649
  }
650
+ }
117
651
 
118
- // Extract final text response
119
- const textBlocks = response.content.filter(
120
- (block: Anthropic.ContentBlock): block is Anthropic.TextBlock =>
121
- block.type === "text"
122
- );
123
- const finalResponse = textBlocks.map((b: Anthropic.TextBlock) => b.text).join("\n");
124
-
125
- // Build updated history
126
- const updatedHistory: Anthropic.MessageParam[] = [
127
- ...messages,
128
- { role: "assistant", content: response.content },
129
- ];
652
+ /**
653
+ * Chat with automatic session memory.
654
+ * Loads conversation history from the memory store, runs chat(), and saves
655
+ * the updated history back. Requires a MemoryStore to be configured.
656
+ */
657
+ async chatWithMemory(
658
+ sessionId: string,
659
+ userMessage: string
660
+ ): Promise<ChatResult> {
661
+ if (!this.memory) {
662
+ throw new Error(
663
+ "chatWithMemory requires a MemoryStore. Pass `memory` in AgentConfig."
664
+ );
665
+ }
130
666
 
131
- return { response: finalResponse, history: updatedHistory };
667
+ const history =
668
+ (await this.memory.load(sessionId)) as Anthropic.MessageParam[];
669
+ const result = await this.chat(userMessage, history);
670
+ await this.memory.save(sessionId, result.history);
671
+ return result;
132
672
  }
133
673
  }