@sprucelabs/sprucebot-llm 15.1.6 → 15.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -18,6 +18,7 @@ A TypeScript library for leveraging large language models to do... anything!
18
18
  * Leverage Skills to get your bot to complete any task!
19
19
  * Multiple adapter support
20
20
  * [OpenAI](#openai-adapter-configuration) - GPT-4o, o1, and other OpenAI models
21
+ * [Anthropic](#anthropic-adapter) - Claude models with prompt caching support
21
22
  * [Ollama](#ollama-adapter) - Run local models like Llama, Mistral, etc.
22
23
  * [Custom adapters](#custom-adapters) - Implement your own
23
24
  * Fully typed
@@ -194,6 +195,46 @@ adapter.setReasoningEffort('low')
194
195
 
195
196
  Requests are sent via `openai.chat.completions.create(...)` with messages built by the adapter from the Bot state and history.
196
197
 
198
+ ### Anthropic adapter
199
+
200
+ Use Claude models from Anthropic. Requires `@anthropic-ai/sdk` and an Anthropic API key.
201
+
202
+ ```ts
203
+ import { AnthropicAdapter, SprucebotLlmFactory } from '@sprucelabs/sprucebot-llm'
204
+
205
+ const adapter = AnthropicAdapter.Adapter(process.env.ANTHROPIC_API_KEY!, {
206
+ maxTokens: 4096, // required
207
+ model: 'claude-sonnet-4-5', // default
208
+ log: yourLogger, // optional
209
+ memoryLimit: 10, // optional
210
+ thinking: false, // optional: enable extended thinking mode
211
+ })
212
+
213
+ const bots = SprucebotLlmFactory.Factory(adapter)
214
+ ```
215
+
216
+ **Anthropic adapter options:**
217
+
218
+ | Option | Type | Required | Description |
219
+ |--------|------|----------|-------------|
220
+ | `maxTokens` | `number` | yes | Maximum tokens for the model response |
221
+ | `model` | `string` | no | Model to use (default: `'claude-sonnet-4-5'`) |
222
+ | `log` | `Log` | no | Optional logger instance |
223
+ | `memoryLimit` | `number` | no | Limit how many tracked messages are sent |
224
+ | `thinking` | `boolean` | no | Enable extended thinking mode (requested from the API as `thinking: adaptive`) |
225
+
226
+ #### Anthropic prompt caching
227
+
228
+ The Anthropic adapter automatically enables [prompt caching](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching) by inserting an `ephemeral` cache breakpoint after the system prompt. This allows Anthropic to cache the static portion of the prompt (your `youAre` + skill instructions) and only re-process the changing chat history on each turn — reducing latency and cost on long conversations.
229
+
230
+ Token usage (including cache creation and cache read tokens) is logged at the `info` level on each request:
231
+
232
+ ```
233
+ [TOKEN USAGE] input=1234 cache_create=800 cache_read=400 output=256
234
+ ```
235
+
236
+ No configuration is required — caching is applied automatically.
237
+
197
238
  ### Ollama adapter
198
239
 
199
240
  Run local models using [Ollama](https://ollama.ai). No API key required - just have Ollama running locally.
@@ -426,6 +467,35 @@ const bookingBot = bots.Bot({
426
467
 
427
468
  If you are using reasoning models that accept `reasoning_effort`, you can set it via `OPENAI_REASONING_EFFORT` or `adapter.setReasoningEffort(...)`.
428
469
 
470
+ ## Serialization and Persistence
471
+
472
+ You can snapshot a bot's full state — including message history, skill configuration, and any accumulated state — and later restore it. This is useful for persisting conversations across process restarts, saving/loading sessions, or transferring bot state.
473
+
474
+ ```ts
475
+ // Save bot state (e.g. to a database or file)
476
+ const snapshot = bot.serialize()
477
+ // snapshot: { youAre, stateSchema, state, messages, skill }
478
+
479
+ // Later, recreate the bot and restore state
480
+ const bot2 = bots.Bot({
481
+ skill: mySkill,
482
+ youAre: 'a helpful assistant',
483
+ })
484
+ bot2.unserialize(snapshot)
485
+ // bot2 now has the same message history and state that bot had at serialization time
486
+ ```
487
+
488
+ The skill's state is also preserved through serialization:
489
+
490
+ ```ts
491
+ const skillSnapshot = skill.serialize()
492
+ // skillSnapshot: { yourJobIfYouChooseToAcceptItIs, state, stateSchema, ... }
493
+
494
+ skill.unserialize(skillSnapshot)
495
+ ```
496
+
497
+ `unserialize` restores `state` and skill options but does not reconnect callback handlers — those are defined in code. Re-attach any callbacks when recreating the skill.
498
+
429
499
  ## API Reference
430
500
 
431
501
  ### Bot methods
@@ -439,6 +509,7 @@ If you are using reasoning models that accept `reasoning_effort`, you can set it
439
509
  | `updateState(partialState)` | Update state and emit `did-update-state` |
440
510
  | `setSkill(skill)` | Swap the active skill |
441
511
  | `serialize()` | Snapshot of bot's current state, skill, and history |
512
+ | `unserialize(serialized)` | Restore state from a previous `serialize()` snapshot |
442
513
 
443
514
  ### Skill methods
444
515
 
@@ -447,7 +518,8 @@ If you are using reasoning models that accept `reasoning_effort`, you can set it
447
518
  | `updateState(partialState)` | Update skill state |
448
519
  | `getState()` | Get current state |
449
520
  | `setModel(model)` | Change the model this skill uses |
450
- | `serialize()` | Snapshot of skill configuration |
521
+ | `serialize()` | Snapshot of skill configuration and state |
522
+ | `unserialize(serialized)` | Restore skill state from a previous `serialize()` snapshot |
451
523
 
452
524
  ### Factory helpers
453
525
 
@@ -45,8 +45,11 @@ class SprucebotLlmBotImpl extends mercury_event_emitter_1.AbstractEventEmitter {
45
45
  };
46
46
  }
47
47
  unserialize(serialized) {
48
- this.state = serialized.state;
49
- serialized.skill && this.skill?.unserialize(serialized.skill);
48
+ const { state, messages, skill, youAre } = serialized;
49
+ this.youAre = youAre;
50
+ this.state = state;
51
+ this.messages = messages;
52
+ skill && this.skill?.unserialize(skill);
50
53
  }
51
54
  async sendMessage(message, cb) {
52
55
  (0, schema_1.assertOptions)({ message }, ['message']);
@@ -48,8 +48,11 @@ class SprucebotLlmBotImpl extends AbstractEventEmitter {
48
48
  }
49
49
  unserialize(serialized) {
50
50
  var _a;
51
- this.state = serialized.state;
52
- serialized.skill && ((_a = this.skill) === null || _a === void 0 ? void 0 : _a.unserialize(serialized.skill));
51
+ const { state, messages, skill, youAre } = serialized;
52
+ this.youAre = youAre;
53
+ this.state = state;
54
+ this.messages = messages;
55
+ skill && ((_a = this.skill) === null || _a === void 0 ? void 0 : _a.unserialize(skill));
53
56
  }
54
57
  sendMessage(message, cb) {
55
58
  return __awaiter(this, void 0, void 0, function* () {
package/package.json CHANGED
@@ -8,7 +8,7 @@
8
8
  "eta"
9
9
  ]
10
10
  },
11
- "version": "15.1.6",
11
+ "version": "15.1.8",
12
12
  "files": [
13
13
  "build"
14
14
  ],