@mastra/libsql 1.7.0-alpha.0 → 1.7.1-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/CHANGELOG.md +28 -0
  2. package/dist/docs/SKILL.md +19 -19
  3. package/dist/docs/assets/SOURCE_MAP.json +1 -1
  4. package/dist/docs/references/docs-agents-agent-approval.md +7 -7
  5. package/dist/docs/references/docs-agents-agent-memory.md +1 -1
  6. package/dist/docs/references/docs-agents-network-approval.md +2 -2
  7. package/dist/docs/references/docs-agents-networks.md +2 -2
  8. package/dist/docs/references/docs-memory-memory-processors.md +14 -14
  9. package/dist/docs/references/docs-memory-message-history.md +2 -2
  10. package/dist/docs/references/docs-memory-overview.md +3 -3
  11. package/dist/docs/references/docs-memory-semantic-recall.md +8 -8
  12. package/dist/docs/references/docs-memory-storage.md +6 -6
  13. package/dist/docs/references/docs-memory-working-memory.md +15 -15
  14. package/dist/docs/references/docs-observability-overview.md +5 -5
  15. package/dist/docs/references/docs-observability-tracing-exporters-default.md +7 -7
  16. package/dist/docs/references/docs-rag-retrieval.md +16 -16
  17. package/dist/docs/references/reference-core-getMemory.md +1 -1
  18. package/dist/docs/references/reference-core-listMemory.md +1 -1
  19. package/dist/docs/references/reference-core-mastra-class.md +2 -2
  20. package/dist/docs/references/reference-memory-memory-class.md +4 -4
  21. package/dist/docs/references/reference-storage-composite.md +12 -4
  22. package/dist/docs/references/reference-storage-dynamodb.md +5 -5
  23. package/dist/docs/references/reference-storage-libsql.md +1 -1
  24. package/dist/docs/references/reference-vectors-libsql.md +16 -16
  25. package/dist/index.cjs +8 -5
  26. package/dist/index.cjs.map +1 -1
  27. package/dist/index.js +8 -5
  28. package/dist/index.js.map +1 -1
  29. package/dist/storage/domains/memory/index.d.ts.map +1 -1
  30. package/package.json +6 -6
@@ -1,8 +1,8 @@
1
- # Observability Overview
1
+ # Observability overview
2
2
 
3
3
  Mastra provides observability features for AI applications. Monitor LLM operations, trace agent decisions, and debug complex workflows with tools that understand AI-specific patterns.
4
4
 
5
- ## Key Features
5
+ ## Key features
6
6
 
7
7
  ### Tracing
8
8
 
@@ -13,13 +13,13 @@ Specialized tracing for AI operations that captures:
13
13
  - **Workflow steps**: Branching logic, parallel execution, and step outputs
14
14
  - **Automatic instrumentation**: Tracing with decorators
15
15
 
16
- ## Storage Requirements
16
+ ## Storage requirements
17
17
 
18
18
  The `DefaultExporter` persists traces to your configured storage backend. Not all storage providers support observability—for the full list, see [Storage Provider Support](https://mastra.ai/docs/observability/tracing/exporters/default).
19
19
 
20
20
  For production environments with high traffic, we recommend using **ClickHouse** for the observability domain via [composite storage](https://mastra.ai/reference/storage/composite). See [Production Recommendations](https://mastra.ai/docs/observability/tracing/exporters/default) for details.
21
21
 
22
- ## Quick Start
22
+ ## Quick start
23
23
 
24
24
  Configure Observability in your Mastra instance:
25
25
 
@@ -63,7 +63,7 @@ With this basic setup, you will see Traces and Logs in both Studio and in Mastra
63
63
 
64
64
  We also support various external tracing providers like MLflow, Langfuse, Braintrust, and any OpenTelemetry-compatible platform (Datadog, New Relic, SigNoz, etc.). See more about this in the [Tracing](https://mastra.ai/docs/observability/tracing/overview) documentation.
65
65
 
66
- ## What's Next?
66
+ ## What's next?
67
67
 
68
68
  - **[Set up Tracing](https://mastra.ai/docs/observability/tracing/overview)**: Configure tracing for your application
69
69
  - **[Configure Logging](https://mastra.ai/docs/observability/logging)**: Add structured logging
@@ -1,4 +1,4 @@
1
- # Default Exporter
1
+ # Default exporter
2
2
 
3
3
  The `DefaultExporter` persists traces to your configured storage backend, making them accessible through Studio. It's automatically enabled when using the default observability configuration and requires no external services.
4
4
 
@@ -68,7 +68,7 @@ export const mastra = new Mastra({
68
68
  })
69
69
  ```
70
70
 
71
- ## Viewing Traces
71
+ ## Viewing traces
72
72
 
73
73
  ### Studio
74
74
 
@@ -79,7 +79,7 @@ Access your traces through Studio:
79
79
  3. Filter and search your local traces
80
80
  4. Inspect detailed span information
81
81
 
82
- ## Tracing Strategies
82
+ ## Tracing strategies
83
83
 
84
84
  DefaultExporter automatically selects the optimal tracing strategy based on your storage provider. You can also override this selection if needed.
85
85
 
@@ -106,7 +106,7 @@ new DefaultExporter({
106
106
  })
107
107
  ```
108
108
 
109
- ## Storage Provider Support
109
+ ## Storage provider support
110
110
 
111
111
  Different storage providers support different tracing strategies. Some providers support observability for production workloads, while others are intended primarily for local development.
112
112
 
@@ -139,7 +139,7 @@ The following storage providers **don't support** the observability domain. If y
139
139
  - **batch-with-updates**: 10-100x throughput improvement, full span lifecycle
140
140
  - **insert-only**: Additional 70% reduction in database operations, perfect for analytics
141
141
 
142
- ## Production Recommendations
142
+ ## Production recommendations
143
143
 
144
144
  Observability data grows quickly in production environments. A single agent interaction can generate hundreds of spans, and high-traffic applications can produce thousands of traces per day. Most general-purpose databases aren't optimized for this write-heavy, append-only workload.
145
145
 
@@ -156,7 +156,7 @@ Observability data grows quickly in production environments. A single agent inte
156
156
 
157
157
  If you're using a provider without observability support (like Convex or DynamoDB) or want to optimize performance, use [composite storage](https://mastra.ai/reference/storage/composite) to route observability data to ClickHouse while keeping other data in your primary database.
158
158
 
159
- ## Batching Behavior
159
+ ## Batching behavior
160
160
 
161
161
  ### Flush Triggers
162
162
 
@@ -167,7 +167,7 @@ For both batch strategies (`batch-with-updates` and `insert-only`), traces are f
167
167
  3. **Emergency flush**: Buffer approaches `maxBufferSize` limit
168
168
  4. **Shutdown**: Force flush all pending events
169
169
 
170
- ### Error Handling
170
+ ### Error handling
171
171
 
172
172
  The DefaultExporter includes robust error handling for production use:
173
173
 
@@ -1,10 +1,10 @@
1
- # Retrieval in RAG Systems
1
+ # Retrieval in RAG systems
2
2
 
3
3
  After storing embeddings, you need to retrieve relevant chunks to answer user queries.
4
4
 
5
5
  Mastra provides flexible retrieval options with support for semantic search, filtering, and re-ranking.
6
6
 
7
- ## How Retrieval Works
7
+ ## How retrieval works
8
8
 
9
9
  1. The user's query is converted to an embedding using the same model used for document embeddings
10
10
  2. This embedding is compared to stored embeddings using vector similarity
@@ -14,7 +14,7 @@ Mastra provides flexible retrieval options with support for semantic search, fil
14
14
  - Re-ranked for better relevance
15
15
  - Processed through a knowledge graph
16
16
 
17
- ## Basic Retrieval
17
+ ## Basic retrieval
18
18
 
19
19
  The simplest approach is direct semantic search. This method uses vector similarity to find chunks that are semantically similar to the query:
20
20
 
@@ -63,7 +63,7 @@ Results include both the text content and a similarity score:
63
63
  ]
64
64
  ```
65
65
 
66
- ## Advanced Retrieval options
66
+ ## Advanced retrieval options
67
67
 
68
68
  ### Metadata Filtering
69
69
 
@@ -272,7 +272,7 @@ import { PGVECTOR_PROMPT } from '@mastra/pg'
272
272
  export const ragAgent = new Agent({
273
273
  id: 'rag-agent',
274
274
  name: 'RAG Agent',
275
- model: 'openai/gpt-5.1',
275
+ model: 'openai/gpt-5.4',
276
276
  instructions: `
277
277
  Process queries using the provided context. Structure responses to be concise and relevant.
278
278
  ${PGVECTOR_PROMPT}
@@ -289,7 +289,7 @@ import { PINECONE_PROMPT } from '@mastra/pinecone'
289
289
  export const ragAgent = new Agent({
290
290
  id: 'rag-agent',
291
291
  name: 'RAG Agent',
292
- model: 'openai/gpt-5.1',
292
+ model: 'openai/gpt-5.4',
293
293
  instructions: `
294
294
  Process queries using the provided context. Structure responses to be concise and relevant.
295
295
  ${PINECONE_PROMPT}
@@ -306,7 +306,7 @@ import { QDRANT_PROMPT } from '@mastra/qdrant'
306
306
  export const ragAgent = new Agent({
307
307
  id: 'rag-agent',
308
308
  name: 'RAG Agent',
309
- model: 'openai/gpt-5.1',
309
+ model: 'openai/gpt-5.4',
310
310
  instructions: `
311
311
  Process queries using the provided context. Structure responses to be concise and relevant.
312
312
  ${QDRANT_PROMPT}
@@ -323,7 +323,7 @@ import { CHROMA_PROMPT } from '@mastra/chroma'
323
323
  export const ragAgent = new Agent({
324
324
  id: 'rag-agent',
325
325
  name: 'RAG Agent',
326
- model: 'openai/gpt-5.1',
326
+ model: 'openai/gpt-5.4',
327
327
  instructions: `
328
328
  Process queries using the provided context. Structure responses to be concise and relevant.
329
329
  ${CHROMA_PROMPT}
@@ -340,7 +340,7 @@ import { ASTRA_PROMPT } from '@mastra/astra'
340
340
  export const ragAgent = new Agent({
341
341
  id: 'rag-agent',
342
342
  name: 'RAG Agent',
343
- model: 'openai/gpt-5.1',
343
+ model: 'openai/gpt-5.4',
344
344
  instructions: `
345
345
  Process queries using the provided context. Structure responses to be concise and relevant.
346
346
  ${ASTRA_PROMPT}
@@ -357,7 +357,7 @@ import { LIBSQL_PROMPT } from '@mastra/libsql'
357
357
  export const ragAgent = new Agent({
358
358
  id: 'rag-agent',
359
359
  name: 'RAG Agent',
360
- model: 'openai/gpt-5.1',
360
+ model: 'openai/gpt-5.4',
361
361
  instructions: `
362
362
  Process queries using the provided context. Structure responses to be concise and relevant.
363
363
  ${LIBSQL_PROMPT}
@@ -374,7 +374,7 @@ import { UPSTASH_PROMPT } from '@mastra/upstash'
374
374
  export const ragAgent = new Agent({
375
375
  id: 'rag-agent',
376
376
  name: 'RAG Agent',
377
- model: 'openai/gpt-5.1',
377
+ model: 'openai/gpt-5.4',
378
378
  instructions: `
379
379
  Process queries using the provided context. Structure responses to be concise and relevant.
380
380
  ${UPSTASH_PROMPT}
@@ -391,7 +391,7 @@ import { VECTORIZE_PROMPT } from '@mastra/vectorize'
391
391
  export const ragAgent = new Agent({
392
392
  id: 'rag-agent',
393
393
  name: 'RAG Agent',
394
- model: 'openai/gpt-5.1',
394
+ model: 'openai/gpt-5.4',
395
395
  instructions: `
396
396
  Process queries using the provided context. Structure responses to be concise and relevant.
397
397
  ${VECTORIZE_PROMPT}
@@ -408,7 +408,7 @@ import { MONGODB_PROMPT } from '@mastra/mongodb'
408
408
  export const ragAgent = new Agent({
409
409
  id: 'rag-agent',
410
410
  name: 'RAG Agent',
411
- model: 'openai/gpt-5.1',
411
+ model: 'openai/gpt-5.4',
412
412
  instructions: `
413
413
  Process queries using the provided context. Structure responses to be concise and relevant.
414
414
  ${MONGODB_PROMPT}
@@ -425,7 +425,7 @@ import { OPENSEARCH_PROMPT } from '@mastra/opensearch'
425
425
  export const ragAgent = new Agent({
426
426
  id: 'rag-agent',
427
427
  name: 'RAG Agent',
428
- model: 'openai/gpt-5.1',
428
+ model: 'openai/gpt-5.4',
429
429
  instructions: `
430
430
  Process queries using the provided context. Structure responses to be concise and relevant.
431
431
  ${OPENSEARCH_PROMPT}
@@ -442,7 +442,7 @@ import { S3VECTORS_PROMPT } from '@mastra/s3vectors'
442
442
  export const ragAgent = new Agent({
443
443
  id: 'rag-agent',
444
444
  name: 'RAG Agent',
445
- model: 'openai/gpt-5.1',
445
+ model: 'openai/gpt-5.4',
446
446
  instructions: `
447
447
  Process queries using the provided context. Structure responses to be concise and relevant.
448
448
  ${S3VECTORS_PROMPT}
@@ -472,7 +472,7 @@ const initialResults = await pgVector.query({
472
472
  })
473
473
 
474
474
  // Create a relevance scorer
475
- const relevanceProvider = new MastraAgentRelevanceScorer('relevance-scorer', 'openai/gpt-5.1')
475
+ const relevanceProvider = new MastraAgentRelevanceScorer('relevance-scorer', 'openai/gpt-5.4')
476
476
 
477
477
  // Re-rank the results
478
478
  const rerankedResults = await rerank({
@@ -22,7 +22,7 @@ const thread = await memory.createThread({
22
22
 
23
23
  **memory** (`TMemory[TMemoryKey]`): The memory instance with the specified key. Throws an error if the memory is not found.
24
24
 
25
- ## Example: Registering and Retrieving Memory
25
+ ## Example: Registering and retrieving memory
26
26
 
27
27
  ```typescript
28
28
  import { Mastra } from '@mastra/core'
@@ -20,7 +20,7 @@ This method takes no parameters.
20
20
 
21
21
  **memory** (`Record<string, MastraMemory>`): An object containing all registered memory instances, keyed by their registry keys.
22
22
 
23
- ## Example: Checking Registered Memory
23
+ ## Example: Checking registered memory
24
24
 
25
25
  ```typescript
26
26
  import { Mastra } from '@mastra/core'
@@ -1,4 +1,4 @@
1
- # Mastra Class
1
+ # Mastra class
2
2
 
3
3
  The `Mastra` class is the central orchestrator in any Mastra application, managing agents, workflows, storage, logging, observability, and more. Typically, you create a single instance of `Mastra` to coordinate your application.
4
4
 
@@ -55,7 +55,7 @@ Visit the [Configuration reference](https://mastra.ai/reference/configuration) f
55
55
 
56
56
  **mcpServers** (`Record<string, MCPServerBase>`): An object where keys are registry keys (used for getMCPServer()) and values are instances of MCPServer or classes extending MCPServerBase. Each MCPServer must have an id property. Servers can be retrieved by registry key using getMCPServer() or by their intrinsic id using getMCPServerById().
57
57
 
58
- **bundler** (`BundlerConfig`): Configuration for the asset bundler with options for externals, sourcemap, and transpilePackages.
58
+ **bundler** (`BundlerConfig`): Configuration for the asset bundler with options for externals, sourcemap, transpilePackages, and dynamicPackages. (Default: `{ externals: [], sourcemap: false, transpilePackages: [], dynamicPackages: [] }`)
59
59
 
60
60
  **scorers** (`Record<string, Scorer>`): Scorers for evaluating agent responses and workflow outputs (Default: `{}`)
61
61
 
@@ -1,4 +1,4 @@
1
- # Memory Class
1
+ # Memory class
2
2
 
3
3
  The `Memory` class provides a robust system for managing conversation history and thread-based message storage in Mastra. It enables persistent storage of conversations, semantic search capabilities, and efficient message retrieval. You must configure a storage provider for conversation history, and if you enable semantic recall you will also need to provide a vector store and embedder.
4
4
 
@@ -11,7 +11,7 @@ import { Agent } from '@mastra/core/agent'
11
11
  export const agent = new Agent({
12
12
  name: 'test-agent',
13
13
  instructions: 'You are an agent with memory.',
14
- model: 'openai/gpt-5.1',
14
+ model: 'openai/gpt-5.4',
15
15
  memory: new Memory({
16
16
  options: {
17
17
  workingMemory: {
@@ -60,7 +60,7 @@ import { LibSQLStore, LibSQLVector } from '@mastra/libsql'
60
60
  export const agent = new Agent({
61
61
  name: 'test-agent',
62
62
  instructions: 'You are an agent with memory.',
63
- model: 'openai/gpt-5.1',
63
+ model: 'openai/gpt-5.4',
64
64
  memory: new Memory({
65
65
  storage: new LibSQLStore({
66
66
  id: 'test-agent-storage',
@@ -97,7 +97,7 @@ import { PgStore, PgVector } from '@mastra/pg'
97
97
  export const agent = new Agent({
98
98
  name: 'pg-agent',
99
99
  instructions: 'You are an agent with optimized PostgreSQL memory.',
100
- model: 'openai/gpt-5.1',
100
+ model: 'openai/gpt-5.4',
101
101
  memory: new Memory({
102
102
  storage: new PgStore({
103
103
  id: 'pg-agent-storage',
@@ -1,4 +1,4 @@
1
- # Composite Storage
1
+ # Composite storage
2
2
 
3
3
  `MastraCompositeStore` can compose storage domains from different providers. Use it when you need different databases for different purposes. For example, use LibSQL for memory and PostgreSQL for workflows.
4
4
 
@@ -58,7 +58,7 @@ bun add @mastra/pg@latest @mastra/libsql@latest
58
58
 
59
59
  ## Storage domains
60
60
 
61
- Mastra organizes storage into five specialized domains, each handling a specific type of data. Each domain can be backed by a different storage adapter, and domain classes are exported from each storage package.
61
+ Mastra organizes storage into domains, each handling a specific type of data. Each domain can be backed by a different storage adapter, and domain classes are exported from each storage package.
62
62
 
63
63
  | Domain | Description |
64
64
  | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -67,6 +67,10 @@ Mastra organizes storage into five specialized domains, each handling a specific
67
67
  | `scores` | Evaluation results from Mastra's evals system. Scores and metrics are persisted here for analysis and comparison over time. |
68
68
  | `observability` | Telemetry data including traces and spans. Agent interactions, tool calls, and LLM requests generate spans collected into traces for debugging and performance analysis. |
69
69
  | `agents` | Agent configurations for stored agents. Enables agents to be defined and updated at runtime without code deployments. |
70
+ | `datasets` | Evaluation datasets used for experiment runs. Stores dataset definitions, schemas, and versioned items. |
71
+ | `experiments` | Experiment runs and per-item experiment results linked to datasets and targets. |
72
+
73
+ > **Note:** `MastraCompositeStore` accepts all of the domain keys above, but storage adapter support varies by package. You can mix adapters per domain, but only for domains implemented and exported by those adapters. For example, `memory: new MemoryLibSQL(...)` and `workflows: new WorkflowsPG(...)` is valid because both packages export those domain classes.
70
74
 
71
75
  ## Usage
72
76
 
@@ -124,7 +128,9 @@ export const mastra = new Mastra({
124
128
 
125
129
  **default** (`MastraCompositeStore`): Default storage adapter. Domains not explicitly specified in \`domains\` will use this storage's domains as fallbacks.
126
130
 
127
- **domains** (`object`): Individual domain overrides. Each domain can come from a different storage adapter. These take precedence over the default storage.
131
+ **disableInit** (`boolean`): When true, automatic initialization is disabled. You must call init() explicitly.
132
+
133
+ **domains** (`object`): Individual domain overrides. Each domain can come from a different storage adapter. These take precedence over both \`editor\` and \`default\` storage.
128
134
 
129
135
  **domains.memory** (`MemoryStorage`): Storage for threads, messages, and resources.
130
136
 
@@ -136,7 +142,9 @@ export const mastra = new Mastra({
136
142
 
137
143
  **domains.agents** (`AgentsStorage`): Storage for stored agent configurations.
138
144
 
139
- **disableInit** (`boolean`): When true, automatic initialization is disabled. You must call init() explicitly.
145
+ **domains.datasets** (`DatasetsStorage`): Storage for dataset metadata, dataset items, and dataset versions.
146
+
147
+ **domains.experiments** (`ExperimentsStorage`): Storage for experiment runs and per-item experiment results.
140
148
 
141
149
  ## Initialization
142
150
 
@@ -1,4 +1,4 @@
1
- # DynamoDB Storage
1
+ # DynamoDB storage
2
2
 
3
3
  The DynamoDB storage implementation provides a scalable and performant NoSQL database solution for Mastra, leveraging a single-table design pattern with [ElectroDB](https://electrodb.dev/).
4
4
 
@@ -120,7 +120,7 @@ For local development, you can use [DynamoDB Local](https://docs.aws.amazon.com/
120
120
 
121
121
  **config.ttl** (`object`): TTL (Time To Live) configuration for automatic data expiration. Configure per entity type: thread, message, trace, eval, workflow\_snapshot, resource, score. Each entity config includes: enabled (boolean), attributeName (string, default: 'ttl'), defaultTtlSeconds (number).
122
122
 
123
- ## TTL (Time To Live) Configuration
123
+ ## TTL (time to live) configuration
124
124
 
125
125
  DynamoDB TTL allows you to automatically delete items after a specified time period. This is useful for:
126
126
 
@@ -216,7 +216,7 @@ aws dynamodb update-time-to-live \
216
216
 
217
217
  > **Note:** DynamoDB deletes expired items within 48 hours after expiration. Items remain queryable until actually deleted.
218
218
 
219
- ## AWS IAM Permissions
219
+ ## AWS IAM permissions
220
220
 
221
221
  The IAM role or user executing the code needs appropriate permissions to interact with the specified DynamoDB table and its indexes. Below is a sample policy. Replace `${YOUR_TABLE_NAME}` with your actual table name and `${YOUR_AWS_REGION}` and `${YOUR_AWS_ACCOUNT_ID}` with appropriate values.
222
222
 
@@ -246,7 +246,7 @@ The IAM role or user executing the code needs appropriate permissions to interac
246
246
  }
247
247
  ```
248
248
 
249
- ## Key Considerations
249
+ ## Key considerations
250
250
 
251
251
  Before diving into the architectural details, keep these key points in mind when working with the DynamoDB storage adapter:
252
252
 
@@ -255,7 +255,7 @@ Before diving into the architectural details, keep these key points in mind when
255
255
  - **Understanding GSIs:** Familiarity with how the GSIs are structured (as per `TABLE_SETUP.md`) is important for understanding data retrieval and potential query patterns.
256
256
  - **ElectroDB:** The adapter uses ElectroDB to manage interactions with DynamoDB, providing a layer of abstraction and type safety over raw DynamoDB operations.
257
257
 
258
- ## Architectural Approach
258
+ ## Architectural approach
259
259
 
260
260
  This storage adapter utilizes a **single-table design pattern** leveraging [ElectroDB](https://electrodb.dev/), a common and recommended approach for DynamoDB. This differs architecturally from relational database adapters (like `@mastra/pg` or `@mastra/libsql`) that typically use multiple tables, each dedicated to a specific entity (threads, messages, etc.).
261
261
 
@@ -1,4 +1,4 @@
1
- # libSQL Storage
1
+ # libSQL storage
2
2
 
3
3
  [libSQL](https://docs.turso.tech/libsql) is an open-source, SQLite-compatible database that supports both local and remote deployments. It can be used to store message history, workflow snapshots, traces, and eval scores.
4
4
 
@@ -1,4 +1,4 @@
1
- # libSQL Vector Store
1
+ # libSQL vector store
2
2
 
3
3
  The libSQL storage implementation provides SQLite-compatible vector search using [libSQL](https://github.com/tursodatabase/libsql), a fork of SQLite with vector extensions, and [Turso](https://turso.tech/), offering a lightweight and efficient vector database solution. It's part of the `@mastra/libsql` package and offers vector similarity search with metadata filtering.
4
4
 
@@ -69,7 +69,7 @@ const results = await store.query({
69
69
  });
70
70
  ```
71
71
 
72
- ## Constructor Options
72
+ ## Constructor options
73
73
 
74
74
  **url** (`string`): libSQL database URL. Use ':memory:' for in-memory database, 'file:dbname.db' for local file, or a libSQL-compatible connection string like 'libsql://your-database.turso.io'.
75
75
 
@@ -81,7 +81,7 @@ const results = await store.query({
81
81
 
82
82
  ## Methods
83
83
 
84
- ### createIndex()
84
+ ### `createIndex()`
85
85
 
86
86
  Creates a new vector collection. The index name must start with a letter or underscore and can only contain letters, numbers, and underscores. The dimension must be a positive integer.
87
87
 
@@ -91,7 +91,7 @@ Creates a new vector collection. The index name must start with a letter or unde
91
91
 
92
92
  **metric** (`'cosine' | 'euclidean' | 'dotproduct'`): Distance metric for similarity search. Note: Currently only cosine similarity is supported by libSQL. (Default: `cosine`)
93
93
 
94
- ### upsert()
94
+ ### `upsert()`
95
95
 
96
96
  Adds or updates vectors and their metadata in the index. Uses a transaction to ensure all vectors are inserted atomically - if any insert fails, the entire operation is rolled back.
97
97
 
@@ -103,7 +103,7 @@ Adds or updates vectors and their metadata in the index. Uses a transaction to e
103
103
 
104
104
  **ids** (`string[]`): Optional vector IDs (auto-generated if not provided)
105
105
 
106
- ### query()
106
+ ### `query()`
107
107
 
108
108
  Searches for similar vectors with optional metadata filtering.
109
109
 
@@ -119,7 +119,7 @@ Searches for similar vectors with optional metadata filtering.
119
119
 
120
120
  **minScore** (`number`): Minimum similarity score threshold (Default: `0`)
121
121
 
122
- ### describeIndex()
122
+ ### `describeIndex()`
123
123
 
124
124
  Gets information about an index.
125
125
 
@@ -135,25 +135,25 @@ interface IndexStats {
135
135
  }
136
136
  ```
137
137
 
138
- ### deleteIndex()
138
+ ### `deleteIndex()`
139
139
 
140
140
  Deletes an index and all its data.
141
141
 
142
142
  **indexName** (`string`): Name of the index to delete
143
143
 
144
- ### listIndexes()
144
+ ### `listIndexes()`
145
145
 
146
146
  Lists all vector indexes in the database.
147
147
 
148
148
  Returns: `Promise<string[]>`
149
149
 
150
- ### truncateIndex()
150
+ ### `truncateIndex()`
151
151
 
152
152
  Removes all vectors from an index while keeping the index structure.
153
153
 
154
154
  **indexName** (`string`): Name of the index to truncate
155
155
 
156
- ### updateVector()
156
+ ### `updateVector()`
157
157
 
158
158
  Update a single vector by ID or by metadata filter. Either `id` or `filter` must be provided, but not both.
159
159
 
@@ -169,7 +169,7 @@ Update a single vector by ID or by metadata filter. Either `id` or `filter` must
169
169
 
170
170
  **update.metadata** (`Record<string, any>`): New metadata to update
171
171
 
172
- ### deleteVector()
172
+ ### `deleteVector()`
173
173
 
174
174
  Deletes a specific vector entry from an index by its ID.
175
175
 
@@ -177,7 +177,7 @@ Deletes a specific vector entry from an index by its ID.
177
177
 
178
178
  **id** (`string`): ID of the vector entry to delete
179
179
 
180
- ### deleteVectors()
180
+ ### `deleteVectors()`
181
181
 
182
182
  Delete multiple vectors by IDs or by metadata filter. Either `ids` or `filter` must be provided, but not both.
183
183
 
@@ -187,7 +187,7 @@ Delete multiple vectors by IDs or by metadata filter. Either `ids` or `filter` m
187
187
 
188
188
  **filter** (`Record<string, any>`): Metadata filter to identify vectors to delete (mutually exclusive with ids)
189
189
 
190
- ## Response Types
190
+ ## Response types
191
191
 
192
192
  Query results are returned in this format:
193
193
 
@@ -200,7 +200,7 @@ interface QueryResult {
200
200
  }
201
201
  ```
202
202
 
203
- ## Error Handling
203
+ ## Error handling
204
204
 
205
205
  The store throws specific errors for different failure cases:
206
206
 
@@ -232,7 +232,7 @@ Common error cases include:
232
232
  - Database connection issues
233
233
  - Transaction failures during upsert
234
234
 
235
- ## Usage Example
235
+ ## Usage example
236
236
 
237
237
  ### Local embeddings with fastembed
238
238
 
@@ -277,7 +277,7 @@ export const libsqlAgent = new Agent({
277
277
  name: 'libSQL Agent',
278
278
  instructions:
279
279
  'You are an AI agent with the ability to automatically recall memories from previous interactions.',
280
- model: 'openai/gpt-5.1',
280
+ model: 'openai/gpt-5.4',
281
281
  memory: new Memory({
282
282
  storage: new LibSQLStore({
283
283
  id: 'libsql-agent-storage',
package/dist/index.cjs CHANGED
@@ -6994,15 +6994,15 @@ var MemoryLibSQL = class extends storage.MemoryStorage {
6994
6994
  let bestUnderTokens = 0;
6995
6995
  for (let i = 0; i < chunks.length; i++) {
6996
6996
  cumulativeMessageTokens += chunks[i].messageTokens ?? 0;
6997
- const boundary = i + 1;
6997
+ const boundary2 = i + 1;
6998
6998
  if (cumulativeMessageTokens >= targetMessageTokens) {
6999
6999
  if (bestOverBoundary === 0 || cumulativeMessageTokens < bestOverTokens) {
7000
- bestOverBoundary = boundary;
7000
+ bestOverBoundary = boundary2;
7001
7001
  bestOverTokens = cumulativeMessageTokens;
7002
7002
  }
7003
7003
  } else {
7004
7004
  if (cumulativeMessageTokens > bestUnderTokens) {
7005
- bestUnderBoundary = boundary;
7005
+ bestUnderBoundary = boundary2;
7006
7006
  bestUnderTokens = cumulativeMessageTokens;
7007
7007
  }
7008
7008
  }
@@ -7037,9 +7037,12 @@ var MemoryLibSQL = class extends storage.MemoryStorage {
7037
7037
  const lastObservedAtStr = lastObservedAt.toISOString();
7038
7038
  const existingActive = row.activeObservations || "";
7039
7039
  const existingTokenCount = Number(row.observationTokenCount || 0);
7040
- const newActive = existingActive ? `${existingActive}
7040
+ const boundary = `
7041
7041
 
7042
- ${activatedContent}` : activatedContent;
7042
+ --- message boundary (${lastObservedAt.toISOString()}) ---
7043
+
7044
+ `;
7045
+ const newActive = existingActive ? `${existingActive}${boundary}${activatedContent}` : activatedContent;
7043
7046
  const newTokenCount = existingTokenCount + activatedTokens;
7044
7047
  const existingPending = Number(row.pendingMessageTokens || 0);
7045
7048
  const newPending = Math.max(0, existingPending - activatedMessageTokens);