@mastra/pg 1.0.0-beta.11 → 1.0.0-beta.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +80 -0
- package/dist/docs/README.md +36 -0
- package/dist/docs/SKILL.md +37 -0
- package/dist/docs/SOURCE_MAP.json +6 -0
- package/dist/docs/memory/01-storage.md +181 -0
- package/dist/docs/memory/02-working-memory.md +386 -0
- package/dist/docs/memory/03-semantic-recall.md +235 -0
- package/dist/docs/memory/04-reference.md +135 -0
- package/dist/docs/processors/01-reference.md +295 -0
- package/dist/docs/rag/01-overview.md +74 -0
- package/dist/docs/rag/02-vector-databases.md +638 -0
- package/dist/docs/rag/03-retrieval.md +549 -0
- package/dist/docs/rag/04-reference.md +351 -0
- package/dist/docs/storage/01-reference.md +667 -0
- package/dist/docs/tools/01-reference.md +440 -0
- package/dist/docs/vectors/01-reference.md +307 -0
- package/dist/index.cjs +159 -7
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +159 -7
- package/dist/index.js.map +1 -1
- package/dist/storage/domains/memory/index.d.ts +2 -1
- package/dist/storage/domains/memory/index.d.ts.map +1 -1
- package/package.json +9 -8
package/dist/docs/vectors/01-reference.md
ADDED

@@ -0,0 +1,307 @@

# Vectors API Reference

> API reference for vectors - 1 entry

---

## Reference: PG Vector Store

> Documentation for the PgVector class in Mastra, which provides vector search using PostgreSQL with the pgvector extension.

The PgVector class provides vector search using [PostgreSQL](https://www.postgresql.org/) with the [pgvector](https://github.com/pgvector/pgvector) extension, bringing robust vector similarity search to your existing PostgreSQL database.

## Constructor Options
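As the examples below show, the constructor accepts either a `connectionString` or discrete `host`/`port`/`database`/`user`/`password` fields, together with an `id`, an optional `schemaName`, pool settings such as `max` and `idleTimeoutMillis`, and pass-through `pgPoolOptions` for the underlying `pg` pool.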

## Constructor Examples

### Connection String

```ts
import { PgVector } from "@mastra/pg";

const vectorStore = new PgVector({
  id: 'pg-vector',
  connectionString: "postgresql://user:password@localhost:5432/mydb",
});
```

### Host/Port/Database Configuration

```ts
const vectorStore = new PgVector({
  id: 'pg-vector',
  host: "localhost",
  port: 5432,
  database: "mydb",
  user: "postgres",
  password: "password",
});
```

### Advanced Configuration

```ts
const vectorStore = new PgVector({
  id: 'pg-vector',
  connectionString: "postgresql://user:password@localhost:5432/mydb",
  schemaName: "custom_schema",
  max: 30,
  idleTimeoutMillis: 60000,
  pgPoolOptions: {
    connectionTimeoutMillis: 5000,
    allowExitOnIdle: true,
  },
});
```

## Methods

### createIndex()

#### IndexConfig

#### Memory Requirements

HNSW indexes require significant shared memory during construction. For 100K vectors:

- Small dimensions (64d): ~60MB with default settings
- Medium dimensions (256d): ~180MB with default settings
- Large dimensions (384d+): ~250MB+ with default settings

Higher `m` or `efConstruction` values will increase memory requirements significantly. Adjust your system's shared memory limits if needed.

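A minimal `createIndex()` sketch. The object-argument shape (`indexName`, `dimension`, `metric`, `indexConfig`) is an assumption inferred from the other methods in this reference; the metric names and `indexConfig` shape match `PGIndexStats` and the `buildIndex()` examples below.

```typescript
// Sketch only: the parameter names here are assumed, not confirmed by this reference.
await pgVector.createIndex({
  indexName: "my_vectors",
  dimension: 1536, // must match your embedding model's output size
  metric: "cosine", // "cosine" | "euclidean" | "dotproduct", per PGIndexStats
  indexConfig: {
    type: "hnsw",
    hnsw: { m: 16, efConstruction: 64 }, // same shape as the buildIndex() examples
  },
});
```
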
### upsert()
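
A minimal `upsert()` sketch, assuming the object-argument form used by the other methods here, with `metadata` and `ids` as arrays parallel to `vectors`:

```typescript
// Sketch only: the exact parameter names are assumptions.
await pgVector.upsert({
  indexName: "my_vectors",
  vectors: [[0.1, 0.2, 0.3]], // one embedding per entry
  metadata: [{ label: "greeting" }], // parallel to vectors
  ids: ["vector123"], // assumed optional; ids are typically generated when omitted
});
```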
### query()
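
A minimal `query()` sketch. `indexName` and `queryVector` appear in the error-handling example below, and `includeVector` is referenced by `QueryResult`; `topK` and `filter` are assumed option names:

```typescript
// Sketch only: topK and filter are assumed option names.
const results = await pgVector.query({
  indexName: "my_vectors",
  queryVector: [0.1, 0.2, 0.3], // must match the index dimension
  topK: 10, // assumed: number of nearest neighbors to return
  filter: { category: "product" }, // assumed: optional metadata filter
  includeVector: false, // set true to populate QueryResult.vector
});
// results: QueryResult[] (see "Response Types" below)
```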

### listIndexes()

Returns an array of index names as strings.

### describeIndex()

Returns:

```typescript
interface PGIndexStats {
  dimension: number;
  count: number;
  metric: "cosine" | "euclidean" | "dotproduct";
  type: "flat" | "hnsw" | "ivfflat";
  config: {
    m?: number;
    efConstruction?: number;
    lists?: number;
    probes?: number;
  };
}
```

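A usage sketch, assuming `describeIndex()` takes the index name in the same object form as the other methods:

```typescript
// Sketch only: the argument shape is an assumption.
const stats = await pgVector.describeIndex({ indexName: "my_vectors" });
console.log(stats.count, stats.metric, stats.config); // fields from PGIndexStats
```
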
### deleteIndex()
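
A sketch, assuming the same object-argument form as `deleteVector()` below:

```typescript
// Sketch only: removes the index named "my_vectors".
await pgVector.deleteIndex({ indexName: "my_vectors" });
```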

### updateVector()

Updates a single vector by ID or by metadata filter. Either `id` or `filter` must be provided, but not both, and at least one of `vector` or `metadata` must be provided in the update object.

```typescript
// Update by ID
await pgVector.updateVector({
  indexName: "my_vectors",
  id: "vector123",
  update: {
    vector: [0.1, 0.2, 0.3],
    metadata: { label: "updated" },
  },
});

// Update by filter
await pgVector.updateVector({
  indexName: "my_vectors",
  filter: { category: "product" },
  update: {
    metadata: { status: "reviewed" },
  },
});
```

### deleteVector()

Deletes a single vector by ID from the specified index.

```typescript
await pgVector.deleteVector({ indexName: "my_vectors", id: "vector123" });
```

### deleteVectors()

Deletes multiple vectors by IDs or by metadata filter. Either `ids` or `filter` must be provided, but not both.
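
A sketch mirroring the `updateVector()` and `deleteVector()` examples; the two calls are alternatives, not a sequence:

```typescript
// Delete by IDs
await pgVector.deleteVectors({ indexName: "my_vectors", ids: ["vector123", "vector456"] });

// Delete by metadata filter
await pgVector.deleteVectors({ indexName: "my_vectors", filter: { status: "archived" } });
```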

### disconnect()

Closes the database connection pool. Should be called when done using the store.
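
For example:

```typescript
await pgVector.disconnect(); // close the pool once all queries have finished
```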

### buildIndex()

Builds or rebuilds an index with the specified metric and configuration. Any existing index is dropped before the new one is created.

```typescript
// Define HNSW index
await pgVector.buildIndex("my_vectors", "cosine", {
  type: "hnsw",
  hnsw: {
    m: 8,
    efConstruction: 32,
  },
});

// Define IVF index
await pgVector.buildIndex("my_vectors", "cosine", {
  type: "ivfflat",
  ivf: {
    lists: 100,
  },
});

// Define flat index
await pgVector.buildIndex("my_vectors", "cosine", {
  type: "flat",
});
```

## Response Types

Query results are returned in this format:

```typescript
interface QueryResult {
  id: string;
  score: number;
  metadata: Record<string, any>;
  vector?: number[]; // Only included if includeVector is true
}
```

## Error Handling

The store throws typed errors that can be caught:

```typescript
try {
  await store.query({
    indexName: "index_name",
    queryVector: queryVector,
  });
} catch (error) {
  if (error instanceof VectorStoreError) {
    console.log(error.code); // 'connection_failed' | 'invalid_dimension' | etc
    console.log(error.details); // Additional error context
  }
}
```

## Index Configuration Guide

### Performance Optimization

#### IVFFlat Tuning

- **lists parameter**: Set to `sqrt(n) * 2`, where n is the number of vectors (for 100K vectors, roughly 630 lists)
- More lists = better accuracy but slower build time
- Fewer lists = faster build but potentially lower accuracy

#### HNSW Tuning

- **m parameter**:
  - 8-16: Moderate accuracy, lower memory
  - 16-32: High accuracy, moderate memory
  - 32-64: Very high accuracy, high memory
- **efConstruction**:
  - 32-64: Fast build, good quality
  - 64-128: Slower build, better quality
  - 128-256: Slowest build, best quality

### Index Recreation Behavior

The system automatically detects configuration changes and only rebuilds indexes when necessary:

- Same configuration: Index is kept (no recreation)
- Changed configuration: Index is dropped and rebuilt
- This avoids the performance cost of unnecessary index recreations

## Best Practices

- Regularly evaluate your index configuration to ensure optimal performance.
- Adjust parameters like `lists` and `m` based on dataset size and query requirements.
- **Monitor index performance** using `describeIndex()` to track usage.
- Rebuild indexes periodically to maintain efficiency, especially after significant data changes.

## Direct Pool Access

The `PgVector` class exposes its underlying PostgreSQL connection pool as a public field:

```typescript
pgVector.pool; // instance of pg.Pool
```

This enables advanced usage such as running direct SQL queries, managing transactions, or monitoring pool state. When using the pool directly:

- You are responsible for releasing clients (`client.release()`) after use.
- The pool remains accessible after calling `disconnect()`, but new queries will fail.
- Direct access bypasses any validation or transaction logic provided by PgVector methods.

This design supports advanced use cases but requires careful resource management by the user.
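
A sketch of direct pool usage via the standard `pg` client API (the table name is illustrative):

```typescript
const client = await pgVector.pool.connect();
try {
  const { rows } = await client.query("SELECT count(*) FROM my_vectors");
  console.log(rows[0].count);
} finally {
  client.release(); // always return checked-out clients to the pool
}
```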

## Usage Example

### Local embeddings with fastembed

Embeddings are numeric vectors used by memory's `semanticRecall` to retrieve related messages by meaning (not keywords). This setup uses `@mastra/fastembed` to generate vector embeddings.

Install `fastembed` to get started:

```bash
npm install @mastra/fastembed@beta
```

Add the following to your agent:

```typescript title="src/mastra/agents/example-pg-agent.ts"
import { Memory } from "@mastra/memory";
import { Agent } from "@mastra/core/agent";
import { PostgresStore, PgVector } from "@mastra/pg";
import { fastembed } from "@mastra/fastembed";

export const pgAgent = new Agent({
  id: "pg-agent",
  name: "PG Agent",
  instructions:
    "You are an AI agent with the ability to automatically recall memories from previous interactions.",
  model: "openai/gpt-5.1",
  memory: new Memory({
    storage: new PostgresStore({
      id: 'pg-agent-storage',
      connectionString: process.env.DATABASE_URL!,
    }),
    vector: new PgVector({
      id: 'pg-agent-vector',
      connectionString: process.env.DATABASE_URL!,
    }),
    embedder: fastembed,
    options: {
      lastMessages: 10,
      semanticRecall: {
        topK: 3,
        messageRange: 2,
      },
    },
  }),
});
```

## Related

- [Metadata Filters](../rag/metadata-filters)

package/dist/index.cjs
CHANGED
@@ -3253,13 +3253,19 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
       };
     }
     const limitValue = perPageInput === false ? total : perPage;
-    const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "updatedAt" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`;
-    const rows = await this.#db.client.manyOrNone(
+    const dataQuery = `SELECT id, "resourceId", title, metadata, "createdAt", "createdAtZ", "updatedAt", "updatedAtZ" ${baseQuery} ORDER BY "${field}" ${direction} LIMIT $2 OFFSET $3`;
+    const rows = await this.#db.client.manyOrNone(
+      dataQuery,
+      [...queryParams, limitValue, offset]
+    );
     const threads = (rows || []).map((thread) => ({
-
+      id: thread.id,
+      resourceId: thread.resourceId,
+      title: thread.title,
       metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
-
-
+      // Use timezone-aware columns (*Z) for correct UTC timestamps, with fallback for legacy data
+      createdAt: thread.createdAtZ || thread.createdAt,
+      updatedAt: thread.updatedAtZ || thread.updatedAt
     }));
     return {
       threads,
@@ -3587,11 +3593,13 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
       queryParams.push(resourceId);
     }
     if (filter?.dateRange?.start) {
-
+      const startOp = filter.dateRange.startExclusive ? ">" : ">=";
+      conditions.push(`"createdAt" ${startOp} $${paramIndex++}`);
       queryParams.push(filter.dateRange.start);
     }
     if (filter?.dateRange?.end) {
-
+      const endOp = filter.dateRange.endExclusive ? "<" : "<=";
+      conditions.push(`"createdAt" ${endOp} $${paramIndex++}`);
       queryParams.push(filter.dateRange.end);
     }
     const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
@@ -3976,6 +3984,150 @@ var MemoryPG = class _MemoryPG extends storage.MemoryStorage {
     await this.#db.client.none(`UPDATE ${tableName} SET ${updates.join(", ")} WHERE id = $${paramIndex}`, values);
     return updatedResource;
   }
+  async cloneThread(args) {
+    const { sourceThreadId, newThreadId: providedThreadId, resourceId, title, metadata, options } = args;
+    const sourceThread = await this.getThreadById({ threadId: sourceThreadId });
+    if (!sourceThread) {
+      throw new error.MastraError({
+        id: storage.createStorageErrorId("PG", "CLONE_THREAD", "SOURCE_NOT_FOUND"),
+        domain: error.ErrorDomain.STORAGE,
+        category: error.ErrorCategory.USER,
+        text: `Source thread with id ${sourceThreadId} not found`,
+        details: { sourceThreadId }
+      });
+    }
+    const newThreadId = providedThreadId || crypto.randomUUID();
+    const existingThread = await this.getThreadById({ threadId: newThreadId });
+    if (existingThread) {
+      throw new error.MastraError({
+        id: storage.createStorageErrorId("PG", "CLONE_THREAD", "THREAD_EXISTS"),
+        domain: error.ErrorDomain.STORAGE,
+        category: error.ErrorCategory.USER,
+        text: `Thread with id ${newThreadId} already exists`,
+        details: { newThreadId }
+      });
+    }
+    const threadTableName = getTableName3({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName3(this.#schema) });
+    const messageTableName = getTableName3({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName3(this.#schema) });
+    try {
+      return await this.#db.client.tx(async (t) => {
+        let messageQuery = `SELECT id, content, role, type, "createdAt", "createdAtZ", thread_id AS "threadId", "resourceId"
+          FROM ${messageTableName} WHERE thread_id = $1`;
+        const messageParams = [sourceThreadId];
+        let paramIndex = 2;
+        if (options?.messageFilter?.startDate) {
+          messageQuery += ` AND "createdAt" >= $${paramIndex++}`;
+          messageParams.push(options.messageFilter.startDate);
+        }
+        if (options?.messageFilter?.endDate) {
+          messageQuery += ` AND "createdAt" <= $${paramIndex++}`;
+          messageParams.push(options.messageFilter.endDate);
+        }
+        if (options?.messageFilter?.messageIds && options.messageFilter.messageIds.length > 0) {
+          messageQuery += ` AND id IN (${options.messageFilter.messageIds.map(() => `$${paramIndex++}`).join(", ")})`;
+          messageParams.push(...options.messageFilter.messageIds);
+        }
+        messageQuery += ` ORDER BY "createdAt" ASC`;
+        if (options?.messageLimit && options.messageLimit > 0) {
+          const limitQuery = `SELECT * FROM (${messageQuery.replace('ORDER BY "createdAt" ASC', 'ORDER BY "createdAt" DESC')} LIMIT $${paramIndex}) AS limited ORDER BY "createdAt" ASC`;
+          messageParams.push(options.messageLimit);
+          messageQuery = limitQuery;
+        }
+        const sourceMessages = await t.manyOrNone(messageQuery, messageParams);
+        const now = /* @__PURE__ */ new Date();
+        const lastMessageId = sourceMessages.length > 0 ? sourceMessages[sourceMessages.length - 1].id : void 0;
+        const cloneMetadata = {
+          sourceThreadId,
+          clonedAt: now,
+          ...lastMessageId && { lastMessageId }
+        };
+        const newThread = {
+          id: newThreadId,
+          resourceId: resourceId || sourceThread.resourceId,
+          title: title || (sourceThread.title ? `Clone of ${sourceThread.title}` : void 0),
+          metadata: {
+            ...metadata,
+            clone: cloneMetadata
+          },
+          createdAt: now,
+          updatedAt: now
+        };
+        await t.none(
+          `INSERT INTO ${threadTableName} (
+            id,
+            "resourceId",
+            title,
+            metadata,
+            "createdAt",
+            "createdAtZ",
+            "updatedAt",
+            "updatedAtZ"
+          ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
+          [
+            newThread.id,
+            newThread.resourceId,
+            newThread.title,
+            newThread.metadata ? JSON.stringify(newThread.metadata) : null,
+            now,
+            now,
+            now,
+            now
+          ]
+        );
+        const clonedMessages = [];
+        const targetResourceId = resourceId || sourceThread.resourceId;
+        for (const sourceMsg of sourceMessages) {
+          const newMessageId = crypto.randomUUID();
+          const normalizedMsg = this.normalizeMessageRow(sourceMsg);
+          let parsedContent = normalizedMsg.content;
+          try {
+            parsedContent = JSON.parse(normalizedMsg.content);
+          } catch {
+          }
+          await t.none(
+            `INSERT INTO ${messageTableName} (id, thread_id, content, "createdAt", "createdAtZ", role, type, "resourceId")
+            VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
+            [
+              newMessageId,
+              newThreadId,
+              typeof normalizedMsg.content === "string" ? normalizedMsg.content : JSON.stringify(normalizedMsg.content),
+              normalizedMsg.createdAt,
+              normalizedMsg.createdAt,
+              normalizedMsg.role,
+              normalizedMsg.type || "v2",
+              targetResourceId
+            ]
+          );
+          clonedMessages.push({
+            id: newMessageId,
+            threadId: newThreadId,
+            content: parsedContent,
+            role: normalizedMsg.role,
+            type: normalizedMsg.type,
+            createdAt: new Date(normalizedMsg.createdAt),
+            resourceId: targetResourceId
+          });
+        }
+        return {
+          thread: newThread,
+          clonedMessages
+        };
+      });
+    } catch (error$1) {
+      if (error$1 instanceof error.MastraError) {
+        throw error$1;
+      }
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("PG", "CLONE_THREAD", "FAILED"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY,
+          details: { sourceThreadId, newThreadId }
+        },
+        error$1
+      );
+    }
+  }
 };
 var ObservabilityPG = class _ObservabilityPG extends storage.ObservabilityStorage {
   #db;