@memorylayerai/sdk 0.4.0 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +75 -0
- package/bun.lock +390 -0
- package/dist/index.cjs +107 -45
- package/dist/index.d.cts +74 -13
- package/dist/index.d.ts +74 -13
- package/dist/index.js +107 -45
- package/package.json +1 -1
- package/src/resources/ingest.ts +82 -15
- package/src/resources/search.ts +42 -39
- package/src/types.ts +50 -11
package/dist/index.cjs
CHANGED
```diff
@@ -221,9 +221,18 @@ var init_search = __esm({
         this.httpClient = httpClient;
       }
       /**
-       * Search memories
+       * Search memories using the unified /v1/search endpoint with hybrid retrieval.
+       *
+       * This uses the app's full retrieval pipeline with:
+       * - Vector similarity search
+       * - BM25 keyword search
+       * - Recency scoring
+       * - Graph connectivity (optional)
+       * - Entity expansion (optional)
+       * - LLM/Cross-encoder reranking (optional)
+       *
        * @param request - Search request
-       * @returns Search results
+       * @returns Search results with memory pack structure
        */
       async search(request) {
         if (!request.query || request.query.trim().length === 0) {
@@ -238,42 +247,36 @@ var init_search = __esm({
             [{ field: "projectId", message: "Project ID is required" }]
           );
         }
-        const
-
-
+        const body = {
+          query: request.query,
+          project_id: request.projectId,
+          include_text_format: true
         };
         if (request.limit !== void 0) {
-
-        }
-        if (request.threshold !== void 0) {
-          query.threshold = request.threshold.toString();
-        }
-        if (request.filter) {
-          query.filter = JSON.stringify(request.filter);
+          body.limit = request.limit;
         }
-
-
-
-        if (request.enableEntityExpansion !== void 0) {
-          query.enableEntityExpansion = request.enableEntityExpansion.toString();
-        }
-        if (request.enableGraphConnectivity !== void 0) {
-          query.enableGraphConnectivity = request.enableGraphConnectivity.toString();
-        }
-        if (request.enableSemanticDedup !== void 0) {
-          query.enableSemanticDedup = request.enableSemanticDedup.toString();
-        }
-        if (request.rerankingStrategy) {
-          query.rerankingStrategy = request.rerankingStrategy;
-        }
-        if (request.fusionWeights) {
-          query.fusionWeights = JSON.stringify(request.fusionWeights);
-        }
-        return this.httpClient.request({
-          method: "GET",
+        body.rerank_strategy = request.rerankingStrategy || "cross-encoder";
+        const response = await this.httpClient.request({
+          method: "POST",
           path: "/v1/search",
-
+          body
         });
+        const memoryPack = response.memory_pack || {};
+        const results = [];
+        for (const memoryType of ["facts", "preferences", "entities", "sources"]) {
+          const items = memoryPack[memoryType] || [];
+          for (const item of items) {
+            results.push({
+              memory: item,
+              score: item.score || 1,
+              highlights: item.highlights || []
+            });
+          }
+        }
+        return {
+          results,
+          total: results.length
+        };
       }
     };
   }
@@ -311,18 +314,15 @@ var init_ingest = __esm({
             [{ field: "projectId", message: "Project ID is required" }]
           );
         }
-        const body = {
-          projectId: request.projectId,
-          metadata: request.metadata,
-          chunkSize: request.chunkSize,
-          chunkOverlap: request.chunkOverlap,
-          // In a real implementation, you'd convert the file to base64 or use FormData
-          file: request.file
-        };
         return this.httpClient.request({
           method: "POST",
-          path: "/v1/ingest
-          body
+          path: "/v1/ingest",
+          body: {
+            type: "pdf",
+            projectId: request.projectId,
+            metadata: request.metadata || {},
+            file: request.file
+          }
         });
       }
       /**
@@ -345,9 +345,71 @@ var init_ingest = __esm({
         }
         return this.httpClient.request({
           method: "POST",
-          path: "/v1/ingest
-          body:
+          path: "/v1/ingest",
+          body: {
+            type: "text",
+            content: request.text,
+            projectId: request.projectId,
+            metadata: request.metadata || {}
+          }
+        });
+      }
+      /**
+       * Ingest content from a URL
+       * @param url - URL to ingest from
+       * @param projectId - Project ID
+       * @param metadata - Optional metadata
+       * @returns Ingestion response with job details
+       */
+      async url(url, projectId, metadata) {
+        if (!url || url.trim().length === 0) {
+          throw new ValidationError(
+            "URL cannot be empty",
+            [{ field: "url", message: "URL is required and cannot be empty" }]
+          );
+        }
+        if (!projectId || projectId.trim().length === 0) {
+          throw new ValidationError(
+            "Project ID is required",
+            [{ field: "projectId", message: "Project ID is required" }]
+          );
+        }
+        return this.httpClient.request({
+          method: "POST",
+          path: "/v1/ingest",
+          body: {
+            type: "url",
+            url,
+            projectId,
+            metadata: metadata || {}
+          }
+        });
+      }
+      /**
+       * Get the status of an ingestion job
+       * @param jobId - Job ID returned from ingest
+       * @param projectId - Project ID
+       * @returns Job status information
+       */
+      async getJob(jobId, projectId) {
+        if (!jobId || jobId.trim().length === 0) {
+          throw new ValidationError(
+            "Job ID is required",
+            [{ field: "jobId", message: "Job ID is required" }]
+          );
+        }
+        if (!projectId || projectId.trim().length === 0) {
+          throw new ValidationError(
+            "Project ID is required",
+            [{ field: "projectId", message: "Project ID is required" }]
+          );
+        }
+        const response = await this.httpClient.request({
+          method: "GET",
+          path: `/v1/jobs/${jobId}`,
+          query: { projectId }
         });
+        return response.data || response;
       }
     };
   }
```
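In short, the compiled search path in 0.6.1 switches from a GET with query-string parameters to a POST body (`query`, `project_id`, `include_text_format`, optional `limit`, and a `rerank_strategy` that falls back to `"cross-encoder"`), then flattens the returned `memory_pack` buckets into a single `results` array. A minimal usage sketch follows; `client` stands in for a configured SDK instance whose `search` property is the `SearchResource` shown above, which is an assumption, since client construction is not part of this diff.

```ts
// Sketch only: how a caller might exercise the new search() behavior.
// The `client.search` property name and client setup are assumptions;
// the diff confirms only SearchResource.search(request) itself.
async function findMemories(client: any): Promise<void> {
  const response = await client.search.search({
    query: "preferred deployment region",
    projectId: "proj_123",              // hypothetical project ID
    limit: 10,                          // documented default
    rerankingStrategy: "cross-encoder", // also the SDK's fallback when omitted
  });

  // 0.6.1 flattens memory_pack.{facts, preferences, entities, sources} into results[].
  for (const { memory, score, highlights } of response.results) {
    console.log(score, highlights.length, memory);
  }
  console.log(`total results: ${response.total}`);
}
```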
package/dist/index.d.cts
CHANGED
```diff
@@ -105,6 +105,37 @@ declare class APIError extends MemoryLayerError {
     constructor(message: string, statusCode: number, requestId?: string, details?: any);
 }
 
+/**
+ * Model provider options for multi-provider support
+ * Allows overriding org-level settings at request time
+ *
+ * Supported providers:
+ * - 'openai': OpenAI GPT models and text-embedding models
+ * - 'google': Google Gemini models and text-embedding-004
+ * - 'anthropic': Anthropic Claude models (uses OpenAI embeddings for vector search)
+ */
+type ModelProvider = 'openai' | 'google' | 'anthropic';
+/**
+ * Model settings for request-level override
+ */
+interface ModelSettings {
+    /** Model provider: 'openai', 'google', or 'anthropic' */
+    provider?: ModelProvider;
+    /**
+     * Embedding model to use:
+     * - OpenAI: 'text-embedding-3-small', 'text-embedding-3-large', 'text-embedding-ada-002'
+     * - Google: 'text-embedding-004'
+     * - Anthropic: Uses OpenAI embeddings (specify OpenAI model)
+     */
+    embeddingModel?: string;
+    /**
+     * Language model for extraction/generation:
+     * - OpenAI: 'gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'o1', 'o1-mini'
+     * - Google: 'gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.0-flash'
+     * - Anthropic: 'claude-3-5-sonnet-20241022', 'claude-3-opus-20240229', 'claude-3-haiku-20240307'
+     */
+    languageModel?: string;
+}
 /**
  * Memory object
  */
@@ -163,13 +194,13 @@ interface SearchRequest {
     query: string;
     /** Project ID to search in */
     projectId: string;
-    /** Maximum number of results to return (default: 10) */
+    /** Maximum number of results to return (default: 10 - supermemory production default) */
     limit?: number;
     /** Filter criteria for search */
     filter?: Record<string, any>;
-    /** Minimum relevance score threshold (0-1) */
+    /** Minimum relevance score threshold (0-1, default: 0.6 - supermemory production default for broad recall) */
     threshold?: number;
-    /** Enable query rewriting (default:
+    /** Enable query rewriting (default: false - adds ~400ms latency) */
     enableQueryRewriting?: boolean;
     /** Enable entity expansion search (default: false) */
     enableEntityExpansion?: boolean;
@@ -177,7 +208,7 @@ interface SearchRequest {
     enableGraphConnectivity?: boolean;
     /** Enable semantic deduplication (default: false) */
     enableSemanticDedup?: boolean;
-    /** Reranking strategy: 'none', 'cross-encoder', 'llm' (default: 'none') */
+    /** Reranking strategy: 'none', 'cross-encoder', 'llm' (default: 'none' - adds latency) */
     rerankingStrategy?: 'none' | 'cross-encoder' | 'llm';
     /** Custom fusion weights for multi-method retrieval */
     fusionWeights?: {
@@ -187,6 +218,8 @@ interface SearchRequest {
         entity?: number;
         graph?: number;
     };
+    /** Model settings override (optional - uses org settings if not specified) */
+    modelSettings?: ModelSettings;
 }
 /**
  * Search result
@@ -232,10 +265,12 @@ interface IngestFileRequest {
     projectId: string;
     /** Optional metadata to associate with ingested memories */
     metadata?: Record<string, any>;
-    /** Chunk size for splitting the file (default:
+    /** Chunk size for splitting the file (default: 512 tokens - supermemory production default) */
     chunkSize?: number;
-    /** Overlap between chunks (default:
+    /** Overlap between chunks (default: 10% - supermemory production default) */
     chunkOverlap?: number;
+    /** Model settings override (optional - uses org settings if not specified) */
+    modelSettings?: ModelSettings;
 }
 /**
  * Request to ingest text
@@ -247,10 +282,12 @@ interface IngestTextRequest {
     projectId: string;
     /** Optional metadata to associate with ingested memories */
     metadata?: Record<string, any>;
-    /** Chunk size for splitting the text (default:
+    /** Chunk size for splitting the text (default: 512 tokens - supermemory production default) */
     chunkSize?: number;
-    /** Overlap between chunks (default:
+    /** Overlap between chunks (default: 10% - supermemory production default) */
     chunkOverlap?: number;
+    /** Model settings override (optional - uses org settings if not specified) */
+    modelSettings?: ModelSettings;
 }
 /**
  * Ingestion response
@@ -278,11 +315,11 @@ interface RouterRequest {
     messages: Message[];
     /** Project ID for memory context */
     projectId: string;
-    /** Model to use (
+    /** Model to use (default: 'gpt-4o-mini' - supermemory production default) */
     model?: string;
-    /** Temperature for generation (0-2, default:
+    /** Temperature for generation (0-2, default: 0.7 - supermemory production default) */
     temperature?: number;
-    /** Maximum tokens to generate */
+    /** Maximum tokens to generate (default: 2000 - supermemory production default) */
     maxTokens?: number;
     /** Whether to stream the response */
     stream?: boolean;
@@ -644,9 +681,18 @@ declare class SearchResource {
     private httpClient;
     constructor(httpClient: HTTPClient);
     /**
-     * Search memories
+     * Search memories using the unified /v1/search endpoint with hybrid retrieval.
+     *
+     * This uses the app's full retrieval pipeline with:
+     * - Vector similarity search
+     * - BM25 keyword search
+     * - Recency scoring
+     * - Graph connectivity (optional)
+     * - Entity expansion (optional)
+     * - LLM/Cross-encoder reranking (optional)
+     *
      * @param request - Search request
-     * @returns Search results
+     * @returns Search results with memory pack structure
      */
     search(request: SearchRequest): Promise<SearchResponse>;
 }
@@ -669,6 +715,21 @@ declare class IngestResource {
      * @returns Ingestion response with created memory IDs
      */
     text(request: IngestTextRequest): Promise<IngestResponse>;
+    /**
+     * Ingest content from a URL
+     * @param url - URL to ingest from
+     * @param projectId - Project ID
+     * @param metadata - Optional metadata
+     * @returns Ingestion response with job details
+     */
+    url(url: string, projectId: string, metadata?: Record<string, any>): Promise<IngestResponse>;
+    /**
+     * Get the status of an ingestion job
+     * @param jobId - Job ID returned from ingest
+     * @param projectId - Project ID
+     * @returns Job status information
+     */
+    getJob(jobId: string, projectId: string): Promise<any>;
 }
 
 /**
```
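The headline addition to the declarations is `ModelSettings`, a per-request override of org-level provider settings that now hangs off `SearchRequest`, `IngestFileRequest`, and `IngestTextRequest`. The sketch below shows how those fields compose; the type shapes are copied from the declarations above, but whether they are re-exported from the package root is not shown in this diff, so they are restated locally.

```ts
// Local restatement of the declared shapes (export status not shown in the diff).
type ModelProvider = "openai" | "google" | "anthropic";

interface ModelSettings {
  provider?: ModelProvider;
  embeddingModel?: string;
  languageModel?: string;
}

// Per-request override: Gemini for generation, Google embeddings for retrieval.
const modelSettings: ModelSettings = {
  provider: "google",
  embeddingModel: "text-embedding-004",
  languageModel: "gemini-2.5-flash",
};

// SearchRequest with the new optional field; omit modelSettings to keep org defaults.
const searchRequest = {
  query: "open questions from last week's review",
  projectId: "proj_123", // hypothetical project ID
  threshold: 0.6,        // documented default for broad recall
  modelSettings,
};

// IngestTextRequest with the new optional field.
const ingestTextRequest = {
  text: "Meeting notes: we agreed to ship the beta on Friday.",
  projectId: "proj_123",
  chunkSize: 512,   // documented default (tokens)
  chunkOverlap: 51, // roughly 10% of chunkSize, per the documented default
  modelSettings,
};

console.log(searchRequest, ingestTextRequest);
```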
package/dist/index.d.ts
CHANGED
```diff
@@ -105,6 +105,37 @@ declare class APIError extends MemoryLayerError {
     constructor(message: string, statusCode: number, requestId?: string, details?: any);
 }
 
+/**
+ * Model provider options for multi-provider support
+ * Allows overriding org-level settings at request time
+ *
+ * Supported providers:
+ * - 'openai': OpenAI GPT models and text-embedding models
+ * - 'google': Google Gemini models and text-embedding-004
+ * - 'anthropic': Anthropic Claude models (uses OpenAI embeddings for vector search)
+ */
+type ModelProvider = 'openai' | 'google' | 'anthropic';
+/**
+ * Model settings for request-level override
+ */
+interface ModelSettings {
+    /** Model provider: 'openai', 'google', or 'anthropic' */
+    provider?: ModelProvider;
+    /**
+     * Embedding model to use:
+     * - OpenAI: 'text-embedding-3-small', 'text-embedding-3-large', 'text-embedding-ada-002'
+     * - Google: 'text-embedding-004'
+     * - Anthropic: Uses OpenAI embeddings (specify OpenAI model)
+     */
+    embeddingModel?: string;
+    /**
+     * Language model for extraction/generation:
+     * - OpenAI: 'gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'o1', 'o1-mini'
+     * - Google: 'gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.0-flash'
+     * - Anthropic: 'claude-3-5-sonnet-20241022', 'claude-3-opus-20240229', 'claude-3-haiku-20240307'
+     */
+    languageModel?: string;
+}
 /**
  * Memory object
  */
@@ -163,13 +194,13 @@ interface SearchRequest {
     query: string;
     /** Project ID to search in */
     projectId: string;
-    /** Maximum number of results to return (default: 10) */
+    /** Maximum number of results to return (default: 10 - supermemory production default) */
     limit?: number;
     /** Filter criteria for search */
     filter?: Record<string, any>;
-    /** Minimum relevance score threshold (0-1) */
+    /** Minimum relevance score threshold (0-1, default: 0.6 - supermemory production default for broad recall) */
     threshold?: number;
-    /** Enable query rewriting (default:
+    /** Enable query rewriting (default: false - adds ~400ms latency) */
     enableQueryRewriting?: boolean;
     /** Enable entity expansion search (default: false) */
     enableEntityExpansion?: boolean;
@@ -177,7 +208,7 @@ interface SearchRequest {
     enableGraphConnectivity?: boolean;
     /** Enable semantic deduplication (default: false) */
     enableSemanticDedup?: boolean;
-    /** Reranking strategy: 'none', 'cross-encoder', 'llm' (default: 'none') */
+    /** Reranking strategy: 'none', 'cross-encoder', 'llm' (default: 'none' - adds latency) */
     rerankingStrategy?: 'none' | 'cross-encoder' | 'llm';
     /** Custom fusion weights for multi-method retrieval */
     fusionWeights?: {
@@ -187,6 +218,8 @@ interface SearchRequest {
         entity?: number;
         graph?: number;
     };
+    /** Model settings override (optional - uses org settings if not specified) */
+    modelSettings?: ModelSettings;
 }
 /**
  * Search result
@@ -232,10 +265,12 @@ interface IngestFileRequest {
     projectId: string;
     /** Optional metadata to associate with ingested memories */
     metadata?: Record<string, any>;
-    /** Chunk size for splitting the file (default:
+    /** Chunk size for splitting the file (default: 512 tokens - supermemory production default) */
     chunkSize?: number;
-    /** Overlap between chunks (default:
+    /** Overlap between chunks (default: 10% - supermemory production default) */
     chunkOverlap?: number;
+    /** Model settings override (optional - uses org settings if not specified) */
+    modelSettings?: ModelSettings;
 }
 /**
  * Request to ingest text
@@ -247,10 +282,12 @@ interface IngestTextRequest {
     projectId: string;
     /** Optional metadata to associate with ingested memories */
     metadata?: Record<string, any>;
-    /** Chunk size for splitting the text (default:
+    /** Chunk size for splitting the text (default: 512 tokens - supermemory production default) */
     chunkSize?: number;
-    /** Overlap between chunks (default:
+    /** Overlap between chunks (default: 10% - supermemory production default) */
     chunkOverlap?: number;
+    /** Model settings override (optional - uses org settings if not specified) */
+    modelSettings?: ModelSettings;
 }
 /**
  * Ingestion response
@@ -278,11 +315,11 @@ interface RouterRequest {
     messages: Message[];
     /** Project ID for memory context */
     projectId: string;
-    /** Model to use (
+    /** Model to use (default: 'gpt-4o-mini' - supermemory production default) */
     model?: string;
-    /** Temperature for generation (0-2, default:
+    /** Temperature for generation (0-2, default: 0.7 - supermemory production default) */
     temperature?: number;
-    /** Maximum tokens to generate */
+    /** Maximum tokens to generate (default: 2000 - supermemory production default) */
     maxTokens?: number;
     /** Whether to stream the response */
     stream?: boolean;
@@ -644,9 +681,18 @@ declare class SearchResource {
     private httpClient;
     constructor(httpClient: HTTPClient);
     /**
-     * Search memories
+     * Search memories using the unified /v1/search endpoint with hybrid retrieval.
+     *
+     * This uses the app's full retrieval pipeline with:
+     * - Vector similarity search
+     * - BM25 keyword search
+     * - Recency scoring
+     * - Graph connectivity (optional)
+     * - Entity expansion (optional)
+     * - LLM/Cross-encoder reranking (optional)
+     *
      * @param request - Search request
-     * @returns Search results
+     * @returns Search results with memory pack structure
      */
     search(request: SearchRequest): Promise<SearchResponse>;
 }
@@ -669,6 +715,21 @@ declare class IngestResource {
      * @returns Ingestion response with created memory IDs
      */
     text(request: IngestTextRequest): Promise<IngestResponse>;
+    /**
+     * Ingest content from a URL
+     * @param url - URL to ingest from
+     * @param projectId - Project ID
+     * @param metadata - Optional metadata
+     * @returns Ingestion response with job details
+     */
+    url(url: string, projectId: string, metadata?: Record<string, any>): Promise<IngestResponse>;
+    /**
+     * Get the status of an ingestion job
+     * @param jobId - Job ID returned from ingest
+     * @param projectId - Project ID
+     * @returns Job status information
+     */
+    getJob(jobId: string, projectId: string): Promise<any>;
 }
 
 /**
```
package/dist/index.js
CHANGED
```diff
@@ -220,9 +220,18 @@ var init_search = __esm({
         this.httpClient = httpClient;
       }
       /**
-       * Search memories
+       * Search memories using the unified /v1/search endpoint with hybrid retrieval.
+       *
+       * This uses the app's full retrieval pipeline with:
+       * - Vector similarity search
+       * - BM25 keyword search
+       * - Recency scoring
+       * - Graph connectivity (optional)
+       * - Entity expansion (optional)
+       * - LLM/Cross-encoder reranking (optional)
+       *
        * @param request - Search request
-       * @returns Search results
+       * @returns Search results with memory pack structure
        */
       async search(request) {
         if (!request.query || request.query.trim().length === 0) {
@@ -237,42 +246,36 @@ var init_search = __esm({
             [{ field: "projectId", message: "Project ID is required" }]
           );
         }
-        const
-
-
+        const body = {
+          query: request.query,
+          project_id: request.projectId,
+          include_text_format: true
         };
         if (request.limit !== void 0) {
-
-        }
-        if (request.threshold !== void 0) {
-          query.threshold = request.threshold.toString();
-        }
-        if (request.filter) {
-          query.filter = JSON.stringify(request.filter);
+          body.limit = request.limit;
         }
-
-
-
-        if (request.enableEntityExpansion !== void 0) {
-          query.enableEntityExpansion = request.enableEntityExpansion.toString();
-        }
-        if (request.enableGraphConnectivity !== void 0) {
-          query.enableGraphConnectivity = request.enableGraphConnectivity.toString();
-        }
-        if (request.enableSemanticDedup !== void 0) {
-          query.enableSemanticDedup = request.enableSemanticDedup.toString();
-        }
-        if (request.rerankingStrategy) {
-          query.rerankingStrategy = request.rerankingStrategy;
-        }
-        if (request.fusionWeights) {
-          query.fusionWeights = JSON.stringify(request.fusionWeights);
-        }
-        return this.httpClient.request({
-          method: "GET",
+        body.rerank_strategy = request.rerankingStrategy || "cross-encoder";
+        const response = await this.httpClient.request({
+          method: "POST",
           path: "/v1/search",
-
+          body
         });
+        const memoryPack = response.memory_pack || {};
+        const results = [];
+        for (const memoryType of ["facts", "preferences", "entities", "sources"]) {
+          const items = memoryPack[memoryType] || [];
+          for (const item of items) {
+            results.push({
+              memory: item,
+              score: item.score || 1,
+              highlights: item.highlights || []
+            });
+          }
+        }
+        return {
+          results,
+          total: results.length
+        };
       }
     };
   }
@@ -310,18 +313,15 @@ var init_ingest = __esm({
             [{ field: "projectId", message: "Project ID is required" }]
           );
         }
-        const body = {
-          projectId: request.projectId,
-          metadata: request.metadata,
-          chunkSize: request.chunkSize,
-          chunkOverlap: request.chunkOverlap,
-          // In a real implementation, you'd convert the file to base64 or use FormData
-          file: request.file
-        };
         return this.httpClient.request({
           method: "POST",
-          path: "/v1/ingest
-          body
+          path: "/v1/ingest",
+          body: {
+            type: "pdf",
+            projectId: request.projectId,
+            metadata: request.metadata || {},
+            file: request.file
+          }
         });
       }
       /**
@@ -344,9 +344,71 @@ var init_ingest = __esm({
         }
         return this.httpClient.request({
           method: "POST",
-          path: "/v1/ingest
-          body:
+          path: "/v1/ingest",
+          body: {
+            type: "text",
+            content: request.text,
+            projectId: request.projectId,
+            metadata: request.metadata || {}
+          }
+        });
+      }
+      /**
+       * Ingest content from a URL
+       * @param url - URL to ingest from
+       * @param projectId - Project ID
+       * @param metadata - Optional metadata
+       * @returns Ingestion response with job details
+       */
+      async url(url, projectId, metadata) {
+        if (!url || url.trim().length === 0) {
+          throw new ValidationError(
+            "URL cannot be empty",
+            [{ field: "url", message: "URL is required and cannot be empty" }]
+          );
+        }
+        if (!projectId || projectId.trim().length === 0) {
+          throw new ValidationError(
+            "Project ID is required",
+            [{ field: "projectId", message: "Project ID is required" }]
+          );
+        }
+        return this.httpClient.request({
+          method: "POST",
+          path: "/v1/ingest",
+          body: {
+            type: "url",
+            url,
+            projectId,
+            metadata: metadata || {}
+          }
+        });
+      }
+      /**
+       * Get the status of an ingestion job
+       * @param jobId - Job ID returned from ingest
+       * @param projectId - Project ID
+       * @returns Job status information
+       */
+      async getJob(jobId, projectId) {
+        if (!jobId || jobId.trim().length === 0) {
+          throw new ValidationError(
+            "Job ID is required",
+            [{ field: "jobId", message: "Job ID is required" }]
+          );
+        }
+        if (!projectId || projectId.trim().length === 0) {
+          throw new ValidationError(
+            "Project ID is required",
+            [{ field: "projectId", message: "Project ID is required" }]
+          );
+        }
+        const response = await this.httpClient.request({
+          method: "GET",
+          path: `/v1/jobs/${jobId}`,
+          query: { projectId }
         });
+        return response.data || response;
      }
    };
  }
```
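The new `url()` and `getJob()` methods make URL ingestion an asynchronous job: submit the URL, then poll `/v1/jobs/{jobId}`. A hedged polling sketch follows; `client.ingest` is assumed to expose the `IngestResource` shown above, and the job-ID field name on the ingest response is a guess, since the diff does not show the response shape.

```ts
// Sketch: submit a URL for ingestion, then poll the job a few times.
// Assumptions: `client.ingest` is the IngestResource from this diff, and the
// ingest response carries the job ID under `jobId` (field name not shown here).
async function ingestUrlAndPoll(client: any, pageUrl: string, projectId: string): Promise<void> {
  const submitted = await client.ingest.url(pageUrl, projectId, { source: "docs" });
  const jobId = submitted.jobId ?? submitted.id; // field name assumed

  for (let attempt = 0; attempt < 10; attempt++) {
    // getJob() issues GET /v1/jobs/{jobId}?projectId=... and unwraps response.data.
    const status = await client.ingest.getJob(jobId, projectId);
    console.log(`attempt ${attempt + 1}:`, status);
    // A real caller would break on a terminal status; the status field names are
    // not part of this diff, so the sketch just polls a fixed number of times.
    await new Promise((resolve) => setTimeout(resolve, 2000));
  }
}
```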