@akashabot/openclaw-memory-offline-core 0.4.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +63 -0
- package/dist/index.js +225 -0
- package/package.json +1 -1
package/dist/index.d.ts
CHANGED
|
@@ -235,3 +235,66 @@ export declare function exportGraphJson(db: Database.Database, options?: {
|
|
|
235
235
|
* Find all entities matching a pattern (LIKE search).
|
|
236
236
|
*/
|
|
237
237
|
export declare function searchEntities(db: Database.Database, pattern: string, limit?: number): string[];
|
|
238
|
+
/** Aggregate statistics over the `embeddings` table. */
export type EmbeddingStats = {
    /** Total number of stored embedding rows. */
    totalEmbeddings: number;
    /** Sum of stored vector blob lengths, in bytes. */
    totalSizeBytes: number;
    /** Average dimensionality across all embeddings, rounded to 1 decimal place. */
    avgDims: number;
    /** Per-model breakdown of the same statistics. */
    models: Array<{
        model: string;
        count: number;
        avgDims: number;
    }>;
};
/**
 * Get statistics about stored embeddings.
 */
export declare function getEmbeddingStats(db: Database.Database): EmbeddingStats;
/**
 * Quantize a Float32Array to Float16 (Uint16Array representation).
 * Reduces storage by 50% with minimal accuracy loss.
 */
export declare function quantizeF32ToF16(vec: Float32Array): Uint16Array;
/**
 * Dequantize Float16 (Uint16Array) back to Float32Array.
 */
export declare function dequantizeF16ToF32(vec: Uint16Array): Float32Array;
/**
 * Compute cosine similarity between two vectors.
 * Useful for benchmarks and custom similarity searches.
 */
export declare function cosineSimilarity(a: Float32Array, b: Float32Array): number;
/**
 * Benchmark cosine similarity performance.
 * Returns ops/sec for computing similarity between two vectors.
 */
export declare function benchmarkCosineSimilarity(dims: number, iterations?: number): {
    dims: number;
    iterations: number;
    totalTimeMs: number;
    opsPerSecond: number;
};
/**
 * Run a comprehensive benchmark suite for embedding operations.
 */
export declare function runEmbeddingBenchmark(): {
    f32ToF16: {
        dims: number;
        iterations: number;
        opsPerSecond: number;
    };
    f16ToF32: {
        dims: number;
        iterations: number;
        opsPerSecond: number;
    };
    cosine: {
        dims: number;
        iterations: number;
        opsPerSecond: number;
    };
    cosineF16: {
        dims: number;
        iterations: number;
        opsPerSecond: number;
    };
};
|
package/dist/index.js
CHANGED
|
@@ -906,3 +906,228 @@ export function searchEntities(db, pattern, limit = 50) {
|
|
|
906
906
|
.all(likePattern, likePattern, limit);
|
|
907
907
|
return rows.map(r => r.entity);
|
|
908
908
|
}
|
|
909
|
+
/**
 * Get statistics about stored embeddings.
 *
 * @param db - Open database handle (assumed better-sqlite3-compatible;
 *   an `embeddings` table with `vector`, `dims`, `model` columns —
 *   TODO confirm schema against the migrations).
 * @returns Totals plus a per-model breakdown; averages rounded to 1 dp.
 */
export function getEmbeddingStats(db) {
    // Round to one decimal place, matching the declared EmbeddingStats contract.
    const round1 = (x) => Math.round(x * 10) / 10;
    const totals = db
        .prepare(`SELECT COUNT(*) as count, SUM(LENGTH(vector)) as totalSize, AVG(dims) as avgDims FROM embeddings`)
        .get();
    const perModel = db
        .prepare(`SELECT model, COUNT(*) as count, AVG(dims) as avgDims FROM embeddings GROUP BY model`)
        .all();
    const models = perModel.map((row) => ({
        model: row.model,
        count: row.count,
        avgDims: round1(row.avgDims),
    }));
    // SUM/AVG yield NULL on an empty table; coalesce to 0.
    return {
        totalEmbeddings: totals?.count ?? 0,
        totalSizeBytes: totals?.totalSize ?? 0,
        avgDims: round1(totals?.avgDims ?? 0),
        models,
    };
}
|
|
930
|
+
/**
 * Quantize a Float32Array to Float16 (Uint16Array bit patterns).
 * Reduces storage by 50% with minimal accuracy loss.
 *
 * Conversion truncates the mantissa (round-toward-zero). Magnitudes above
 * the Float16 range overflow to +/-Infinity; magnitudes below the smallest
 * Float16 denormal flush to signed zero. NaN maps to the canonical 0x7E00.
 *
 * Perf fix: the 4-byte scratch buffer used to reinterpret Float32 bits is
 * hoisted out of the loop — the original allocated a new ArrayBuffer and
 * two typed-array views per element.
 *
 * @param {Float32Array} vec - Input vector.
 * @returns {Uint16Array} One Float16 bit pattern per input element.
 */
export function quantizeF32ToF16(vec) {
    const result = new Uint16Array(vec.length);
    // Shared scratch buffer for bit-level reinterpretation of each float.
    const buf = new ArrayBuffer(4);
    const f32 = new Float32Array(buf);
    const u32 = new Uint32Array(buf);
    for (let i = 0; i < vec.length; i++) {
        const f = vec[i];
        // Special cases first: NaN and the two infinities.
        if (Number.isNaN(f)) {
            result[i] = 0x7E00; // Float16 NaN
            continue;
        }
        if (f === Infinity) {
            result[i] = 0x7C00; // Float16 +Infinity
            continue;
        }
        if (f === -Infinity) {
            result[i] = 0xFC00; // Float16 -Infinity
            continue;
        }
        // Reinterpret the Float32 bits, then repack sign/exponent/mantissa.
        f32[0] = f;
        const x = u32[0];
        const sign = (x >>> 31) & 0x1;
        let exponent = (x >>> 23) & 0xFF;
        let mantissa = x & 0x7FFFFF;
        if (exponent === 0) {
            // Float32 zero or denormal — far below Float16 range: signed zero.
            result[i] = sign << 15;
        }
        else if (exponent === 255) {
            // Infinity or NaN (already handled above; kept for safety).
            result[i] = (sign << 15) | 0x7C00 | (mantissa ? 0x200 : 0);
        }
        else {
            // Rebias exponent from Float32 (127) to Float16 (15).
            exponent = exponent - 127 + 15;
            if (exponent >= 31) {
                // Overflow: too large for Float16 -> Infinity.
                result[i] = (sign << 15) | 0x7C00;
            }
            else if (exponent <= 0) {
                if (exponent < -10) {
                    // Underflow below the smallest denormal -> signed zero.
                    result[i] = sign << 15;
                }
                else {
                    // Float16 denormal: restore the implicit leading bit and
                    // shift the mantissa into the denormal range.
                    mantissa |= 0x800000;
                    const shift = 14 - exponent;
                    result[i] = (sign << 15) | (mantissa >> shift);
                }
            }
            else {
                // Normalized Float16: keep the top 10 mantissa bits.
                result[i] = (sign << 15) | (exponent << 10) | (mantissa >> 13);
            }
        }
    }
    return result;
}
|
|
995
|
+
/**
 * Dequantize Float16 (Uint16Array bit patterns) back to a Float32Array.
 *
 * Bug fix: the denormal branch previously shifted the mantissa left by
 * (Math.clz32(mantissa) - 21) before scaling by 2^-24, which inflated every
 * denormal by a power of two (e.g. bits 0x0200 decoded to 2^-14 instead of
 * 2^-15, breaking quantize/dequantize round-trips for small magnitudes).
 * A Float16 denormal's value is exactly mantissa * 2^-24.
 *
 * @param {Uint16Array} vec - Float16 bit patterns.
 * @returns {Float32Array} Decoded values.
 */
export function dequantizeF16ToF32(vec) {
    const result = new Float32Array(vec.length);
    for (let i = 0; i < vec.length; i++) {
        const h = vec[i];
        const sign = (h >>> 15) & 0x1;
        const exponent = (h >>> 10) & 0x1F;
        const mantissa = h & 0x3FF;
        if (exponent === 0) {
            if (mantissa === 0) {
                // Signed zero.
                result[i] = sign ? -0 : 0;
            }
            else {
                // Denormalized: no implicit leading bit; value = mantissa * 2^-24.
                result[i] = (sign ? -1 : 1) * mantissa * Math.pow(2, -24);
            }
        }
        else if (exponent === 31) {
            // Infinity (mantissa 0) or NaN (mantissa non-zero).
            result[i] = mantissa ? NaN : (sign ? -Infinity : Infinity);
        }
        else {
            // Normalized: (-1)^sign * 2^(exponent-15) * (1 + mantissa/1024).
            result[i] = (sign ? -1 : 1) * Math.pow(2, exponent - 15) * (1 + mantissa / 1024);
        }
    }
    return result;
}
|
|
1027
|
+
/**
 * Cosine similarity of two equal-length vectors.
 * Useful for benchmarks and custom similarity searches.
 *
 * @param {Float32Array} a - First vector.
 * @param {Float32Array} b - Second vector (same length as `a`).
 * @returns {number} Similarity in [-1, 1]; 0 when either vector has zero magnitude.
 * @throws {Error} When the vectors differ in length.
 */
export function cosineSimilarity(a, b) {
    if (a.length !== b.length) {
        throw new Error(`Vector length mismatch: ${a.length} vs ${b.length}`);
    }
    const n = a.length;
    let dot = 0;
    let sqA = 0;
    let sqB = 0;
    for (let idx = 0; idx < n; idx++) {
        const x = a[idx];
        const y = b[idx];
        dot += x * y;
        sqA += x * x;
        sqB += y * y;
    }
    const scale = Math.sqrt(sqA) * Math.sqrt(sqB);
    // Guard against division by zero for zero-magnitude inputs.
    if (scale === 0) {
        return 0;
    }
    return dot / scale;
}
|
|
1046
|
+
/**
 * Benchmark cosine similarity performance.
 * Builds two random vectors of `dims` length, performs a short warm-up so
 * the JIT settles, then times `iterations` similarity computations.
 *
 * @param {number} dims - Vector dimensionality.
 * @param {number} [iterations=10000] - Number of timed iterations.
 * @returns Timing summary including ops/sec.
 */
export function benchmarkCosineSimilarity(dims, iterations = 10000) {
    // Random inputs in [-1, 1).
    const left = new Float32Array(dims);
    const right = new Float32Array(dims);
    for (let d = 0; d < dims; d++) {
        left[d] = Math.random() * 2 - 1;
        right[d] = Math.random() * 2 - 1;
    }
    // Warm up before measuring.
    let warm = 100;
    while (warm-- > 0) {
        cosineSimilarity(left, right);
    }
    // Timed loop.
    const begin = performance.now();
    for (let it = 0; it < iterations; it++) {
        cosineSimilarity(left, right);
    }
    const totalTimeMs = performance.now() - begin;
    return {
        dims,
        iterations,
        totalTimeMs,
        opsPerSecond: Math.round((iterations / totalTimeMs) * 1000),
    };
}
|
|
1076
|
+
/**
 * Run a comprehensive benchmark suite for embedding operations:
 * f32->f16 quantization, f16->f32 dequantization, f32 cosine similarity,
 * and cosine over f16 inputs (including per-call dequantization cost).
 *
 * @returns ops/sec for each operation at 1024 dimensions, 5000 iterations.
 */
export function runEmbeddingBenchmark() {
    const dims = 1024;
    const iterations = 5000;
    // Time `fn` over `iterations` calls and report ops/sec.
    const measure = (fn) => {
        const begin = performance.now();
        for (let i = 0; i < iterations; i++) {
            fn();
        }
        const elapsed = performance.now() - begin;
        return {
            dims,
            iterations,
            opsPerSecond: Math.round((iterations / elapsed) * 1000),
        };
    };
    // Fill `target` with random values in [-1, 1).
    const randomize = (target) => {
        for (let i = 0; i < dims; i++) {
            target[i] = Math.random() * 2 - 1;
        }
    };
    const vecA = new Float32Array(dims);
    randomize(vecA);
    // F32 -> F16 conversion benchmark.
    const f32ToF16 = measure(() => quantizeF32ToF16(vecA));
    // F16 -> F32 conversion benchmark.
    const halfA = quantizeF32ToF16(vecA);
    const f16ToF32 = measure(() => dequantizeF16ToF32(halfA));
    // F32 cosine benchmark.
    const vecB = new Float32Array(dims);
    randomize(vecB);
    const cosine = measure(() => cosineSimilarity(vecA, vecB));
    // F16 cosine (dequantizing both operands each call) benchmark.
    const halfB = quantizeF32ToF16(vecB);
    const cosineF16 = measure(() => cosineSimilarity(dequantizeF16ToF32(halfA), dequantizeF16ToF32(halfB)));
    return { f32ToF16, f16ToF32, cosine, cosineF16 };
}
|
package/package.json
CHANGED