openclaw-memory-alibaba-local 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +88 -0
- package/bm25-recall.ts +71 -0
- package/capture-state.ts +206 -0
- package/categories.ts +106 -0
- package/config.ts +570 -0
- package/db.ts +877 -0
- package/embed-chunks.ts +63 -0
- package/embedding-backend.ts +186 -0
- package/index.ts +1638 -0
- package/openclaw.plugin.json +228 -0
- package/package.json +51 -0
- package/prompt-strip.ts +141 -0
- package/prompts.ts +117 -0
- package/web/memory-routes.ts +433 -0
- package/web/memory-ui.ts +2121 -0
package/embed-chunks.ts
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Paragraph-based chunks with approximate token budget (chars/4), then sub-split long paragraphs.
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
export function approxTokenCount(text: string): number {
|
|
6
|
+
return Math.ceil(text.length / 4);
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
function splitLongParagraph(p: string, maxToken: number): string[] {
|
|
10
|
+
const maxTok = Math.max(16, maxToken);
|
|
11
|
+
if (approxTokenCount(p) <= maxTok) {
|
|
12
|
+
return [p];
|
|
13
|
+
}
|
|
14
|
+
const charBudget = Math.max(64, Math.floor(maxTok * 4));
|
|
15
|
+
const chunks: string[] = [];
|
|
16
|
+
let i = 0;
|
|
17
|
+
while (i < p.length) {
|
|
18
|
+
let end = Math.min(p.length, i + charBudget);
|
|
19
|
+
if (end < p.length) {
|
|
20
|
+
const windowStart = Math.max(i, end - Math.floor(charBudget * 0.25));
|
|
21
|
+
const slice = p.slice(windowStart, end);
|
|
22
|
+
const nl = slice.lastIndexOf("\n");
|
|
23
|
+
const dotEn = slice.lastIndexOf(". ");
|
|
24
|
+
const dotZh = slice.lastIndexOf("。");
|
|
25
|
+
const cut = Math.max(nl, dotEn >= 0 ? dotEn + 1 : -1, dotZh >= 0 ? dotZh + 1 : -1);
|
|
26
|
+
if (cut >= 0) {
|
|
27
|
+
end = windowStart + cut;
|
|
28
|
+
}
|
|
29
|
+
}
|
|
30
|
+
const piece = p.slice(i, end).trim();
|
|
31
|
+
if (piece.length > 0) {
|
|
32
|
+
chunks.push(piece);
|
|
33
|
+
}
|
|
34
|
+
if (end <= i) {
|
|
35
|
+
end = Math.min(p.length, i + charBudget);
|
|
36
|
+
}
|
|
37
|
+
i = end;
|
|
38
|
+
}
|
|
39
|
+
return chunks.length > 0 ? chunks : [p.slice(0, charBudget).trim()].filter(Boolean);
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
/** Split on blank-line paragraphs; each piece capped at ~maxToken approximate tokens. */
|
|
43
|
+
export function splitTextIntoEmbeddingChunks(text: string, maxToken: number): string[] {
|
|
44
|
+
const maxTok = Math.max(16, Math.floor(maxToken));
|
|
45
|
+
const normalized = text.replace(/\r\n/g, "\n").trim();
|
|
46
|
+
if (!normalized) {
|
|
47
|
+
return [];
|
|
48
|
+
}
|
|
49
|
+
// 整段仍在单次 embedding 预算内时保持一条 chunk。否则带 ``` / OpenClaw metadata 的用户消息里
|
|
50
|
+
// 常有多个空行分段,会被拆成多条向量行,LanceDB 里看起来像「同一句话存了三次」。
|
|
51
|
+
if (approxTokenCount(normalized) <= maxTok) {
|
|
52
|
+
return [normalized];
|
|
53
|
+
}
|
|
54
|
+
const paras = normalized.split(/\n\s*\n/).map((p) => p.trim()).filter((p) => p.length > 0);
|
|
55
|
+
if (paras.length === 0) {
|
|
56
|
+
return splitLongParagraph(normalized, maxTok);
|
|
57
|
+
}
|
|
58
|
+
const out: string[] = [];
|
|
59
|
+
for (const p of paras) {
|
|
60
|
+
out.push(...splitLongParagraph(p, maxTok));
|
|
61
|
+
}
|
|
62
|
+
return out;
|
|
63
|
+
}
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
import { spawn } from "node:child_process";
|
|
2
|
+
import OpenAI from "openai";
|
|
3
|
+
import { splitTextIntoEmbeddingChunks } from "./embed-chunks.js";
|
|
4
|
+
import type { EmbeddingConfig, EmbeddingConfigRemote } from "./config.js";
|
|
5
|
+
import { modelSupportsFlexDimensions } from "./config.js";
|
|
6
|
+
|
|
7
|
+
/**
 * Abstraction over the two embedding providers built by createEmbeddingBackend():
 * a local llama-embedding subprocess or a remote OpenAI-compatible API.
 */
export type EmbeddingBackend = {
  // Dimensionality of the vectors this backend is configured to produce.
  readonly vectorDim: number;
  // Approximate per-chunk token budget used when splitting text for storage.
  readonly maxToken: number;
  /** One request / one subprocess per chunk batch item; splits text using maxToken. */
  embedTexts(texts: string[]): Promise<number[][]>;
  // Splits fullText into chunks (see splitTextIntoEmbeddingChunks) and embeds each;
  // chunks[i] corresponds to vectors[i].
  encodeForStorage(fullText: string): Promise<{ chunks: string[]; vectors: number[][] }>;
};
|
|
14
|
+
|
|
15
|
+
// Default local embedding command: llama.cpp's `llama-embedding`, reading the prompt
// from /dev/stdin and emitting JSON output. The leading `~` is expanded by
// expandTildeInCommandPrefix before the command is handed to /bin/sh.
const DEFAULT_LOCAL_PREFIX =
  "llama-embedding -m ~/.openclaw/embedding_model/embeddinggemma-300M-Q8_0.gguf -f /dev/stdin --embd-output-format json ";
|
|
17
|
+
|
|
18
|
+
function expandTildeInCommandPrefix(prefix: string): string {
|
|
19
|
+
const home = process.env.HOME || "";
|
|
20
|
+
return prefix
|
|
21
|
+
.replace(/~/g, "~")
|
|
22
|
+
.replace(/(^|\s)~\//g, `$1${home}/`);
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
export function resolveEnvVarsForEmbedding(value: string): string {
|
|
26
|
+
return value.replace(/\$\{([^}]+)\}/g, (_, envVar: string) => {
|
|
27
|
+
const envValue = process.env[envVar];
|
|
28
|
+
if (!envValue) {
|
|
29
|
+
throw new Error(`Environment variable ${envVar} is not set`);
|
|
30
|
+
}
|
|
31
|
+
return envValue;
|
|
32
|
+
});
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
function assertRemoteReady(cfg: EmbeddingConfigRemote): void {
|
|
36
|
+
const apiKey = (cfg.apiKey ?? "").trim();
|
|
37
|
+
const model = (cfg.model ?? "").trim();
|
|
38
|
+
const baseUrl = (cfg.baseUrl ?? "").trim();
|
|
39
|
+
if (!apiKey) {
|
|
40
|
+
throw new Error("embedding.apiKey is required when mode is remote");
|
|
41
|
+
}
|
|
42
|
+
if (!model) {
|
|
43
|
+
throw new Error("embedding.model is required when mode is remote");
|
|
44
|
+
}
|
|
45
|
+
if (!baseUrl) {
|
|
46
|
+
throw new Error("embedding.baseUrl is required when mode is remote");
|
|
47
|
+
}
|
|
48
|
+
if (typeof cfg.dimensions !== "number" || !Number.isFinite(cfg.dimensions) || cfg.dimensions <= 0) {
|
|
49
|
+
throw new Error("embedding.dimensions must be a positive number when mode is remote");
|
|
50
|
+
}
|
|
51
|
+
if (typeof cfg.maxToken !== "number" || !Number.isFinite(cfg.maxToken) || cfg.maxToken <= 0) {
|
|
52
|
+
throw new Error("embedding.maxToken must be a positive number when mode is remote");
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
function parseLlamaEmbeddingStdout(stdout: string): number[] {
|
|
57
|
+
const trimmed = stdout.trim();
|
|
58
|
+
let jsonRaw = trimmed;
|
|
59
|
+
const firstBrace = trimmed.indexOf("{");
|
|
60
|
+
const lastBrace = trimmed.lastIndexOf("}");
|
|
61
|
+
if (firstBrace >= 0 && lastBrace > firstBrace) {
|
|
62
|
+
jsonRaw = trimmed.slice(firstBrace, lastBrace + 1);
|
|
63
|
+
}
|
|
64
|
+
const parsed = JSON.parse(jsonRaw) as {
|
|
65
|
+
data?: Array<{ embedding?: number[] }>;
|
|
66
|
+
embedding?: number[];
|
|
67
|
+
};
|
|
68
|
+
const emb = parsed.data?.[0]?.embedding ?? parsed.embedding;
|
|
69
|
+
if (!Array.isArray(emb) || emb.length === 0) {
|
|
70
|
+
throw new Error("llama-embedding: could not parse embedding from stdout");
|
|
71
|
+
}
|
|
72
|
+
return emb.map((x) => Number(x));
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
function runLocalEmbed(commandPrefix: string, text: string): Promise<number[]> {
|
|
76
|
+
const cmd = expandTildeInCommandPrefix(commandPrefix.trimEnd());
|
|
77
|
+
return new Promise((resolve, reject) => {
|
|
78
|
+
const child = spawn("/bin/sh", ["-c", cmd], {
|
|
79
|
+
stdio: ["pipe", "pipe", "pipe"],
|
|
80
|
+
env: process.env,
|
|
81
|
+
});
|
|
82
|
+
let out = "";
|
|
83
|
+
let err = "";
|
|
84
|
+
child.stdout?.on("data", (c: Buffer) => {
|
|
85
|
+
out += c.toString("utf8");
|
|
86
|
+
});
|
|
87
|
+
child.stderr?.on("data", (c: Buffer) => {
|
|
88
|
+
err += c.toString("utf8");
|
|
89
|
+
});
|
|
90
|
+
child.on("error", reject);
|
|
91
|
+
child.on("close", (code) => {
|
|
92
|
+
if (code !== 0) {
|
|
93
|
+
reject(new Error(`llama-embedding exited ${code}: ${err.slice(-2000) || out.slice(-2000)}`));
|
|
94
|
+
return;
|
|
95
|
+
}
|
|
96
|
+
try {
|
|
97
|
+
resolve(parseLlamaEmbeddingStdout(out));
|
|
98
|
+
} catch (e) {
|
|
99
|
+
reject(new Error(`llama-embedding parse failed: ${String(e)}`));
|
|
100
|
+
}
|
|
101
|
+
});
|
|
102
|
+
child.stdin?.write(text, "utf8");
|
|
103
|
+
child.stdin?.end();
|
|
104
|
+
});
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
/**
 * Build an EmbeddingBackend from config.
 *
 * Remote mode: OpenAI-compatible embeddings API. The client is created lazily on the
 * first embedTexts() call, after assertRemoteReady() validates the config; `${VAR}`
 * placeholders in apiKey/baseUrl/model are resolved from the environment at that point.
 *
 * Local mode: shells out via runLocalEmbed() once per chunk, sequentially, using
 * cfg.commandPrefix or DEFAULT_LOCAL_PREFIX.
 */
export function createEmbeddingBackend(cfg: EmbeddingConfig): EmbeddingBackend {
  if (cfg.mode === "remote") {
    const remoteCfg = cfg;
    const maxToken = remoteCfg.maxToken;
    const vectorDim = remoteCfg.dimensions;
    // Lazily constructed so merely creating the backend never throws on a missing
    // API key; validation happens on first use instead.
    let client: OpenAI | null = null;
    // Whether to include `dimensions` in the request — presumably only models that
    // accept flexible output sizes; see modelSupportsFlexDimensions in config.ts.
    const sendDimensions = modelSupportsFlexDimensions(remoteCfg.model);

    async function embedTexts(texts: string[]): Promise<number[][]> {
      // Validate on every call (cheap) so a config edited after creation still fails loudly.
      assertRemoteReady(remoteCfg);
      if (!client) {
        client = new OpenAI({
          apiKey: resolveEnvVarsForEmbedding(remoteCfg.apiKey.trim()),
          baseURL: resolveEnvVarsForEmbedding(remoteCfg.baseUrl.trim()),
        });
      }
      const model = resolveEnvVarsForEmbedding(remoteCfg.model.trim());
      if (texts.length === 0) {
        return [];
      }
      const params: { model: string; input: string[]; dimensions?: number } = {
        model,
        input: texts,
      };
      if (sendDimensions && vectorDim > 0) {
        params.dimensions = vectorDim;
      }
      const response = await client.embeddings.create(params);
      // Re-validate the response row-by-row: a missing vector at any index is an error.
      const out: number[][] = [];
      for (let i = 0; i < texts.length; i++) {
        const row = response.data[i];
        if (!row?.embedding) {
          throw new Error(`embedding API: missing vector at index ${i}`);
        }
        out.push(row.embedding.map((x) => Number(x)));
      }
      return out;
    }

    return {
      vectorDim,
      maxToken,
      embedTexts,
      // Chunk the full text to the token budget, then embed all chunks in one API call.
      async encodeForStorage(fullText: string) {
        const chunks = splitTextIntoEmbeddingChunks(fullText, maxToken);
        if (chunks.length === 0) {
          return { chunks: [], vectors: [] };
        }
        const vectors = await embedTexts(chunks);
        return { chunks, vectors };
      },
    };
  }

  // Local mode. Trailing space ensures the prefix and any appended text stay separated.
  const commandPrefix = (cfg.commandPrefix?.trim() || DEFAULT_LOCAL_PREFIX).trimEnd() + " ";
  const maxToken = cfg.maxToken ?? 2048;
  const vectorDim = cfg.dimensions ?? 768;

  // One subprocess per text, run sequentially (no concurrency).
  async function embedTextsLocal(texts: string[]): Promise<number[][]> {
    const out: number[][] = [];
    for (const t of texts) {
      out.push(await runLocalEmbed(commandPrefix, t));
    }
    return out;
  }

  return {
    vectorDim,
    maxToken,
    embedTexts: embedTextsLocal,
    // Same chunk-then-embed flow as remote, but each chunk spawns its own subprocess.
    async encodeForStorage(fullText: string) {
      const chunks = splitTextIntoEmbeddingChunks(fullText, maxToken);
      if (chunks.length === 0) {
        return { chunks: [], vectors: [] };
      }
      const vectors = await embedTextsLocal(chunks);
      return { chunks, vectors };
    },
  };
}
|