@psiclawops/hypermem 0.5.0 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/background-indexer.d.ts +132 -0
- package/dist/background-indexer.d.ts.map +1 -0
- package/dist/background-indexer.js +1044 -0
- package/dist/cache.d.ts +110 -0
- package/dist/cache.d.ts.map +1 -0
- package/dist/cache.js +495 -0
- package/dist/compaction-fence.d.ts +89 -0
- package/dist/compaction-fence.d.ts.map +1 -0
- package/dist/compaction-fence.js +153 -0
- package/dist/compositor.d.ts +226 -0
- package/dist/compositor.d.ts.map +1 -0
- package/dist/compositor.js +2558 -0
- package/dist/content-type-classifier.d.ts +41 -0
- package/dist/content-type-classifier.d.ts.map +1 -0
- package/dist/content-type-classifier.js +181 -0
- package/dist/cross-agent.d.ts +62 -0
- package/dist/cross-agent.d.ts.map +1 -0
- package/dist/cross-agent.js +259 -0
- package/dist/db.d.ts +131 -0
- package/dist/db.d.ts.map +1 -0
- package/dist/db.js +402 -0
- package/dist/desired-state-store.d.ts +100 -0
- package/dist/desired-state-store.d.ts.map +1 -0
- package/dist/desired-state-store.js +222 -0
- package/dist/doc-chunk-store.d.ts +140 -0
- package/dist/doc-chunk-store.d.ts.map +1 -0
- package/dist/doc-chunk-store.js +391 -0
- package/dist/doc-chunker.d.ts +99 -0
- package/dist/doc-chunker.d.ts.map +1 -0
- package/dist/doc-chunker.js +324 -0
- package/dist/dreaming-promoter.d.ts +86 -0
- package/dist/dreaming-promoter.d.ts.map +1 -0
- package/dist/dreaming-promoter.js +381 -0
- package/dist/episode-store.d.ts +49 -0
- package/dist/episode-store.d.ts.map +1 -0
- package/dist/episode-store.js +135 -0
- package/dist/fact-store.d.ts +75 -0
- package/dist/fact-store.d.ts.map +1 -0
- package/dist/fact-store.js +236 -0
- package/dist/fleet-store.d.ts +144 -0
- package/dist/fleet-store.d.ts.map +1 -0
- package/dist/fleet-store.js +276 -0
- package/dist/fos-mod.d.ts +178 -0
- package/dist/fos-mod.d.ts.map +1 -0
- package/dist/fos-mod.js +416 -0
- package/dist/hybrid-retrieval.d.ts +64 -0
- package/dist/hybrid-retrieval.d.ts.map +1 -0
- package/dist/hybrid-retrieval.js +344 -0
- package/dist/image-eviction.d.ts +49 -0
- package/dist/image-eviction.d.ts.map +1 -0
- package/dist/image-eviction.js +251 -0
- package/dist/index.d.ts +650 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +1072 -0
- package/dist/keystone-scorer.d.ts +51 -0
- package/dist/keystone-scorer.d.ts.map +1 -0
- package/dist/keystone-scorer.js +52 -0
- package/dist/knowledge-graph.d.ts +110 -0
- package/dist/knowledge-graph.d.ts.map +1 -0
- package/dist/knowledge-graph.js +305 -0
- package/dist/knowledge-lint.d.ts +29 -0
- package/dist/knowledge-lint.d.ts.map +1 -0
- package/dist/knowledge-lint.js +116 -0
- package/dist/knowledge-store.d.ts +72 -0
- package/dist/knowledge-store.d.ts.map +1 -0
- package/dist/knowledge-store.js +247 -0
- package/dist/library-schema.d.ts +22 -0
- package/dist/library-schema.d.ts.map +1 -0
- package/dist/library-schema.js +1038 -0
- package/dist/message-store.d.ts +89 -0
- package/dist/message-store.d.ts.map +1 -0
- package/dist/message-store.js +323 -0
- package/dist/metrics-dashboard.d.ts +114 -0
- package/dist/metrics-dashboard.d.ts.map +1 -0
- package/dist/metrics-dashboard.js +260 -0
- package/dist/obsidian-exporter.d.ts +57 -0
- package/dist/obsidian-exporter.d.ts.map +1 -0
- package/dist/obsidian-exporter.js +274 -0
- package/dist/obsidian-watcher.d.ts +147 -0
- package/dist/obsidian-watcher.d.ts.map +1 -0
- package/dist/obsidian-watcher.js +403 -0
- package/dist/open-domain.d.ts +46 -0
- package/dist/open-domain.d.ts.map +1 -0
- package/dist/open-domain.js +125 -0
- package/dist/preference-store.d.ts +54 -0
- package/dist/preference-store.d.ts.map +1 -0
- package/dist/preference-store.js +109 -0
- package/dist/preservation-gate.d.ts +82 -0
- package/dist/preservation-gate.d.ts.map +1 -0
- package/dist/preservation-gate.js +150 -0
- package/dist/proactive-pass.d.ts +63 -0
- package/dist/proactive-pass.d.ts.map +1 -0
- package/dist/proactive-pass.js +239 -0
- package/dist/profiles.d.ts +44 -0
- package/dist/profiles.d.ts.map +1 -0
- package/dist/profiles.js +227 -0
- package/dist/provider-translator.d.ts +50 -0
- package/dist/provider-translator.d.ts.map +1 -0
- package/dist/provider-translator.js +403 -0
- package/dist/rate-limiter.d.ts +76 -0
- package/dist/rate-limiter.d.ts.map +1 -0
- package/dist/rate-limiter.js +179 -0
- package/dist/repair-tool-pairs.d.ts +38 -0
- package/dist/repair-tool-pairs.d.ts.map +1 -0
- package/dist/repair-tool-pairs.js +138 -0
- package/dist/retrieval-policy.d.ts +51 -0
- package/dist/retrieval-policy.d.ts.map +1 -0
- package/dist/retrieval-policy.js +77 -0
- package/dist/schema.d.ts +15 -0
- package/dist/schema.d.ts.map +1 -0
- package/dist/schema.js +229 -0
- package/dist/secret-scanner.d.ts +51 -0
- package/dist/secret-scanner.d.ts.map +1 -0
- package/dist/secret-scanner.js +248 -0
- package/dist/seed.d.ts +108 -0
- package/dist/seed.d.ts.map +1 -0
- package/dist/seed.js +177 -0
- package/dist/session-flusher.d.ts +53 -0
- package/dist/session-flusher.d.ts.map +1 -0
- package/dist/session-flusher.js +69 -0
- package/dist/session-topic-map.d.ts +41 -0
- package/dist/session-topic-map.d.ts.map +1 -0
- package/dist/session-topic-map.js +77 -0
- package/dist/spawn-context.d.ts +54 -0
- package/dist/spawn-context.d.ts.map +1 -0
- package/dist/spawn-context.js +159 -0
- package/dist/system-store.d.ts +73 -0
- package/dist/system-store.d.ts.map +1 -0
- package/dist/system-store.js +182 -0
- package/dist/temporal-store.d.ts +80 -0
- package/dist/temporal-store.d.ts.map +1 -0
- package/dist/temporal-store.js +149 -0
- package/dist/topic-detector.d.ts +35 -0
- package/dist/topic-detector.d.ts.map +1 -0
- package/dist/topic-detector.js +249 -0
- package/dist/topic-store.d.ts +45 -0
- package/dist/topic-store.d.ts.map +1 -0
- package/dist/topic-store.js +136 -0
- package/dist/topic-synthesizer.d.ts +51 -0
- package/dist/topic-synthesizer.d.ts.map +1 -0
- package/dist/topic-synthesizer.js +315 -0
- package/dist/trigger-registry.d.ts +63 -0
- package/dist/trigger-registry.d.ts.map +1 -0
- package/dist/trigger-registry.js +163 -0
- package/dist/types.d.ts +533 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +9 -0
- package/dist/vector-store.d.ts +170 -0
- package/dist/vector-store.d.ts.map +1 -0
- package/dist/vector-store.js +677 -0
- package/dist/version.d.ts +34 -0
- package/dist/version.d.ts.map +1 -0
- package/dist/version.js +34 -0
- package/dist/wiki-page-emitter.d.ts +65 -0
- package/dist/wiki-page-emitter.d.ts.map +1 -0
- package/dist/wiki-page-emitter.js +258 -0
- package/dist/work-store.d.ts +112 -0
- package/dist/work-store.d.ts.map +1 -0
- package/dist/work-store.js +273 -0
- package/package.json +1 -1
|
@@ -0,0 +1,403 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* hypermem Provider Translator
|
|
3
|
+
*
|
|
4
|
+
* Converts between provider-neutral (NeutralMessage) and provider-specific formats.
|
|
5
|
+
* This is the ONLY place where provider-specific formatting exists.
|
|
6
|
+
* Storage is always neutral. Translation happens at the send/receive boundary.
|
|
7
|
+
*
|
|
8
|
+
* This eliminates grafting/stripping entirely — tool calls are stored as structured
|
|
9
|
+
* data, and each provider gets the format it expects at send time.
|
|
10
|
+
*/
|
|
11
|
+
/**
 * Render an orphaned tool result as a short plain-text note.
 * Collapses whitespace and truncates long output to a ~160-char preview.
 */
function summarizeOrphanToolResult(tr) {
    const name = tr.name || 'tool';
    const kind = tr.isError ? 'error' : 'result';
    const header = `[${name} ${kind} omitted: missing matching tool call]`;
    const normalized = (tr.content || '').replace(/\s+/g, ' ').trim();
    if (!normalized) {
        return header;
    }
    const preview = normalized.length > 160 ? normalized.slice(0, 157) + '...' : normalized;
    return `${header} ${preview}`;
}
/**
 * Final pair-integrity sweep before provider translation.
 *
 * Invariant: never emit a tool_result unless its matching tool_use/tool_call
 * exists in the immediately prior assistant message with the same ID.
 *
 * If the pair is broken, degrade the orphan tool_result into plain user text
 * so providers never see an invalid tool_result block.
 */
export function repairToolCallPairs(messages) {
    const out = [];
    for (const msg of messages) {
        // Only user messages carrying tool results need inspection.
        if (msg.role !== 'user' || !msg.toolResults || msg.toolResults.length === 0) {
            out.push(msg);
            continue;
        }
        // Collect the tool-call IDs declared by the immediately preceding
        // assistant message — these are the only valid pairings.
        const prev = out[out.length - 1];
        const knownIds = new Set();
        if (prev?.role === 'assistant' && prev.toolCalls) {
            for (const tc of prev.toolCalls) {
                knownIds.add(tc.id);
            }
        }
        const kept = [];
        const orphans = [];
        for (const tr of msg.toolResults) {
            (knownIds.has(tr.callId) ? kept : orphans).push(tr);
        }
        if (orphans.length === 0) {
            out.push(msg);
            continue;
        }
        // Orphans become plain text appended to any existing user text.
        const orphanText = orphans.map(summarizeOrphanToolResult).join('\n');
        const mergedText = [msg.textContent, orphanText].filter(Boolean).join('\n');
        if (kept.length > 0) {
            out.push({
                ...msg,
                textContent: mergedText || msg.textContent,
                toolResults: kept,
            });
        }
        else {
            out.push({
                ...msg,
                textContent: mergedText || msg.textContent || '[tool result omitted: missing matching tool call]',
                toolResults: null,
            });
        }
    }
    return out;
}
|
|
64
|
+
import { createHash } from 'node:crypto';
|
|
65
|
+
// ─── ID Generation ───────────────────────────────────────────────
// Monotonic per-process counter combined with a timestamp for uniqueness.
let idCounter = 0;
/**
 * Generate a hypermem-native tool call ID.
 * These are provider-neutral and deterministic within a session.
 */
export function generateToolCallId() {
    idCounter += 1;
    const stamp = Date.now().toString(36);
    const seq = idCounter.toString(36).padStart(4, '0');
    return `hm_${stamp}_${seq}`;
}
/**
 * Map a provider-specific tool call ID onto a hypermem ID.
 * Deterministic: the same provider ID always yields the same result.
 */
export function normalizeToolCallId(providerId) {
    if (providerId.startsWith('hm_')) {
        return providerId; // already in hypermem form
    }
    const digest = createHash('sha256').update(providerId).digest('hex');
    return `hm_${digest.slice(0, 12)}`;
}
|
|
87
|
+
/**
 * Classify a free-form provider string into a known provider family.
 * Returns 'anthropic', 'openai-responses', 'openai', or 'unknown'.
 */
export function detectProvider(providerString) {
    if (!providerString) {
        return 'unknown';
    }
    const needle = providerString.toLowerCase();
    const has = (s) => needle.includes(s);
    // Order matters: "codex"/"responses" must win over the generic
    // "openai"/"gpt" match below.
    if (has('anthropic') || has('claude')) {
        return 'anthropic';
    }
    if (has('codex') || has('responses')) {
        return 'openai-responses';
    }
    if (has('openai') || has('gpt') || has('copilot')) {
        return 'openai';
    }
    return 'unknown';
}
|
|
99
|
+
// ─── To Provider Format ──────────────────────────────────────────
/**
 * Parse tool-call arguments that may arrive as a JSON string or an object.
 *
 * FIX: the previous code called JSON.parse directly, so one malformed or
 * truncated arguments string (e.g. from interrupted streaming) threw and
 * aborted translation of the entire message list. Malformed JSON and
 * null/undefined now degrade to an empty object, which is also the only
 * shape Anthropic accepts for tool_use input.
 */
function parseToolInput(value) {
    if (value === undefined || value === null) {
        return {};
    }
    if (typeof value !== 'string') {
        return value; // already an object — pass through
    }
    try {
        return JSON.parse(value);
    }
    catch {
        return {};
    }
}
/**
 * Convert neutral messages to Anthropic Messages API format.
 *
 * Prompt caching (DYNAMIC_BOUNDARY):
 * Anthropic supports prompt caching via cache_control on content blocks.
 * The last system message BEFORE the dynamicBoundary marker gets
 * cache_control: {type: "ephemeral"} to mark the static/dynamic boundary.
 *
 * Static (cacheable): system prompt + identity + stable output profile prefix
 * Dynamic (not cacheable): context block (facts/recall/recent actions), conversation history
 *
 * This allows Anthropic to cache the static prefix and skip re-tokenizing it.
 */
function toAnthropic(messages) {
    const result = [];
    // Find the last static system message index (before any dynamicBoundary
    // message) so we can mark it with cache_control.
    let lastStaticSystemIdx = -1;
    for (let i = 0; i < messages.length; i++) {
        const msg = messages[i];
        if (msg.role === 'system' && !msg.metadata?.dynamicBoundary) {
            lastStaticSystemIdx = i;
        }
        else if (msg.metadata?.dynamicBoundary) {
            // Stop scanning — everything after the boundary marker is dynamic.
            break;
        }
    }
    for (let i = 0; i < messages.length; i++) {
        const msg = messages[i];
        if (msg.role === 'system') {
            // Anthropic system messages are handled separately (system parameter).
            // Include them as-is; the gateway will extract them. The last static
            // system message carries the cache_control hint; the gateway is
            // responsible for lifting it into the correct API position.
            result.push(i === lastStaticSystemIdx
                ? { role: 'system', content: msg.textContent || '', cache_control: { type: 'ephemeral' } }
                : { role: 'system', content: msg.textContent || '' });
            continue;
        }
        if (msg.role === 'assistant') {
            const content = [];
            if (msg.textContent) {
                content.push({ type: 'text', text: msg.textContent });
            }
            if (msg.toolCalls) {
                for (const tc of msg.toolCalls) {
                    // tc may be a NeutralToolCall { id, name, arguments: string }
                    // or a raw OpenClaw content block { type, id, name, input: object }.
                    const rawTc = tc;
                    const input = rawTc.input !== undefined
                        ? parseToolInput(rawTc.input)
                        : parseToolInput(tc.arguments);
                    content.push({
                        type: 'tool_use',
                        id: tc.id,
                        name: tc.name,
                        input,
                    });
                }
            }
            // A lone text block collapses to the simple string form.
            result.push({
                role: 'assistant',
                content: content.length === 1 && typeof content[0] === 'object' && content[0].type === 'text'
                    ? msg.textContent || ''
                    : content,
            });
            continue;
        }
        if (msg.role === 'user') {
            // Tool results go as user messages with tool_result content blocks.
            if (msg.toolResults && msg.toolResults.length > 0) {
                const content = msg.toolResults.map(tr => ({
                    type: 'tool_result',
                    tool_use_id: tr.callId,
                    content: tr.content,
                    is_error: tr.isError || false,
                }));
                result.push({ role: 'user', content });
            }
            else {
                result.push({ role: 'user', content: msg.textContent || '' });
            }
            continue;
        }
    }
    return result;
}
|
|
207
|
+
/**
 * Convert neutral messages to OpenAI Chat Completions API format.
 * System and user messages map one-to-one; assistant tool calls become
 * tool_calls entries; tool results become separate "tool" role messages.
 */
function toOpenAI(messages) {
    const result = [];
    for (const msg of messages) {
        switch (msg.role) {
            case 'system':
                result.push({ role: 'system', content: msg.textContent || '' });
                break;
            case 'assistant': {
                const base = { role: 'assistant', content: msg.textContent || null };
                if (msg.toolCalls && msg.toolCalls.length > 0) {
                    result.push({ ...base, tool_calls: msg.toolCalls.map(tc => neutralCallToOpenAI(tc)) });
                }
                else {
                    result.push(base);
                }
                break;
            }
            case 'user':
                if (msg.toolResults && msg.toolResults.length > 0) {
                    // OpenAI tool results are separate "tool" role messages.
                    for (const tr of msg.toolResults) {
                        result.push({
                            role: 'tool',
                            tool_call_id: tr.callId,
                            content: tr.content,
                        });
                    }
                }
                else {
                    result.push({ role: 'user', content: msg.textContent || '' });
                }
                break;
        }
    }
    return result;
}
/**
 * Map one tool call to the OpenAI tool_calls entry shape.
 * Handles both NeutralToolCall { arguments: string } and raw content
 * blocks { input: object }; arguments are always emitted as a JSON string.
 */
function neutralCallToOpenAI(tc) {
    const rawTc = tc;
    let args;
    if (rawTc.input !== undefined) {
        args = typeof rawTc.input === 'string' ? rawTc.input : JSON.stringify(rawTc.input);
    }
    else if (tc.arguments !== undefined) {
        args = typeof tc.arguments === 'string' ? tc.arguments : JSON.stringify(tc.arguments);
    }
    else {
        args = '{}';
    }
    return {
        id: tc.id,
        type: 'function',
        function: { name: tc.name, arguments: args },
    };
}
/**
 * Convert neutral messages to OpenAI Responses API format.
 * The Responses API uses a different item format; for now this delegates to
 * the Chat Completions shape and the gateway handles the final conversion.
 * Stub for when direct Responses API support is needed.
 */
function toOpenAIResponses(messages) {
    return toOpenAI(messages);
}
|
|
277
|
+
/**
 * Convert neutral messages into the format expected by the given provider.
 * Always runs the pair-integrity repair sweep first so no provider ever
 * sees an orphaned tool_result block.
 */
export function toProviderFormat(messages, provider) {
    const repaired = repairToolCallPairs(messages);
    const kind = detectProvider(provider);
    if (kind === 'anthropic') {
        return toAnthropic(repaired);
    }
    if (kind === 'openai-responses') {
        return toOpenAIResponses(repaired);
    }
    // 'openai' and unknown providers both take the Chat Completions shape,
    // which is the most widely compatible default.
    return toOpenAI(repaired);
}
|
|
295
|
+
// ─── From Provider Format ────────────────────────────────────────
/**
 * Convert an Anthropic response into a neutral assistant message.
 * Handles both the simple string content form and the content-block array
 * form (text and tool_use blocks).
 */
function fromAnthropic(response) {
    const content = response.content;
    let textContent = null;
    let toolCalls = null;
    if (typeof content === 'string') {
        textContent = content;
    }
    else if (Array.isArray(content)) {
        const textParts = [];
        const tools = [];
        for (const block of content) {
            switch (block.type) {
                case 'text':
                    textParts.push(block.text);
                    break;
                case 'tool_use':
                    // Tool-call IDs are normalized to hypermem form on ingest.
                    tools.push({
                        id: normalizeToolCallId(block.id),
                        name: block.name,
                        arguments: JSON.stringify(block.input),
                    });
                    break;
            }
        }
        textContent = textParts.length > 0 ? textParts.join('\n') : null;
        toolCalls = tools.length > 0 ? tools : null;
    }
    return {
        role: 'assistant',
        textContent,
        toolCalls,
        toolResults: null,
        metadata: {
            originalProvider: 'anthropic',
            stopReason: response.stop_reason,
            model: response.model,
        },
    };
}
|
|
338
|
+
/**
 * Convert one OpenAI response choice into a neutral assistant message.
 * Accepts either a full choice ({ message, finish_reason }) or a bare
 * message object.
 */
function fromOpenAI(choice) {
    const message = choice.message || choice;
    const rawCalls = message.tool_calls;
    const toolCalls = rawCalls && rawCalls.length > 0
        ? rawCalls.map(tc => ({
            id: normalizeToolCallId(tc.id),
            name: tc.function.name,
            arguments: tc.function.arguments,
        }))
        : null;
    return {
        role: 'assistant',
        textContent: message.content || null,
        toolCalls,
        toolResults: null,
        metadata: {
            originalProvider: 'openai',
            // finish_reason lives on the choice for full responses and may be
            // absent on bare message objects.
            finishReason: message.finish_reason || choice.finish_reason,
        },
    };
}
|
|
365
|
+
/**
 * Convert a provider-specific response into neutral format.
 */
export function fromProviderFormat(response, provider) {
    if (detectProvider(provider) === 'anthropic') {
        return fromAnthropic(response);
    }
    // 'openai', 'openai-responses', and unknown providers all share the
    // Chat Completions response shape.
    return fromOpenAI(response);
}
|
|
380
|
+
/**
 * Wrap raw chat input into a neutral user message.
 */
export function userMessageToNeutral(content, metadata) {
    const neutral = {
        role: 'user',
        toolCalls: null,
        toolResults: null,
        textContent: content,
        metadata,
    };
    return neutral;
}
|
|
392
|
+
/**
 * Wrap tool execution results in a neutral user message
 * (no text content, no tool calls).
 */
export function toolResultsToNeutral(results) {
    const neutral = {
        role: 'user',
        toolCalls: null,
        toolResults: results,
        textContent: null,
    };
    return neutral;
}
|
|
403
|
+
//# sourceMappingURL=provider-translator.js.map
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
/**
 * hypermem Rate Limiter
 *
 * Token-bucket rate limiter for embedding API calls.
 * Prevents hammering Ollama during bulk indexing.
 *
 * Strategy:
 * - Burst: allow immediate calls up to bucket capacity
 * - Sustained: refill tokens at a steady rate
 * - Backpressure: when tokens exhausted, delay until available
 * - Priority: high-priority requests (user-facing recall) get reserved tokens
 *
 * Usage:
 *   const limiter = new RateLimiter({ tokensPerSecond: 5, burstSize: 10 });
 *   await limiter.acquire(); // Waits if necessary
 *   const embeddings = await generateEmbeddings(texts);
 */
// NOTE(review): this appears to be a compiler-generated declaration file
// (see the sourceMappingURL comment below); make changes in
// src/rate-limiter.ts, not here.
export interface RateLimiterConfig {
    /** Tokens refilled per second. Default: 5 */
    tokensPerSecond: number;
    /** Maximum burst capacity. Default: 10 */
    burstSize: number;
    /** Reserved tokens for high-priority requests. Default: 2 */
    reservedHigh: number;
    /** Maximum wait time before rejecting (ms). Default: 30000 (30s) */
    maxWaitMs: number;
}
/** Request priority; 'high' may draw on the reserved token pool. */
export type Priority = 'high' | 'normal' | 'low';
export declare class RateLimiter {
    private tokens;
    private lastRefill;
    private readonly config;
    private waitQueue;
    private refillTimer;
    private _totalAcquired;
    private _totalWaited;
    private _totalRejected;
    constructor(config?: Partial<RateLimiterConfig>);
    /**
     * Acquire tokens. Blocks until tokens are available or maxWaitMs expires.
     *
     * @param count - Number of tokens to acquire (default 1)
     * @param priority - Request priority (high gets reserved tokens)
     * @throws Error if wait exceeds maxWaitMs
     */
    acquire(count?: number, priority?: Priority): Promise<void>;
    /**
     * Try to acquire tokens without waiting.
     * Returns true if tokens were acquired, false if not.
     */
    tryAcquire(count?: number, priority?: Priority): boolean;
    /**
     * Get current limiter state.
     */
    get state(): {
        availableTokens: number;
        pendingRequests: number;
        stats: {
            acquired: number;
            waited: number;
            rejected: number;
        };
    };
    /**
     * Stop the refill timer.
     */
    destroy(): void;
    private refill;
    private processQueue;
}
/**
 * Rate-limited embedding generator.
 * Wraps generateEmbeddings with rate limiting.
 */
export declare function createRateLimitedEmbedder(embedFn: (texts: string[]) => Promise<Float32Array[]>, limiter: RateLimiter): (texts: string[], priority?: Priority) => Promise<Float32Array[]>;
|
|
76
|
+
//# sourceMappingURL=rate-limiter.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"rate-limiter.d.ts","sourceRoot":"","sources":["../src/rate-limiter.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;GAgBG;AAEH,MAAM,WAAW,iBAAiB;IAChC,6CAA6C;IAC7C,eAAe,EAAE,MAAM,CAAC;IACxB,0CAA0C;IAC1C,SAAS,EAAE,MAAM,CAAC;IAClB,6DAA6D;IAC7D,YAAY,EAAE,MAAM,CAAC;IACrB,oEAAoE;IACpE,SAAS,EAAE,MAAM,CAAC;CACnB;AAED,MAAM,MAAM,QAAQ,GAAG,MAAM,GAAG,QAAQ,GAAG,KAAK,CAAC;AASjD,qBAAa,WAAW;IACtB,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,UAAU,CAAS;IAC3B,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAoB;IAC3C,OAAO,CAAC,SAAS,CAMT;IACR,OAAO,CAAC,WAAW,CAA+C;IAClE,OAAO,CAAC,cAAc,CAAK;IAC3B,OAAO,CAAC,YAAY,CAAK;IACzB,OAAO,CAAC,cAAc,CAAK;gBAEf,MAAM,CAAC,EAAE,OAAO,CAAC,iBAAiB,CAAC;IAS/C;;;;;;OAMG;IACG,OAAO,CAAC,KAAK,GAAE,MAAU,EAAE,QAAQ,GAAE,QAAmB,GAAG,OAAO,CAAC,IAAI,CAAC;IAqC9E;;;OAGG;IACH,UAAU,CAAC,KAAK,GAAE,MAAU,EAAE,QAAQ,GAAE,QAAmB,GAAG,OAAO;IAgBrE;;OAEG;IACH,IAAI,KAAK,IAAI;QACX,eAAe,EAAE,MAAM,CAAC;QACxB,eAAe,EAAE,MAAM,CAAC;QACxB,KAAK,EAAE;YAAE,QAAQ,EAAE,MAAM,CAAC;YAAC,MAAM,EAAE,MAAM,CAAC;YAAC,QAAQ,EAAE,MAAM,CAAA;SAAE,CAAC;KAC/D,CAWA;IAED;;OAEG;IACH,OAAO,IAAI,IAAI;IAcf,OAAO,CAAC,MAAM;IAcd,OAAO,CAAC,YAAY;CAiCrB;AAED;;;GAGG;AACH,wBAAgB,yBAAyB,CACvC,OAAO,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,OAAO,CAAC,YAAY,EAAE,CAAC,EACrD,OAAO,EAAE,WAAW,GACnB,CAAC,KAAK,EAAE,MAAM,EAAE,EAAE,QAAQ,CAAC,EAAE,QAAQ,KAAK,OAAO,CAAC,YAAY,EAAE,CAAC,CASnE"}
|
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* hypermem Rate Limiter
|
|
3
|
+
*
|
|
4
|
+
* Token-bucket rate limiter for embedding API calls.
|
|
5
|
+
* Prevents hammering Ollama during bulk indexing.
|
|
6
|
+
*
|
|
7
|
+
* Strategy:
|
|
8
|
+
* - Burst: allow immediate calls up to bucket capacity
|
|
9
|
+
* - Sustained: refill tokens at a steady rate
|
|
10
|
+
* - Backpressure: when tokens exhausted, delay until available
|
|
11
|
+
* - Priority: high-priority requests (user-facing recall) get reserved tokens
|
|
12
|
+
*
|
|
13
|
+
* Usage:
|
|
14
|
+
* const limiter = new RateLimiter({ tokensPerSecond: 5, burstSize: 10 });
|
|
15
|
+
* await limiter.acquire(); // Waits if necessary
|
|
16
|
+
* const embeddings = await generateEmbeddings(texts);
|
|
17
|
+
*/
|
|
18
|
+
const DEFAULT_CONFIG = {
|
|
19
|
+
tokensPerSecond: 5,
|
|
20
|
+
burstSize: 10,
|
|
21
|
+
reservedHigh: 2,
|
|
22
|
+
maxWaitMs: 30000,
|
|
23
|
+
};
|
|
24
|
+
export class RateLimiter {
|
|
25
|
+
tokens;
|
|
26
|
+
lastRefill;
|
|
27
|
+
config;
|
|
28
|
+
waitQueue = [];
|
|
29
|
+
refillTimer = null;
|
|
30
|
+
_totalAcquired = 0;
|
|
31
|
+
_totalWaited = 0;
|
|
32
|
+
_totalRejected = 0;
|
|
33
|
+
constructor(config) {
|
|
34
|
+
this.config = { ...DEFAULT_CONFIG, ...config };
|
|
35
|
+
this.tokens = this.config.burstSize;
|
|
36
|
+
this.lastRefill = Date.now();
|
|
37
|
+
// Refill tokens periodically
|
|
38
|
+
this.refillTimer = setInterval(() => this.refill(), 200); // 5x per second
|
|
39
|
+
}
|
|
40
|
+
/**
|
|
41
|
+
* Acquire tokens. Blocks until tokens are available or maxWaitMs expires.
|
|
42
|
+
*
|
|
43
|
+
* @param count - Number of tokens to acquire (default 1)
|
|
44
|
+
* @param priority - Request priority (high gets reserved tokens)
|
|
45
|
+
* @throws Error if wait exceeds maxWaitMs
|
|
46
|
+
*/
|
|
47
|
+
async acquire(count = 1, priority = 'normal') {
|
|
48
|
+
this.refill();
|
|
49
|
+
// High priority can use reserved tokens
|
|
50
|
+
const available = priority === 'high'
|
|
51
|
+
? this.tokens
|
|
52
|
+
: Math.max(0, this.tokens - this.config.reservedHigh);
|
|
53
|
+
if (available >= count) {
|
|
54
|
+
this.tokens -= count;
|
|
55
|
+
this._totalAcquired += count;
|
|
56
|
+
return;
|
|
57
|
+
}
|
|
58
|
+
// Need to wait
|
|
59
|
+
this._totalWaited++;
|
|
60
|
+
const deadline = Date.now() + this.config.maxWaitMs;
|
|
61
|
+
return new Promise((resolve, reject) => {
|
|
62
|
+
this.waitQueue.push({
|
|
63
|
+
resolve,
|
|
64
|
+
reject,
|
|
65
|
+
priority,
|
|
66
|
+
tokensNeeded: count,
|
|
67
|
+
deadline,
|
|
68
|
+
});
|
|
69
|
+
// Sort by priority (high first) then by deadline (earliest first)
|
|
70
|
+
this.waitQueue.sort((a, b) => {
|
|
71
|
+
const priOrder = { high: 0, normal: 1, low: 2 };
|
|
72
|
+
const priDiff = priOrder[a.priority] - priOrder[b.priority];
|
|
73
|
+
if (priDiff !== 0)
|
|
74
|
+
return priDiff;
|
|
75
|
+
return a.deadline - b.deadline;
|
|
76
|
+
});
|
|
77
|
+
});
|
|
78
|
+
}
|
|
79
|
+
/**
|
|
80
|
+
* Try to acquire tokens without waiting.
|
|
81
|
+
* Returns true if tokens were acquired, false if not.
|
|
82
|
+
*/
|
|
83
|
+
tryAcquire(count = 1, priority = 'normal') {
|
|
84
|
+
this.refill();
|
|
85
|
+
const available = priority === 'high'
|
|
86
|
+
? this.tokens
|
|
87
|
+
: Math.max(0, this.tokens - this.config.reservedHigh);
|
|
88
|
+
if (available >= count) {
|
|
89
|
+
this.tokens -= count;
|
|
90
|
+
this._totalAcquired += count;
|
|
91
|
+
return true;
|
|
92
|
+
}
|
|
93
|
+
return false;
|
|
94
|
+
}
|
|
95
|
+
/**
|
|
96
|
+
* Get current limiter state.
|
|
97
|
+
*/
|
|
98
|
+
get state() {
|
|
99
|
+
this.refill();
|
|
100
|
+
return {
|
|
101
|
+
availableTokens: Math.floor(this.tokens),
|
|
102
|
+
pendingRequests: this.waitQueue.length,
|
|
103
|
+
stats: {
|
|
104
|
+
acquired: this._totalAcquired,
|
|
105
|
+
waited: this._totalWaited,
|
|
106
|
+
rejected: this._totalRejected,
|
|
107
|
+
},
|
|
108
|
+
};
|
|
109
|
+
}
|
|
110
|
+
/**
|
|
111
|
+
* Stop the refill timer.
|
|
112
|
+
*/
|
|
113
|
+
destroy() {
|
|
114
|
+
if (this.refillTimer) {
|
|
115
|
+
clearInterval(this.refillTimer);
|
|
116
|
+
this.refillTimer = null;
|
|
117
|
+
}
|
|
118
|
+
// Reject all pending
|
|
119
|
+
for (const waiter of this.waitQueue) {
|
|
120
|
+
waiter.reject(new Error('Rate limiter destroyed'));
|
|
121
|
+
}
|
|
122
|
+
this.waitQueue = [];
|
|
123
|
+
}
|
|
124
|
+
// ─── Internal ──────────────────────────────────────────────
|
|
125
|
+
refill() {
|
|
126
|
+
const now = Date.now();
|
|
127
|
+
const elapsed = (now - this.lastRefill) / 1000; // seconds
|
|
128
|
+
const newTokens = elapsed * this.config.tokensPerSecond;
|
|
129
|
+
if (newTokens > 0) {
|
|
130
|
+
this.tokens = Math.min(this.config.burstSize, this.tokens + newTokens);
|
|
131
|
+
this.lastRefill = now;
|
|
132
|
+
}
|
|
133
|
+
// Process wait queue
|
|
134
|
+
this.processQueue();
|
|
135
|
+
}
|
|
136
|
+
/**
 * Walk the wait queue (already sorted high-priority-first, then by
 * deadline), rejecting expired waiters and resolving any waiter whose
 * token demand can currently be met. Served and expired entries are
 * removed from the queue afterwards.
 *
 * NOTE(review): a waiter later in the queue can be served even when an
 * earlier (higher-priority) waiter's larger demand cannot — the
 * reservedHigh pool partially mitigates this, but confirm the barging
 * behavior is intended.
 */
processQueue() {
    const now = Date.now();
    const toRemove = [];
    for (let i = 0; i < this.waitQueue.length; i++) {
        const waiter = this.waitQueue[i];
        // Check deadline: expired waiters are rejected, never served.
        if (now > waiter.deadline) {
            waiter.reject(new Error(`Rate limit wait exceeded ${this.config.maxWaitMs}ms`));
            this._totalRejected++;
            toRemove.push(i);
            continue;
        }
        // Check if tokens available; non-high waiters cannot dip into the
        // reservedHigh pool.
        const available = waiter.priority === 'high'
            ? this.tokens
            : Math.max(0, this.tokens - this.config.reservedHigh);
        if (available >= waiter.tokensNeeded) {
            this.tokens -= waiter.tokensNeeded;
            this._totalAcquired += waiter.tokensNeeded;
            waiter.resolve();
            toRemove.push(i);
        }
    }
    // Remove processed entries (reverse order to maintain indices)
    for (let i = toRemove.length - 1; i >= 0; i--) {
        this.waitQueue.splice(toRemove[i], 1);
    }
}
|
|
164
|
+
}
|
|
165
|
+
/**
|
|
166
|
+
* Rate-limited embedding generator.
|
|
167
|
+
* Wraps generateEmbeddings with rate limiting.
|
|
168
|
+
*/
|
|
169
|
+
export function createRateLimitedEmbedder(embedFn, limiter) {
|
|
170
|
+
return async (texts, priority = 'normal') => {
|
|
171
|
+
if (texts.length === 0)
|
|
172
|
+
return [];
|
|
173
|
+
// Each batch counts as 1 token regardless of batch size
|
|
174
|
+
// This limits the number of API calls, not the number of texts
|
|
175
|
+
await limiter.acquire(1, priority);
|
|
176
|
+
return embedFn(texts);
|
|
177
|
+
};
|
|
178
|
+
}
|
|
179
|
+
//# sourceMappingURL=rate-limiter.js.map
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
/**
 * repair-tool-pairs.ts
 *
 * Strips orphaned tool result entries from a pi-agent message array.
 *
 * Background: HyperMem compaction and in-memory trim passes can remove assistant
 * messages that contain tool_use/toolCall blocks without removing the corresponding
 * tool result messages that follow them. Anthropic and Gemini reject these orphaned
 * tool results with a 400 error.
 *
 * This module provides a pure repair function that can be applied at any output
 * boundary to sanitise the message list before it reaches the provider.
 *
 * Supported formats:
 * - pi-agent: role:'toolResult' messages with toolCallId field
 * - Anthropic native: user messages with content blocks of type:'tool_result' and tool_use_id
 *
 * Returns a new array. Does not mutate the input.
 */
// Loosely-typed message shape: repair works structurally across provider formats.
type AnyMessage = Record<string, unknown>;
/**
 * Repair orphaned tool pairs in a pi-agent / OpenClaw message array.
 *
 * Orphan types handled:
 * 1. role:'toolResult' message whose toolCallId has no matching toolCall/tool_use
 *    block in any assistant message in the array.
 * 2. User message whose content contains only type:'tool_result' blocks where all
 *    of those blocks reference a tool_use_id that does not appear in any assistant
 *    message in the array. (Anthropic-native format.)
 *
 * Also strips orphaned assistant messages that contain ONLY tool_use/toolCall blocks
 * where none of those calls has a corresponding tool result anywhere in the array.
 *
 * @param messages - Conversation history to sanitise; not mutated.
 * @returns A new array with orphaned tool-pair messages removed.
 */
export declare function repairToolPairs(messages: AnyMessage[]): AnyMessage[];
export {};
//# sourceMappingURL=repair-tool-pairs.d.ts.map
|