@martian-engineering/lossless-claw 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,445 @@
1
+ import { resolveLcmConfig } from "./db/config.js";
2
+ import type { LcmDependencies } from "./types.js";
3
+
4
/** Optional per-call knobs passed to an LCM summarize callback. */
export type LcmSummarizeOptions = {
  // Summary of preceding context; injected into the prompt as prior memory.
  previousSummary?: string;
  // True when condensing already-summarized nodes rather than raw conversation text.
  isCondensed?: boolean;
  // Output node depth; selects the condensed prompt variant (1, 2, or 3+).
  depth?: number;
};

/** Signature of the summarize callback produced by createLcmSummarizeFromLegacyParams. */
export type LcmSummarizeFn = (
  text: string,
  aggressive?: boolean,
  options?: LcmSummarizeOptions,
) => Promise<string>;

/**
 * Loosely-typed runtime params inherited from the legacy OpenClaw integration.
 * Fields are `unknown` because callers pass them through from untyped config;
 * consumers must narrow each field before use.
 */
export type LcmSummarizerLegacyParams = {
  provider?: unknown;
  model?: unknown;
  config?: unknown;
  agentDir?: unknown;
  authProfileId?: unknown;
};

/** Leaf summary policy selector; "aggressive" trims harder than "normal". */
type SummaryMode = "normal" | "aggressive";

/** Fallback target size for condensed nodes when runtime config is absent or invalid. */
const DEFAULT_CONDENSED_TARGET_TOKENS = 2000;
27
+
28
+ /** Normalize provider ids for stable config/profile lookup. */
29
+ function normalizeProviderId(provider: string): string {
30
+ return provider.trim().toLowerCase();
31
+ }
32
+
33
+ /**
34
+ * Resolve provider API override from legacy OpenClaw config.
35
+ *
36
+ * When model ids are custom/forward-compat, this hint allows deps.complete to
37
+ * construct a valid pi-ai Model object even if getModel(provider, model) misses.
38
+ */
39
+ function resolveProviderApiFromLegacyConfig(
40
+ config: unknown,
41
+ provider: string,
42
+ ): string | undefined {
43
+ if (!config || typeof config !== "object") {
44
+ return undefined;
45
+ }
46
+ const providers = (config as { models?: { providers?: Record<string, unknown> } }).models
47
+ ?.providers;
48
+ if (!providers || typeof providers !== "object") {
49
+ return undefined;
50
+ }
51
+
52
+ const direct = providers[provider];
53
+ if (direct && typeof direct === "object") {
54
+ const api = (direct as { api?: unknown }).api;
55
+ if (typeof api === "string" && api.trim()) {
56
+ return api.trim();
57
+ }
58
+ }
59
+
60
+ const normalizedProvider = normalizeProviderId(provider);
61
+ for (const [entryProvider, value] of Object.entries(providers)) {
62
+ if (normalizeProviderId(entryProvider) !== normalizedProvider) {
63
+ continue;
64
+ }
65
+ if (!value || typeof value !== "object") {
66
+ continue;
67
+ }
68
+ const api = (value as { api?: unknown }).api;
69
+ if (typeof api === "string" && api.trim()) {
70
+ return api.trim();
71
+ }
72
+ }
73
+ return undefined;
74
+ }
75
+
76
+ /** Approximate token estimate used for target-sizing prompts. */
77
+ function estimateTokens(text: string): number {
78
+ return Math.ceil(text.length / 4);
79
+ }
80
+
81
+ /** Narrows completion response blocks to plain text blocks. */
82
+ function isTextBlock(block: unknown): block is { type: string; text: string } {
83
+ if (!block || typeof block !== "object" || Array.isArray(block)) {
84
+ return false;
85
+ }
86
+ const record = block as { type?: unknown; text?: unknown };
87
+ return record.type === "text" && typeof record.text === "string";
88
+ }
89
+
90
+ /**
91
+ * Resolve a practical target token count for leaf and condensed summaries.
92
+ * Aggressive leaf mode intentionally aims lower so compaction converges faster.
93
+ */
94
+ function resolveTargetTokens(params: {
95
+ inputTokens: number;
96
+ mode: SummaryMode;
97
+ isCondensed: boolean;
98
+ condensedTargetTokens: number;
99
+ }): number {
100
+ if (params.isCondensed) {
101
+ return Math.max(512, params.condensedTargetTokens);
102
+ }
103
+
104
+ const { inputTokens, mode } = params;
105
+ if (mode === "aggressive") {
106
+ return Math.max(96, Math.min(640, Math.floor(inputTokens * 0.2)));
107
+ }
108
+ return Math.max(192, Math.min(1200, Math.floor(inputTokens * 0.35)));
109
+ }
110
+
111
/**
 * Build a leaf (segment) summarization prompt.
 *
 * Normal leaf mode preserves details; aggressive leaf mode keeps only the
 * highest-value facts needed for follow-up turns.
 *
 * Returns a single prompt string; sections are separated by blank lines.
 */
function buildLeafSummaryPrompt(params: {
  text: string;
  mode: SummaryMode;
  targetTokens: number;
  previousSummary?: string;
  customInstructions?: string;
}): string {
  const { text, mode, targetTokens, previousSummary, customInstructions } = params;
  // Blank/whitespace-only previous summaries are rendered as an explicit "(none)".
  const previousContext = previousSummary?.trim() || "(none)";

  // Mode-specific retention policy block.
  const policy =
    mode === "aggressive"
      ? [
          "Aggressive summary policy:",
          "- Keep only durable facts and current task state.",
          "- Remove examples, repetition, and low-value narrative details.",
          "- Preserve explicit TODOs, blockers, decisions, and constraints.",
        ].join("\n")
      : [
          "Normal summary policy:",
          "- Preserve key decisions, rationale, constraints, and active tasks.",
          "- Keep essential technical details needed to continue work safely.",
          "- Remove obvious repetition and conversational filler.",
        ].join("\n");

  // Operator-supplied instructions are always present, even when empty.
  const instructionBlock = customInstructions?.trim()
    ? `Operator instructions:\n${customInstructions.trim()}`
    : "Operator instructions: (none)";

  return [
    "You summarize a SEGMENT of an OpenClaw conversation for future model turns.",
    "Treat this as incremental memory compaction input, not a full-conversation summary.",
    policy,
    instructionBlock,
    [
      "Output requirements:",
      "- Plain text only.",
      "- No preamble, headings, or markdown formatting.",
      "- Keep it concise while preserving required details.",
      "- Track file operations (created, modified, deleted, renamed) with file paths and current status.",
      '- If no file operations appear, include exactly: "Files: none".',
      '- End with exactly: "Expand for details about: <comma-separated list of what was dropped or compressed>".',
      `- Target length: about ${targetTokens} tokens or less.`,
    ].join("\n"),
    `<previous_context>\n${previousContext}\n</previous_context>`,
    `<conversation_segment>\n${text}\n</conversation_segment>`,
  ].join("\n\n");
}
165
+
166
/**
 * Build the depth-1 condensed prompt: compacts leaf-level summaries into a
 * single condensed memory node for a fresh model instance.
 *
 * When a previous summary exists, the prompt instructs the model not to repeat
 * unchanged information from it.
 */
function buildD1Prompt(params: {
  text: string;
  targetTokens: number;
  previousSummary?: string;
  customInstructions?: string;
}): string {
  const { text, targetTokens, previousSummary, customInstructions } = params;
  const instructionBlock = customInstructions?.trim()
    ? `Operator instructions:\n${customInstructions.trim()}`
    : "Operator instructions: (none)";
  const previousContext = previousSummary?.trim();
  // With prior context: ask for deltas only; otherwise a generic continuation cue.
  const previousContextBlock = previousContext
    ? [
        "It already has this preceding summary as context. Do not repeat information",
        "that appears there unchanged. Focus on what is new, changed, or resolved:",
        "",
        `<previous_context>\n${previousContext}\n</previous_context>`,
      ].join("\n")
    : "Focus on what matters for continuation:";

  return [
    "You are compacting leaf-level conversation summaries into a single condensed memory node.",
    "You are preparing context for a fresh model instance that will continue this conversation.",
    instructionBlock,
    previousContextBlock,
    [
      "Preserve:",
      "- Decisions made and their rationale when rationale matters going forward.",
      "- Earlier decisions that were superseded, and what replaced them.",
      "- Completed tasks/topics with outcomes.",
      "- In-progress items with current state and what remains.",
      "- Blockers, open questions, and unresolved tensions.",
      "- Specific references (names, paths, URLs, identifiers) needed for continuation.",
      "",
      "Drop low-value detail:",
      "- Context that has not changed from previous_context.",
      "- Intermediate dead ends where the conclusion is already known.",
      "- Transient states that are already resolved.",
      "- Tool-internal mechanics and process scaffolding.",
      "",
      "Use plain text. No mandatory structure.",
      "Include a timeline with timestamps (hour or half-hour) for significant events.",
      "Present information chronologically and mark superseded decisions.",
      'End with exactly: "Expand for details about: <comma-separated list of what was dropped or compressed>".',
      `Target length: about ${targetTokens} tokens.`,
    ].join("\n"),
    `<conversation_to_condense>\n${text}\n</conversation_to_condense>`,
  ].join("\n\n");
}
215
+
216
/**
 * Build the depth-2 condensed prompt: merges session-level summaries into a
 * higher-level node focused on trajectory rather than per-session detail.
 */
function buildD2Prompt(params: {
  text: string;
  targetTokens: number;
  customInstructions?: string;
}): string {
  const { text, targetTokens, customInstructions } = params;
  const instructionBlock = customInstructions?.trim()
    ? `Operator instructions:\n${customInstructions.trim()}`
    : "Operator instructions: (none)";

  return [
    "You are condensing multiple session-level summaries into a higher-level memory node.",
    "A future model should understand trajectory, not per-session minutiae.",
    instructionBlock,
    [
      "Preserve:",
      "- Decisions still in effect and their rationale.",
      "- Decisions that evolved: what changed and why.",
      "- Completed work with outcomes.",
      "- Active constraints, limitations, and known issues.",
      "- Current state of in-progress work.",
      "",
      "Drop:",
      "- Session-local operational detail and process mechanics.",
      "- Identifiers that are no longer relevant.",
      "- Intermediate states superseded by later outcomes.",
      "",
      "Use plain text. Brief headers are fine if useful.",
      "Include a timeline with dates and approximate time of day for key milestones.",
      'End with exactly: "Expand for details about: <comma-separated list of what was dropped or compressed>".',
      `Target length: about ${targetTokens} tokens.`,
    ].join("\n"),
    `<conversation_to_condense>\n${text}\n</conversation_to_condense>`,
  ].join("\n\n");
}
251
+
252
/**
 * Build the depth-3+ condensed prompt: collapses phase-level summaries into a
 * long-lived, high-level node that keeps only durable context.
 */
function buildD3PlusPrompt(params: {
  text: string;
  targetTokens: number;
  customInstructions?: string;
}): string {
  const { text, targetTokens, customInstructions } = params;
  const instructionBlock = customInstructions?.trim()
    ? `Operator instructions:\n${customInstructions.trim()}`
    : "Operator instructions: (none)";

  return [
    "You are creating a high-level memory node from multiple phase-level summaries.",
    "This may persist for the rest of the conversation. Keep only durable context.",
    instructionBlock,
    [
      "Preserve:",
      "- Key decisions and rationale.",
      "- What was accomplished and current state.",
      "- Active constraints and hard limitations.",
      "- Important relationships between people, systems, or concepts.",
      "- Durable lessons learned.",
      "",
      "Drop:",
      "- Operational and process detail.",
      "- Method details unless the method itself was the decision.",
      "- Specific references unless essential for continuation.",
      "",
      "Use plain text. Be concise.",
      "Include a brief timeline with dates (or date ranges) for major milestones.",
      'End with exactly: "Expand for details about: <comma-separated list of what was dropped or compressed>".',
      `Target length: about ${targetTokens} tokens.`,
    ].join("\n"),
    `<conversation_to_condense>\n${text}\n</conversation_to_condense>`,
  ].join("\n\n");
}
287
+
288
+ /** Build a condensed prompt variant based on the output node depth. */
289
+ function buildCondensedSummaryPrompt(params: {
290
+ text: string;
291
+ targetTokens: number;
292
+ depth: number;
293
+ previousSummary?: string;
294
+ customInstructions?: string;
295
+ }): string {
296
+ if (params.depth <= 1) {
297
+ return buildD1Prompt(params);
298
+ }
299
+ if (params.depth === 2) {
300
+ return buildD2Prompt(params);
301
+ }
302
+ return buildD3PlusPrompt(params);
303
+ }
304
+
305
+ /**
306
+ * Deterministic fallback summary when model output is empty.
307
+ *
308
+ * Keeps compaction progress monotonic instead of throwing and aborting the
309
+ * whole compaction pass.
310
+ */
311
+ function buildDeterministicFallbackSummary(text: string, targetTokens: number): string {
312
+ const trimmed = text.trim();
313
+ if (!trimmed) {
314
+ return "";
315
+ }
316
+
317
+ const maxChars = Math.max(256, targetTokens * 4);
318
+ if (trimmed.length <= maxChars) {
319
+ return trimmed;
320
+ }
321
+
322
+ return `${trimmed.slice(0, maxChars)}\n[LCM fallback summary; truncated for context management]`;
323
+ }
324
+
325
+ /**
326
+ * Builds a model-backed LCM summarize callback from runtime legacy params.
327
+ *
328
+ * Returns `undefined` when model/provider context is unavailable so callers can
329
+ * choose a fallback summarizer.
330
+ */
331
+ export async function createLcmSummarizeFromLegacyParams(params: {
332
+ deps: LcmDependencies;
333
+ legacyParams: LcmSummarizerLegacyParams;
334
+ customInstructions?: string;
335
+ }): Promise<LcmSummarizeFn | undefined> {
336
+ const providerHint =
337
+ typeof params.legacyParams.provider === "string" ? params.legacyParams.provider.trim() : "";
338
+ const modelHint =
339
+ typeof params.legacyParams.model === "string" ? params.legacyParams.model.trim() : "";
340
+ const modelRef = modelHint || undefined;
341
+ console.error(`[lcm] createLcmSummarize: providerHint="${providerHint}", modelHint="${modelHint}", modelRef="${modelRef}"`);
342
+
343
+ let resolved: { provider: string; model: string };
344
+ try {
345
+ resolved = params.deps.resolveModel(modelRef, providerHint || undefined);
346
+ console.error(`[lcm] createLcmSummarize: resolved model=${resolved.model}, provider=${resolved.provider}`);
347
+ } catch (err) {
348
+ console.error(`[lcm] createLcmSummarize: resolveModel FAILED:`, err instanceof Error ? err.message : err);
349
+ return undefined;
350
+ }
351
+
352
+ const { provider, model } = resolved;
353
+ if (!provider || !model) {
354
+ console.error(`[lcm] createLcmSummarize: empty provider="${provider}" or model="${model}"`);
355
+ return undefined;
356
+ }
357
+ const authProfileId =
358
+ typeof params.legacyParams.authProfileId === "string" &&
359
+ params.legacyParams.authProfileId.trim()
360
+ ? params.legacyParams.authProfileId.trim()
361
+ : undefined;
362
+ const agentDir =
363
+ typeof params.legacyParams.agentDir === "string" && params.legacyParams.agentDir.trim()
364
+ ? params.legacyParams.agentDir.trim()
365
+ : undefined;
366
+ const providerApi = resolveProviderApiFromLegacyConfig(params.legacyParams.config, provider);
367
+
368
+ const apiKey = params.deps.getApiKey(provider, model);
369
+
370
+ const runtimeLcmConfig = resolveLcmConfig();
371
+ const condensedTargetTokens =
372
+ Number.isFinite(runtimeLcmConfig.condensedTargetTokens) &&
373
+ runtimeLcmConfig.condensedTargetTokens > 0
374
+ ? runtimeLcmConfig.condensedTargetTokens
375
+ : DEFAULT_CONDENSED_TARGET_TOKENS;
376
+
377
+ return async (
378
+ text: string,
379
+ aggressive?: boolean,
380
+ options?: LcmSummarizeOptions,
381
+ ): Promise<string> => {
382
+ if (!text.trim()) {
383
+ return "";
384
+ }
385
+
386
+ const mode: SummaryMode = aggressive ? "aggressive" : "normal";
387
+ const isCondensed = options?.isCondensed === true;
388
+ const targetTokens = resolveTargetTokens({
389
+ inputTokens: estimateTokens(text),
390
+ mode,
391
+ isCondensed,
392
+ condensedTargetTokens,
393
+ });
394
+ const prompt = isCondensed
395
+ ? buildCondensedSummaryPrompt({
396
+ text,
397
+ targetTokens,
398
+ depth:
399
+ typeof options?.depth === "number" && Number.isFinite(options.depth)
400
+ ? Math.max(1, Math.floor(options.depth))
401
+ : 1,
402
+ previousSummary: options?.previousSummary,
403
+ customInstructions: params.customInstructions,
404
+ })
405
+ : buildLeafSummaryPrompt({
406
+ text,
407
+ mode,
408
+ targetTokens,
409
+ previousSummary: options?.previousSummary,
410
+ customInstructions: params.customInstructions,
411
+ });
412
+
413
+ const result = await params.deps.complete({
414
+ provider,
415
+ model,
416
+ apiKey,
417
+ providerApi,
418
+ authProfileId,
419
+ agentDir,
420
+ runtimeConfig: params.legacyParams.config,
421
+ messages: [
422
+ {
423
+ role: "user",
424
+ content: prompt,
425
+ },
426
+ ],
427
+ maxTokens: targetTokens,
428
+ temperature: aggressive ? 0.1 : 0.2,
429
+ });
430
+
431
+ const summary = result.content
432
+ .filter(isTextBlock)
433
+ .map((block) => block.text.trim())
434
+ .filter(Boolean)
435
+ .join("\n")
436
+ .trim();
437
+
438
+ if (!summary) {
439
+ console.error(`[lcm] summarize got empty content from LLM (${result.content.length} blocks, types: ${result.content.map(b => b.type).join(",")}), falling back to truncation`);
440
+ return buildDeterministicFallbackSummary(text, targetTokens);
441
+ }
442
+
443
+ return summary;
444
+ };
445
+ }
@@ -0,0 +1,53 @@
1
+ import type { AnyAgentTool as OpenClawAnyAgentTool } from "openclaw/plugin-sdk";
2
+
3
+ export type AnyAgentTool = OpenClawAnyAgentTool;
4
+
5
+ /** Render structured payloads as deterministic text tool results. */
6
+ export function jsonResult(payload: unknown): {
7
+ content: Array<{ type: "text"; text: string }>;
8
+ details: unknown;
9
+ } {
10
+ return {
11
+ content: [
12
+ {
13
+ type: "text",
14
+ text: JSON.stringify(payload, null, 2),
15
+ },
16
+ ],
17
+ details: payload,
18
+ };
19
+ }
20
+
21
+ /** Read a string param with optional trimming/required checks. */
22
+ export function readStringParam(
23
+ params: Record<string, unknown>,
24
+ key: string,
25
+ options?: {
26
+ required?: boolean;
27
+ trim?: boolean;
28
+ allowEmpty?: boolean;
29
+ label?: string;
30
+ },
31
+ ): string | undefined {
32
+ const raw = params[key];
33
+ if (raw == null) {
34
+ if (options?.required) {
35
+ throw new Error(`${options.label ?? key} is required.`);
36
+ }
37
+ return undefined;
38
+ }
39
+
40
+ if (typeof raw !== "string") {
41
+ throw new Error(`${options?.label ?? key} must be a string.`);
42
+ }
43
+
44
+ const value = options?.trim === false ? raw : raw.trim();
45
+ if (!options?.allowEmpty && value.length === 0) {
46
+ if (options?.required) {
47
+ throw new Error(`${options.label ?? key} is required.`);
48
+ }
49
+ return undefined;
50
+ }
51
+
52
+ return value;
53
+ }
@@ -0,0 +1,76 @@
1
+ import type { LcmContextEngine } from "../engine.js";
2
+ import type { LcmDependencies } from "../types.js";
3
+
4
/** Resolved conversation targeting for an LCM tool call. */
export type LcmConversationScope = {
  // Specific conversation to operate on; undefined when unscoped or cross-conversation.
  conversationId?: number;
  // True when the tool should span every conversation.
  allConversations: boolean;
};
8
+
9
+ /**
10
+ * Parse an ISO-8601 timestamp tool parameter into a Date.
11
+ *
12
+ * Throws when the value is not a parseable timestamp string.
13
+ */
14
+ export function parseIsoTimestampParam(
15
+ params: Record<string, unknown>,
16
+ key: string,
17
+ ): Date | undefined {
18
+ const raw = params[key];
19
+ if (typeof raw !== "string") {
20
+ return undefined;
21
+ }
22
+ const value = raw.trim();
23
+ if (!value) {
24
+ return undefined;
25
+ }
26
+ const parsed = new Date(value);
27
+ if (Number.isNaN(parsed.getTime())) {
28
+ throw new Error(`${key} must be a valid ISO timestamp.`);
29
+ }
30
+ return parsed;
31
+ }
32
+
33
+ /**
34
+ * Resolve LCM conversation scope for tool calls.
35
+ *
36
+ * Priority:
37
+ * 1. Explicit conversationId parameter
38
+ * 2. allConversations=true (cross-conversation mode)
39
+ * 3. Current session's LCM conversation
40
+ */
41
+ export async function resolveLcmConversationScope(input: {
42
+ lcm: LcmContextEngine;
43
+ params: Record<string, unknown>;
44
+ sessionId?: string;
45
+ sessionKey?: string;
46
+ deps?: Pick<LcmDependencies, "resolveSessionIdFromSessionKey">;
47
+ }): Promise<LcmConversationScope> {
48
+ const { lcm, params } = input;
49
+
50
+ const explicitConversationId =
51
+ typeof params.conversationId === "number" && Number.isFinite(params.conversationId)
52
+ ? Math.trunc(params.conversationId)
53
+ : undefined;
54
+ if (explicitConversationId != null) {
55
+ return { conversationId: explicitConversationId, allConversations: false };
56
+ }
57
+
58
+ if (params.allConversations === true) {
59
+ return { conversationId: undefined, allConversations: true };
60
+ }
61
+
62
+ let normalizedSessionId = input.sessionId?.trim();
63
+ if (!normalizedSessionId && input.sessionKey && input.deps) {
64
+ normalizedSessionId = await input.deps.resolveSessionIdFromSessionKey(input.sessionKey.trim());
65
+ }
66
+ if (!normalizedSessionId) {
67
+ return { conversationId: undefined, allConversations: false };
68
+ }
69
+
70
+ const conversation = await lcm.getConversationStore().getConversationBySessionId(normalizedSessionId);
71
+ if (!conversation) {
72
+ return { conversationId: undefined, allConversations: false };
73
+ }
74
+
75
+ return { conversationId: conversation.conversationId, allConversations: false };
76
+ }