@byte5ai/palaia 2.1.0 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.ts +40 -21
- package/package.json +2 -2
- package/skill/SKILL.md +284 -706
- package/src/context-engine.ts +439 -0
- package/src/hooks/capture.ts +740 -0
- package/src/hooks/index.ts +823 -0
- package/src/hooks/reactions.ts +168 -0
- package/src/hooks/recall.ts +369 -0
- package/src/hooks/state.ts +317 -0
- package/src/priorities.ts +221 -0
- package/src/tools.ts +3 -2
- package/src/types.ts +119 -0
- package/src/hooks.ts +0 -2091
|
@@ -0,0 +1,439 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* ContextEngine adapter for deeper OpenClaw integration.
|
|
3
|
+
*
|
|
4
|
+
* Maps the 7 ContextEngine lifecycle hooks to palaia functionality,
|
|
5
|
+
* using the decomposed hooks modules as the implementation layer.
|
|
6
|
+
*
|
|
7
|
+
* Phase 1.5: Created as a thin adapter over existing recall/capture logic.
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import type { OpenClawPluginApi } from "./types.js";
|
|
11
|
+
import type { PalaiaPluginConfig } from "./config.js";
|
|
12
|
+
import { run, recover, type RunnerOpts, getEmbedServerManager } from "./runner.js";
|
|
13
|
+
|
|
14
|
+
import {
|
|
15
|
+
extractMessageTexts,
|
|
16
|
+
buildRecallQuery,
|
|
17
|
+
rerankByTypeWeight,
|
|
18
|
+
checkNudges,
|
|
19
|
+
type QueryResult,
|
|
20
|
+
} from "./hooks/recall.js";
|
|
21
|
+
|
|
22
|
+
import {
|
|
23
|
+
extractWithLLM,
|
|
24
|
+
shouldAttemptCapture,
|
|
25
|
+
extractSignificance,
|
|
26
|
+
stripPalaiaInjectedContext,
|
|
27
|
+
trimToRecentExchanges,
|
|
28
|
+
parsePalaiaHints,
|
|
29
|
+
loadProjects,
|
|
30
|
+
resolveCaptureModel,
|
|
31
|
+
type ExtractionResult,
|
|
32
|
+
} from "./hooks/capture.js";
|
|
33
|
+
|
|
34
|
+
import {
|
|
35
|
+
loadPluginState,
|
|
36
|
+
savePluginState,
|
|
37
|
+
resolvePerAgentContext,
|
|
38
|
+
sanitizeScope,
|
|
39
|
+
isValidScope,
|
|
40
|
+
} from "./hooks/state.js";
|
|
41
|
+
|
|
42
|
+
import {
|
|
43
|
+
loadPriorities,
|
|
44
|
+
resolvePriorities,
|
|
45
|
+
filterBlocked,
|
|
46
|
+
} from "./priorities.js";
|
|
47
|
+
|
|
48
|
+
/**
 * ContextEngine adapter for deeper OpenClaw integration.
 * Maps the 7 ContextEngine lifecycle hooks to palaia functionality.
 */
export interface PalaiaContextEngine {
  /** One-time startup: WAL recovery plus optional embed-server warm-up. */
  bootstrap(): Promise<void>;
  /** Feed a turn's raw messages in; may trigger auto-capture writes to the store. */
  ingest(messages: unknown[]): Promise<void>;
  /**
   * Build the memory-context string to inject for the next turn.
   * `tokenEstimate` is a heuristic (~4 chars per token), not an exact count;
   * the implementation caps output at roughly `budget.maxTokens * 4` chars.
   */
  assemble(budget: { maxTokens: number }): Promise<{ content: string; tokenEstimate: number }>;
  /** Run store compaction (`palaia gc`). */
  compact(): Promise<void>;
  /** Per-turn cleanup; the `turn` payload is currently unused by the implementation. */
  afterTurn(turn: unknown): Promise<void>;
  /** Produce the context object a spawned sub-agent should inherit (workspace + agent id). */
  prepareSubagentSpawn(parentContext: unknown): Promise<unknown>;
  /** Placeholder — sub-agent results are not merged back yet. */
  onSubagentEnded(result: unknown): Promise<void>;
}
|
|
61
|
+
|
|
62
|
+
function buildRunnerOpts(config: PalaiaPluginConfig, overrides?: { workspace?: string }): RunnerOpts {
|
|
63
|
+
return {
|
|
64
|
+
binaryPath: config.binaryPath,
|
|
65
|
+
workspace: overrides?.workspace || config.workspace,
|
|
66
|
+
timeoutMs: config.timeoutMs,
|
|
67
|
+
};
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
/** Rough token estimate: ~4 chars per token for English text. */
|
|
71
|
+
function estimateTokens(text: string): number {
|
|
72
|
+
return Math.ceil(text.length / 4);
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
/**
 * Create the palaia ContextEngine adapter for the given host API and config.
 *
 * Returns an object implementing the 7 lifecycle hooks. The factory closes
 * over two pieces of per-session mutable state:
 *  - `_lastMessages`: the most recent batch passed to ingest(), consumed by
 *    assemble() to build a recall query;
 *  - `_lastAssembleState`: recall/capture flags, cleared in afterTurn().
 *
 * Every hook catches its own errors and degrades to a no-op / empty result:
 * a palaia failure must never break the host agent loop.
 *
 * @param api    Host plugin API (logger, workspace, model config).
 * @param config Resolved palaia plugin configuration.
 */
export function createPalaiaContextEngine(
  api: OpenClawPluginApi,
  config: PalaiaPluginConfig,
): PalaiaContextEngine {
  const logger = api.logger;
  // Shared runner options (binary path, workspace, timeout) for CLI calls.
  const opts = buildRunnerOpts(config);

  /** Last messages seen via ingest(), used by assemble() for query building. */
  let _lastMessages: unknown[] = [];

  /** State from the last assemble() call, used by afterTurn(). */
  let _lastAssembleState: {
    recallOccurred: boolean;
    capturedInThisTurn: boolean;
  } = { recallOccurred: false, capturedInThisTurn: false };

  return {
    /**
     * Bootstrap: WAL recovery + embed-server start.
     * Maps to the palaia-recovery service from runner.ts.
     * Both steps are best-effort; failures are logged and swallowed.
     */
    async bootstrap(): Promise<void> {
      try {
        const result = await recover(opts);
        if (result.replayed > 0) {
          logger.info(`[palaia] WAL recovery: replayed ${result.replayed} entries`);
        }
        if (result.errors > 0) {
          logger.warn(`[palaia] WAL recovery completed with ${result.errors} error(s)`);
        }
      } catch (error) {
        logger.warn(`[palaia] Bootstrap WAL recovery failed: ${error}`);
      }

      // Start embed server if configured (lazy — first query will start it)
      if (config.embeddingServer) {
        try {
          getEmbedServerManager(opts);
        } catch {
          // Non-fatal — embed server start is lazy
        }
      }
    },

    /**
     * Ingest: auto-capture logic from hooks/capture.ts.
     * Called with the turn's messages after the agent responds.
     *
     * Pipeline: gate on config/turn-count -> collect palaia hints -> strip
     * previously injected context -> LLM extraction (primary) -> rule-based
     * fallback. Writes go through the `palaia write` CLI with a 10s timeout.
     */
    async ingest(messages: unknown[]): Promise<void> {
      // Remember the batch regardless of capture — assemble() queries from it.
      _lastMessages = messages;

      if (!config.autoCapture) return;
      if (!messages || messages.length === 0) return;

      try {
        const allTexts = extractMessageTexts(messages);
        const userTurns = allTexts.filter((t) => t.role === "user").length;
        // Require a minimum number of user turns before attempting capture.
        if (userTurns < config.captureMinTurns) return;

        // Parse capture hints
        const collectedHints: { project?: string; scope?: string }[] = [];
        for (const t of allTexts) {
          const { hints } = parsePalaiaHints(t.text);
          collectedHints.push(...hints);
        }

        // Strip injected context and trim to recent
        // (only user messages carry injected context — see assemble()).
        const cleanedTexts = allTexts.map(t =>
          t.role === "user"
            ? { ...t, text: stripPalaiaInjectedContext(t.text) }
            : t
        );
        const recentTexts = trimToRecentExchanges(cleanedTexts);
        const exchangeParts: string[] = [];
        for (const t of recentTexts) {
          const { cleanedText } = parsePalaiaHints(t.text);
          exchangeParts.push(`[${t.role}]: ${cleanedText}`);
        }
        const exchangeText = exchangeParts.join("\n");

        if (!shouldAttemptCapture(exchangeText)) return;

        const hookOpts = buildRunnerOpts(config);
        const knownProjects = await loadProjects(hookOpts);
        const agentName = process.env.PALAIA_AGENT || undefined;

        // LLM-based extraction (primary)
        let llmHandled = false;
        try {
          const results = await extractWithLLM(messages, api.config, {
            captureModel: config.captureModel,
          }, knownProjects);

          for (const r of results) {
            // Only persist extractions meeting the configured significance bar.
            if (r.significance >= config.captureMinSignificance) {
              // Explicit hints from the conversation override LLM guesses;
              // config-level captureScope/captureProject override both.
              const hintProject = collectedHints.find((h) => h.project)?.project;
              const hintScope = collectedHints.find((h) => h.scope)?.scope;
              const effectiveProject = hintProject || r.project;
              const scope = config.captureScope
                ? sanitizeScope(config.captureScope, "team", true)
                : sanitizeScope(hintScope || r.scope, "team", false);
              const tags = [...r.tags];
              if (!tags.includes("auto-capture")) tags.push("auto-capture");

              const args: string[] = [
                "write", r.content,
                "--type", r.type,
                "--tags", tags.join(",") || "auto-capture",
                "--scope", scope,
              ];
              const project = config.captureProject || effectiveProject;
              if (project) args.push("--project", project);
              if (agentName) args.push("--agent", agentName);

              await run(args, { ...hookOpts, timeoutMs: 10_000 });
            }
          }
          // NOTE(review): set even when zero results met the significance
          // threshold, so the rule-based fallback is skipped whenever the LLM
          // ran without throwing — presumably intentional ("trust the LLM's
          // judgment"); confirm.
          llmHandled = true;
        } catch {
          // Fall through to rule-based
        }

        // Rule-based fallback
        if (!llmHandled) {
          if (config.captureFrequency === "significant") {
            const significance = extractSignificance(exchangeText);
            if (!significance) return;
            const tags = [...significance.tags];
            if (!tags.includes("auto-capture")) tags.push("auto-capture");
            const scope = config.captureScope
              ? sanitizeScope(config.captureScope, "team", true)
              : "team";
            const args: string[] = [
              "write", significance.summary,
              "--type", significance.type,
              "--tags", tags.join(","),
              "--scope", scope,
            ];
            if (agentName) args.push("--agent", agentName);
            await run(args, { ...hookOpts, timeoutMs: 10_000 });
          } else {
            // "Every turn" mode: store a truncated digest of the last few
            // exchanges (4 parts x 200 chars, capped at 500 total).
            const summary = exchangeParts.slice(-4).map(p => p.slice(0, 200)).join(" | ").slice(0, 500);
            const args: string[] = [
              "write", summary,
              "--type", "memory",
              "--tags", "auto-capture",
              "--scope", config.captureScope || "team",
            ];
            if (agentName) args.push("--agent", agentName);
            await run(args, { ...hookOpts, timeoutMs: 10_000 });
          }
        }

        // NOTE(review): reached even when the LLM path wrote nothing (see
        // llmHandled above), so this flag means "capture was attempted", not
        // "an entry was written" — confirm that is the intended semantics.
        _lastAssembleState.capturedInThisTurn = true;
      } catch (error) {
        logger.warn(`[palaia] ContextEngine ingest failed: ${error}`);
      }
    },

    /**
     * Assemble: recall logic with token budget awareness from hooks/recall.ts.
     * Returns memory context string and token estimate, respecting the budget.
     *
     * Recall order: embed server (if configured) -> `palaia query` CLI ->
     * `palaia list` fallback. Results are reranked by type weight, filtered
     * against blocked entries, then packed into the char budget.
     */
    async assemble(budget: { maxTokens: number }): Promise<{ content: string; tokenEstimate: number }> {
      // Reset recall flag but preserve capture flag set by ingest() this turn.
      _lastAssembleState = { recallOccurred: false, capturedInThisTurn: _lastAssembleState.capturedInThisTurn };

      if (!config.memoryInject) {
        return { content: "", tokenEstimate: 0 };
      }

      try {
        // Load and resolve priorities (Issue #121)
        // NOTE(review): passes `config.workspace || ""` here but bare
        // `config.workspace` to loadPluginState() below — confirm both
        // helpers treat an undefined/empty workspace the same way.
        const prio = await loadPriorities(config.workspace || "");
        const agentId = process.env.PALAIA_AGENT || undefined;
        const project = config.captureProject || undefined;
        const resolvedPrio = resolvePriorities(prio, {
          recallTypeWeight: config.recallTypeWeight,
          recallMinScore: config.recallMinScore,
          maxInjectedChars: config.maxInjectedChars,
          tier: config.tier,
        }, agentId, project);

        // Convert token budget to char budget (~4 chars per token)
        const maxChars = Math.min(resolvedPrio.maxInjectedChars || 4000, budget.maxTokens * 4);
        const limit = Math.min(config.maxResults || 10, 20);
        let entries: QueryResult["results"] = [];

        if (config.recallMode === "query" && _lastMessages.length > 0) {
          const userMessage = buildRecallQuery(_lastMessages);
          // Skip trivially short queries (< 5 chars).
          if (userMessage && userMessage.length >= 5) {
            // Try embed server first
            let serverQueried = false;
            if (config.embeddingServer) {
              try {
                const mgr = getEmbedServerManager(opts);
                const resp = await mgr.query({
                  text: userMessage,
                  top_k: limit,
                  include_cold: resolvedPrio.tier === "all",
                }, config.timeoutMs || 3000);
                if (resp?.result?.results && Array.isArray(resp.result.results)) {
                  entries = resp.result.results;
                  serverQueried = true;
                }
              } catch {
                // Fall through to CLI
              }
            }

            if (!serverQueried) {
              try {
                const { runJson } = await import("./runner.js");
                const queryArgs: string[] = ["query", userMessage, "--limit", String(limit)];
                if (resolvedPrio.tier === "all") queryArgs.push("--all");
                const result = await runJson<QueryResult>(queryArgs, { ...opts, timeoutMs: 15000 });
                if (result && Array.isArray(result.results)) {
                  entries = result.results;
                }
              } catch {
                // Fall through to list
              }
            }
          }
        }

        // List fallback
        if (entries.length === 0) {
          try {
            const { runJson } = await import("./runner.js");
            const listArgs: string[] = ["list"];
            if (resolvedPrio.tier === "all") {
              listArgs.push("--all");
            } else {
              listArgs.push("--tier", resolvedPrio.tier || "hot");
            }
            const result = await runJson<QueryResult>(listArgs, opts);
            if (result && Array.isArray(result.results)) {
              entries = result.results;
            }
          } catch {
            return { content: "", tokenEstimate: 0 };
          }
        }

        if (entries.length === 0) {
          return { content: "", tokenEstimate: 0 };
        }

        // Apply type-weighted reranking and blocked filtering (Issue #121)
        const rankedRaw = rerankByTypeWeight(entries, resolvedPrio.recallTypeWeight);
        const ranked = filterBlocked(rankedRaw, resolvedPrio.blocked);

        // Build context string
        // Short codes keep the injected prefix compact: [t/m], [p/pr], etc.
        const SCOPE_SHORT: Record<string, string> = { team: "t", private: "p", public: "pub" };
        const TYPE_SHORT: Record<string, string> = { memory: "m", process: "pr", task: "tk" };

        let text = "## Active Memory (Palaia)\n\n";
        let chars = text.length;

        for (const entry of ranked) {
          const scopeKey = SCOPE_SHORT[entry.scope] || entry.scope;
          const typeKey = TYPE_SHORT[entry.type] || entry.type;
          const prefix = `[${scopeKey}/${typeKey}]`;

          let line: string;
          // Avoid repeating the title when the body already starts with it.
          if (entry.body.toLowerCase().startsWith(entry.title.toLowerCase())) {
            line = `${prefix} ${entry.body}\n\n`;
          } else {
            line = `${prefix} ${entry.title}\n${entry.body}\n\n`;
          }

          // Stop at the first entry that would overflow the char budget.
          if (chars + line.length > maxChars) break;
          text += line;
          chars += line.length;
        }

        // Build nudge text and check remaining budget before appending
        const USAGE_NUDGE = "[palaia] auto-capture=on. Manual write: --type process (SOPs/checklists) or --type task (todos with assignee/deadline) only. Conversation knowledge is auto-captured — do not duplicate with manual writes.";
        let agentNudges = "";
        try {
          // Side effect: bumps the recall counter and stamps the first recall
          // time in persisted plugin state (drives nudge thresholds).
          const pluginState = await loadPluginState(config.workspace);
          pluginState.successfulRecalls++;
          if (!pluginState.firstRecallTimestamp) {
            pluginState.firstRecallTimestamp = new Date().toISOString();
          }
          const { nudges } = checkNudges(pluginState);
          if (nudges.length > 0) {
            agentNudges = "\n\n## Agent Nudge (Palaia)\n\n" + nudges.join("\n\n");
          }
          await savePluginState(pluginState, config.workspace);
        } catch {
          // Non-fatal
        }

        // Before adding nudges, check remaining budget
        // NOTE(review): when agentNudges is non-empty it already begins with
        // "\n\n", so this join yields four consecutive newlines (and a
        // trailing "\n\n" when it is empty) — cosmetic; confirm acceptable.
        const nudgeText = USAGE_NUDGE + "\n\n" + agentNudges;
        if (chars + nudgeText.length <= maxChars) {
          text += nudgeText;
        }
        // If nudges don't fit, skip them — the recall content is more important

        // A "recall" only counts if at least one entry scored above threshold.
        _lastAssembleState.recallOccurred = entries.some(
          (e) => typeof e.score === "number" && e.score >= resolvedPrio.recallMinScore,
        );

        return {
          content: text,
          tokenEstimate: estimateTokens(text),
        };
      } catch (error) {
        logger.warn(`[palaia] ContextEngine assemble failed: ${error}`);
        return { content: "", tokenEstimate: 0 };
      }
    },

    /**
     * Compact: trigger `palaia gc` via runner.
     * Uses a longer 30s timeout since GC can touch the whole store.
     */
    async compact(): Promise<void> {
      try {
        await run(["gc"], { ...opts, timeoutMs: 30_000 });
        logger.info("[palaia] GC compaction completed");
      } catch (error) {
        logger.warn(`[palaia] GC compaction failed: ${error}`);
      }
    },

    /**
     * AfterTurn: state save + emoji reactions.
     * Called after each agent turn completes.
     * The `turn` payload is currently unused; this only clears closure state.
     */
    async afterTurn(turn: unknown): Promise<void> {
      // State save is handled implicitly by assemble() nudge logic.
      // Reset for next turn.
      _lastAssembleState = { recallOccurred: false, capturedInThisTurn: false };
      _lastMessages = [];
    },

    /**
     * PrepareSubagentSpawn: pass workspace + agent identity.
     * Returns context for the sub-agent to inherit.
     */
    async prepareSubagentSpawn(parentContext: unknown): Promise<unknown> {
      // api.workspace may be a plain path string or an object with a `dir`.
      const workspace = typeof api.workspace === "string"
        ? api.workspace
        : api.workspace?.dir;
      const agentId = process.env.PALAIA_AGENT || undefined;

      return {
        palaiaWorkspace: workspace || config.workspace,
        palaiaAgentId: agentId,
        parentContext,
      };
    },

    /**
     * OnSubagentEnded: placeholder for future sub-agent memory merge.
     * Will eventually merge sub-agent captures into parent context.
     */
    async onSubagentEnded(_result: unknown): Promise<void> {
      // Future: merge sub-agent memory captures into parent agent's context.
      // For now, sub-agents write to the same palaia store, so no merge needed.
    },
  };
}
|