thoughtgear 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,743 @@
+ /**
+  * PromptHandler — minimal emulation of openclaw's "chain of thoughts" loop.
+  *
+  * Shape mirrors src/agents/pi-embedded-runner:
+  * handlePrompt() persists the inbound message + a RunState, then asks the
+  * Executor to drive the loop. Each iteration (continueRun) is stateless:
+  * it rebuilds the prompt from ORM history, opens a model stream, executes
+  * any tool calls, persists results, and either finishes or schedules the
+  * next iteration. In Lambda mode the next iteration is a fresh invocation.
+  */
+ import { randomUUID } from "node:crypto";
+ import { GoogleGenAI } from "@google/genai";
+ import OpenAI from "openai";
+ /**
+  * OpenAI / Anthropic / Gemini all require tool/function names to match a
+  * narrow charset (roughly `^[A-Za-z0-9_-]+$`). Skill keys are human-friendly
+  * (may contain spaces). We translate one-way and match back by normalized key.
+  */
+ function normalizeToolName(key) {
+     return key.replace(/[^A-Za-z0-9_-]+/g, "_");
+ }
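+ // For example, a skill key like "web search" goes over the wire as
+ // "web_search", and executeTool() below matches the call back by comparing
+ // normalized keys:
+ //
+ //   normalizeToolName("web search") === "web_search"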
+ export class ORM {
+     config;
+     adapter;
+     constructor(config) {
+         this.config = config;
+         this.adapter =
+             config.type === "memory"
+                 ? new InMemoryOrmAdapter()
+                 : config.type === "mongodb"
+                     ? new MongoOrmAdapter(config)
+                     : new SqlOrmAdapter(config);
+     }
+     saveMessage(msg) {
+         return this.adapter.saveMessage(msg);
+     }
+     getHistory(runId) {
+         return this.adapter.getHistory(runId);
+     }
+     getSessionHistory(sessionId) {
+         return this.adapter.getSessionHistory(sessionId);
+     }
+     saveRunState(state) {
+         return this.adapter.saveRunState(state);
+     }
+     getRunState(runId) {
+         return this.adapter.getRunState(runId);
+     }
+     cacheGet(key) {
+         return this.adapter.cacheGet(key);
+     }
+     cacheSet(key, value, ttl) {
+         return this.adapter.cacheSet(key, value, ttl);
+     }
+     saveMemory(scope, key, value) {
+         return this.adapter.saveMemory(scope, key, value);
+     }
+     getMemory(scope, key) {
+         return this.adapter.getMemory(scope, key);
+     }
+ }
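+ // Illustrative sketch (not shipped code): the in-memory adapter needs no
+ // external services, so it is the easiest way to exercise the ORM facade.
+ // The runId and message fields below are example values only.
+ //
+ //   const orm = new ORM({ type: "memory" });
+ //   await orm.saveMessage({
+ //       id: randomUUID(),
+ //       runId: "run-1",
+ //       role: "user",
+ //       blocks: [{ type: "text", text: "hello" }],
+ //       createdAt: new Date(),
+ //       iteration: 0,
+ //   });
+ //   console.log(await orm.getHistory("run-1")); // -> [the user message]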
+ class InMemoryOrmAdapter {
+     messages = new Map();
+     runs = new Map();
+     cache = new Map();
+     memory = new Map();
+     async saveMessage(msg) {
+         const list = this.messages.get(msg.runId) ?? [];
+         list.push(msg);
+         this.messages.set(msg.runId, list);
+     }
+     async getHistory(runId) {
+         return [...(this.messages.get(runId) ?? [])];
+     }
+     async getSessionHistory(sessionId) {
+         const out = [];
+         for (const list of this.messages.values()) {
+             for (const m of list)
+                 if (m.sessionId === sessionId)
+                     out.push(m);
+         }
+         return out.sort((a, b) => a.createdAt.getTime() - b.createdAt.getTime());
+     }
+     async saveRunState(state) {
+         this.runs.set(state.runId, { ...state });
+     }
+     async getRunState(runId) {
+         const s = this.runs.get(runId);
+         return s ? { ...s } : null;
+     }
+     async cacheGet(key) {
+         return this.cache.get(key) ?? null;
+     }
+     // TTL is accepted by the ORM facade but ignored here; in-memory entries
+     // live for the life of the process.
+     async cacheSet(key, value) {
+         this.cache.set(key, value);
+     }
+     async saveMemory(scope, key, value) {
+         this.memory.set(`${scope}:${key}`, value);
+     }
+     async getMemory(scope, key) {
+         return this.memory.get(`${scope}:${key}`) ?? null;
+     }
+ }
+ class MongoOrmAdapter {
+     cfg;
+     // Wire `mongodb` driver here. Collections: messages, run_states, cache, memory.
+     constructor(cfg) {
+         this.cfg = cfg;
+     }
+     async saveMessage(_msg) {
+         /* db.collection('messages').insertOne(_msg) */
+     }
+     async getHistory(_runId) {
+         return [];
+     }
+     async getSessionHistory(_sessionId) {
+         /* db.collection('messages').find({ sessionId: _sessionId }).sort({ createdAt: 1 }) */
+         return [];
+     }
+     async saveRunState(_state) {
+         /* upsert by runId */
+     }
+     async getRunState(_runId) {
+         return null;
+     }
+     async cacheGet(_key) {
+         return null;
+     }
+     async cacheSet(_key, _value, _ttl) { }
+     async saveMemory(_scope, _key, _value) { }
+     async getMemory(_scope, _key) {
+         return null;
+     }
+ }
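+ // Sketch of the wired adapter with the official `mongodb` driver
+ // (illustrative; `cfg.url` and `cfg.dbName` are assumed config fields, and
+ // connection handling is deliberately naive). Collection names follow the
+ // comment above:
+ //
+ //   import { MongoClient } from "mongodb";
+ //   const client = new MongoClient(cfg.url);
+ //   const db = client.db(cfg.dbName);
+ //   async saveMessage(msg) {
+ //       await db.collection("messages").insertOne(msg);
+ //   }
+ //   async getSessionHistory(sessionId) {
+ //       return db.collection("messages")
+ //           .find({ sessionId })
+ //           .sort({ createdAt: 1 })
+ //           .toArray();
+ //   }
+ //   async saveRunState(state) {
+ //       await db.collection("run_states")
+ //           .updateOne({ runId: state.runId }, { $set: state }, { upsert: true });
+ //   }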
+ class SqlOrmAdapter {
+     cfg;
+     // Wire `kysely` / `pg` / `better-sqlite3`. Tables mirror Mongo collections.
+     constructor(cfg) {
+         this.cfg = cfg;
+     }
+     async saveMessage(_msg) { }
+     async getHistory(_runId) {
+         return [];
+     }
+     async getSessionHistory(_sessionId) {
+         return [];
+     }
+     async saveRunState(_state) { }
+     async getRunState(_runId) {
+         return null;
+     }
+     async cacheGet(_key) {
+         return null;
+     }
+     async cacheSet(_key, _value, _ttl) { }
+     async saveMemory(_scope, _key, _value) { }
+     async getMemory(_scope, _key) {
+         return null;
+     }
+ }
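+ // Sketch of the SQL wiring with `better-sqlite3` (illustrative; the table and
+ // column names are assumptions that mirror the Mongo collections, blocks are
+ // serialized as JSON, and `rowToMessage` is a hypothetical rehydrator):
+ //
+ //   import Database from "better-sqlite3";
+ //   const db = new Database(cfg.file);
+ //   async saveMessage(msg) {
+ //       db.prepare(
+ //           "INSERT INTO messages (id, run_id, session_id, role, blocks, created_at, iteration) " +
+ //           "VALUES (?, ?, ?, ?, ?, ?, ?)"
+ //       ).run(msg.id, msg.runId, msg.sessionId ?? null, msg.role,
+ //             JSON.stringify(msg.blocks), msg.createdAt.toISOString(), msg.iteration);
+ //   }
+ //   async getHistory(runId) {
+ //       return db.prepare("SELECT * FROM messages WHERE run_id = ? ORDER BY created_at")
+ //           .all(runId)
+ //           .map(rowToMessage);
+ //   }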
+ export function createLLMProvider(model) {
+     switch (model.provider) {
+         case "anthropic":
+             return new AnthropicProvider(model);
+         case "openai":
+             return new OpenAIProvider(model);
+         case "google":
+             return new GoogleProvider(model);
+         case "mock":
+             return new MockProvider();
+         default:
+             // Fail loudly here rather than letting llm.stream() crash later.
+             throw new Error(`Unknown model provider: ${model.provider}`);
+     }
+ }
+ class AnthropicProvider {
+     cfg;
+     constructor(cfg) {
+         this.cfg = cfg;
+     }
+     async stream(_args) {
+         // Wire @anthropic-ai/sdk messages.stream(); accumulate blocks; map
+         // content_block_delta -> onPartialReply / onReasoningStream; map
+         // tool_use blocks -> ContentBlock('tool_call'); finalize on message_stop.
+         return { blocks: [{ type: "text", text: "" }], stopReason: "end_turn" };
+     }
+ }
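+ // Sketch of the wiring described above, using the `@anthropic-ai/sdk` stream
+ // helper (illustrative; `toAnthropicMessages` is a hypothetical mapper that
+ // would mirror toOpenAIMessages below):
+ //
+ //   import Anthropic from "@anthropic-ai/sdk";
+ //   const client = new Anthropic({ apiKey: this.cfg.apiKey });
+ //   const stream = client.messages.stream({
+ //       model: this.cfg.name,
+ //       max_tokens: 4096,
+ //       system: args.system,
+ //       messages: toAnthropicMessages(args.messages),
+ //       tools: args.tools.map((t) => ({
+ //           name: normalizeToolName(t.key),
+ //           description: t.description,
+ //           input_schema: t.parameters ?? { type: "object", properties: {} },
+ //       })),
+ //   });
+ //   stream.on("text", (delta) => args.callbacks?.onPartialReply?.(delta, args.runId));
+ //   const final = await stream.finalMessage();
+ //   // final.content holds text / tool_use blocks; final.stop_reason maps onto
+ //   // "end_turn" | "max_tokens" | "tool_use" almost one-to-one.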
+ /**
+  * OpenAI provider — backed by the official `openai` SDK, using the
+  * Chat Completions streaming API.
+  *
+  * - delta.content -> ContentBlock('text') + onPartialReply
+  * - delta.tool_calls[].function -> ContentBlock('tool_call') (accumulated
+  *   across chunks; OpenAI streams arguments as JSON-string deltas)
+  * - finish_reason "stop" | "length" | "tool_calls" -> stopReason
+  */
+ class OpenAIProvider {
+     cfg;
+     client;
+     constructor(cfg) {
+         this.cfg = cfg;
+         this.client = new OpenAI({ apiKey: cfg.apiKey, baseURL: cfg.baseUrl });
+     }
+     async stream(args) {
+         const messages = this.toOpenAIMessages(args.system, args.messages);
+         const tools = args.tools.length > 0
+             ? args.tools.map((t) => ({
+                 type: "function",
+                 function: {
+                     name: normalizeToolName(t.key),
+                     description: t.description,
+                     parameters: t.parameters ?? { type: "object", properties: {}, additionalProperties: true },
+                 },
+             }))
+             : undefined;
+         const stream = await this.client.chat.completions.create({
+             model: this.cfg.name,
+             messages,
+             tools,
+             stream: true,
+         });
+         let textBuf = "";
+         let finishReason;
+         // Accumulate streamed tool calls by index.
+         const toolAcc = new Map();
+         for await (const chunk of stream) {
+             const choice = chunk.choices?.[0];
+             if (!choice)
+                 continue;
+             const delta = choice.delta;
+             if (delta?.content) {
+                 textBuf += delta.content;
+                 args.callbacks?.onPartialReply?.(delta.content, args.runId);
+             }
+             if (delta?.tool_calls) {
+                 for (const tc of delta.tool_calls) {
+                     const idx = tc.index ?? 0;
+                     const slot = toolAcc.get(idx) ?? { argsStr: "" };
+                     if (tc.id)
+                         slot.id = tc.id;
+                     if (tc.function?.name)
+                         slot.name = tc.function.name;
+                     if (tc.function?.arguments)
+                         slot.argsStr += tc.function.arguments;
+                     toolAcc.set(idx, slot);
+                 }
+             }
+             if (choice.finish_reason)
+                 finishReason = choice.finish_reason;
+         }
+         const blocks = [];
+         if (textBuf) {
+             const block = { type: "text", text: textBuf };
+             blocks.push(block);
+             args.callbacks?.onBlockReply?.(block, args.runId);
+         }
+         const toolCalls = [];
+         for (const slot of toolAcc.values()) {
+             let parsed = {};
+             if (slot.argsStr) {
+                 try {
+                     parsed = JSON.parse(slot.argsStr);
+                 }
+                 catch {
+                     parsed = { _raw: slot.argsStr };
+                 }
+             }
+             const call = {
+                 type: "tool_call",
+                 id: slot.id ?? randomUUID(),
+                 name: slot.name ?? "",
+                 input: parsed,
+             };
+             toolCalls.push(call);
+             args.callbacks?.onBlockReply?.(call, args.runId);
+         }
+         blocks.push(...toolCalls);
+         let stopReason = "end_turn";
+         if (finishReason === "tool_calls" || toolCalls.length > 0)
+             stopReason = "tool_use";
+         else if (finishReason === "length")
+             stopReason = "max_tokens";
+         else if (finishReason === "content_filter")
+             stopReason = "error";
+         return { blocks, stopReason };
+     }
+     toOpenAIMessages(system, messages) {
+         const out = [
+             { role: "system", content: system },
+         ];
+         for (const msg of messages) {
+             if (msg.role === "system") {
+                 const text = msg.blocks
+                     .filter((b) => b.type === "text")
+                     .map((b) => b.text)
+                     .join("\n");
+                 if (text)
+                     out.push({ role: "system", content: text });
+                 continue;
+             }
+             if (msg.role === "user") {
+                 const parts = [];
+                 for (const b of msg.blocks) {
+                     if (b.type === "text")
+                         parts.push({ type: "text", text: b.text });
+                     else if (b.type === "file" && b.file.mimeType.startsWith("image/")) {
+                         parts.push({
+                             type: "image_url",
+                             image_url: { url: `data:${b.file.mimeType};base64,${b.file.data}` },
+                         });
+                     }
+                 }
+                 out.push({ role: "user", content: parts.length > 0 ? parts : "" });
+                 continue;
+             }
+             if (msg.role === "assistant") {
+                 const text = msg.blocks
+                     .filter((b) => b.type === "text")
+                     .map((b) => b.text)
+                     .join("");
+                 const toolCalls = msg.blocks
+                     .filter((b) => b.type === "tool_call")
+                     .map((b) => ({
+                         id: b.id,
+                         type: "function",
+                         function: {
+                             name: normalizeToolName(b.name),
+                             arguments: JSON.stringify(b.input ?? {}),
+                         },
+                     }));
+                 out.push({
+                     role: "assistant",
+                     content: text || null,
+                     ...(toolCalls.length > 0 ? { tool_calls: toolCalls } : {}),
+                 });
+                 continue;
+             }
+             if (msg.role === "tool") {
+                 for (const b of msg.blocks) {
+                     if (b.type === "tool_result") {
+                         out.push({ role: "tool", tool_call_id: b.toolCallId, content: b.output });
+                     }
+                 }
+             }
+         }
+         return out;
+     }
+ }
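+ // Why the by-index accumulator above: Chat Completions streams each tool call
+ // as many small deltas. A call like get_weather({"city":"Oslo"}) arrives
+ // roughly as (shapes illustrative):
+ //
+ //   { index: 0, id: "call_abc", function: { name: "get_weather" } }
+ //   { index: 0, function: { arguments: '{"ci' } }
+ //   { index: 0, function: { arguments: 'ty":"Oslo"}' } }
+ //
+ // Only once the stream ends does JSON.parse on the concatenated argsStr
+ // succeed, which is why parsing is deferred until after the loop.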
+ /**
+  * Google Gemini provider — backed by @google/genai. Streams generateContent
+  * chunks and normalizes them into ContentBlocks.
+  *
+  * - parts[].text -> ContentBlock('text') + onPartialReply
+  * - parts[].thought -> ContentBlock('reasoning') + onReasoningStream
+  * - parts[].functionCall -> ContentBlock('tool_call')
+  * - finishReason STOP|MAX_TOKENS|SAFETY|RECITATION -> stopReason (tool use
+  *   is inferred from functionCall parts, not from finishReason)
+  *
+  * Gemini quirks:
+  * - System prompt goes through `systemInstruction`, not a leading message.
+  * - Tool results return as `role: 'user'` parts of type `functionResponse`.
+  */
+ class GoogleProvider {
+     cfg;
+     ai;
+     constructor(cfg) {
+         this.cfg = cfg;
+         this.ai = new GoogleGenAI({ apiKey: cfg.apiKey });
+     }
+     async stream(args) {
+         const contents = this.toGeminiContents(args.messages);
+         const config = { systemInstruction: args.system };
+         if (args.tools.length > 0) {
+             config.tools = [
+                 {
+                     functionDeclarations: args.tools.map((t) => ({
+                         // Normalize like the other providers; executeTool()
+                         // matches the call back by normalized key.
+                         name: normalizeToolName(t.key),
+                         description: t.description,
+                         parameters: t.parameters ?? { type: "object", properties: {} },
+                     })),
+                 },
+             ];
+         }
+         const stream = await this.ai.models.generateContentStream({
+             model: this.cfg.name,
+             contents,
+             config,
+         });
+         let textBuf = "";
+         let reasoningBuf = "";
+         const toolCalls = [];
+         let finishReason;
+         for await (const chunk of stream) {
+             const parts = chunk.candidates?.[0]?.content?.parts ?? [];
+             for (const part of parts) {
+                 if (typeof part.text === "string" && part.text.length > 0) {
+                     if (part.thought) {
+                         reasoningBuf += part.text;
+                         args.callbacks?.onReasoningStream?.(part.text, args.runId);
+                     }
+                     else {
+                         textBuf += part.text;
+                         args.callbacks?.onPartialReply?.(part.text, args.runId);
+                     }
+                 }
+                 if (part.functionCall) {
+                     const call = {
+                         type: "tool_call",
+                         id: randomUUID(),
+                         name: part.functionCall.name ?? "",
+                         input: part.functionCall.args ?? {},
+                     };
+                     toolCalls.push(call);
+                     args.callbacks?.onBlockReply?.(call, args.runId);
+                 }
+             }
+             const fr = chunk.candidates?.[0]?.finishReason;
+             if (fr)
+                 finishReason = fr;
+         }
+         const blocks = [];
+         if (reasoningBuf) {
+             const block = { type: "reasoning", text: reasoningBuf };
+             blocks.push(block);
+             args.callbacks?.onBlockReply?.(block, args.runId);
+         }
+         if (textBuf) {
+             const block = { type: "text", text: textBuf };
+             blocks.push(block);
+             args.callbacks?.onBlockReply?.(block, args.runId);
+         }
+         blocks.push(...toolCalls);
+         let stopReason = "end_turn";
+         if (toolCalls.length > 0)
+             stopReason = "tool_use";
+         else if (finishReason === "MAX_TOKENS")
+             stopReason = "max_tokens";
+         else if (finishReason === "SAFETY" || finishReason === "RECITATION")
+             stopReason = "error";
+         return { blocks, stopReason };
+     }
+     toGeminiContents(messages) {
+         const contents = [];
+         for (const msg of messages) {
+             if (msg.role === "system")
+                 continue;
+             const role = msg.role === "assistant" ? "model" : "user";
+             const parts = [];
+             for (const block of msg.blocks) {
+                 if (block.type === "text")
+                     parts.push({ text: block.text });
+                 else if (block.type === "tool_call") {
+                     parts.push({ functionCall: { name: block.name, args: block.input } });
+                 }
+                 else if (block.type === "tool_result") {
+                     // Gemini expects the *function name* here; the result block
+                     // only carries the call id, so it stands in.
+                     parts.push({
+                         functionResponse: {
+                             name: block.toolCallId,
+                             response: { output: block.output },
+                         },
+                     });
+                 }
+                 else if (block.type === "file") {
+                     parts.push({
+                         inlineData: { mimeType: block.file.mimeType, data: block.file.data },
+                     });
+                 }
+             }
+             if (parts.length > 0)
+                 contents.push({ role, parts });
+         }
+         return contents;
+     }
+ }
+ /** Deterministic fake that emits one tool call on iteration 0, text on >0. */
+ class MockProvider {
+     async stream(args) {
+         const turn = args.messages.filter((m) => m.role === "assistant").length;
+         if (turn === 0 && args.tools[0]) {
+             const block = {
+                 type: "tool_call",
+                 id: randomUUID(),
+                 name: args.tools[0].key,
+                 input: { q: "demo" },
+             };
+             args.callbacks?.onBlockReply?.(block, args.runId);
+             return { blocks: [block], stopReason: "tool_use" };
+         }
+         const block = { type: "text", text: "Done." };
+         args.callbacks?.onPartialReply?.("Done.", args.runId);
+         args.callbacks?.onBlockReply?.(block, args.runId);
+         return { blocks: [block], stopReason: "end_turn" };
+     }
+ }
+ export class LocalExecutor {
+     handler;
+     bind(handler) {
+         this.handler = handler;
+     }
+     async scheduleNextIteration(runId) {
+         // Yield to the event loop so the current iteration's awaits unwind first.
+         await new Promise((r) => setImmediate(r));
+         await this.handler.continueRun(runId);
+     }
+ }
+ export class LambdaExecutor {
+     invoke;
+     constructor(invoke) {
+         this.invoke = invoke;
+     }
+     async scheduleNextIteration(runId) {
+         // Fire a fresh invocation; the entry handler routes action=continue to
+         // PromptHandler.continueRun(). The current invocation returns immediately.
+         await this.invoke({ runId, action: "continue" });
+     }
+ }
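+ // Sketch of an `invoke` callback for LambdaExecutor built on
+ // @aws-sdk/client-lambda (illustrative; the function name is an assumption):
+ //
+ //   import { LambdaClient, InvokeCommand } from "@aws-sdk/client-lambda";
+ //   const lambda = new LambdaClient({});
+ //   const executor = new LambdaExecutor(async (event) => {
+ //       await lambda.send(new InvokeCommand({
+ //           FunctionName: "prompt-handler",          // assumed function name
+ //           InvocationType: "Event",                 // fire-and-forget; don't wait
+ //           Payload: new TextEncoder().encode(JSON.stringify(event)),
+ //       }));
+ //   });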
+ export class PromptHandler {
+     context;
+     tools;
+     model;
+     orm;
+     executor;
+     callbacks;
+     maxIterations;
+     compactionCharThreshold;
+     llm;
+     /** Session this handler is bound to. Undefined means "no session" — each
+      * prompt is an independent run with no shared history. */
+     sessionId;
+     constructor(opts) {
+         this.context = opts.context;
+         this.tools = opts.tools;
+         this.model = opts.model;
+         this.orm = opts.orm ?? new ORM(opts.db);
+         this.sessionId = opts.sessionId;
+         this.callbacks = opts.callbacks ?? {};
+         this.maxIterations = opts.maxIterations ?? 16;
+         this.compactionCharThreshold = opts.compactionCharThreshold ?? 80_000;
+         this.llm = createLLMProvider(opts.model);
+         const executor = opts.executor ?? new LocalExecutor();
+         if (executor instanceof LocalExecutor)
+             executor.bind(this);
+         this.executor = executor;
+     }
+     /**
+      * Entry point. Persists the inbound user message + initial RunState,
+      * then hands control to the Executor to drive the first iteration.
+      */
+     async handlePrompt(input) {
+         const runId = randomUUID();
+         const now = new Date();
+         const blocks = [{ type: "text", text: input.text }];
+         for (const f of input.files ?? [])
+             blocks.push({ type: "file", file: f });
+         await this.orm.saveMessage({
+             id: randomUUID(),
+             runId,
+             sessionId: this.sessionId,
+             role: "user",
+             blocks,
+             createdAt: now,
+             iteration: 0,
+         });
+         await this.orm.saveRunState({
+             runId,
+             sessionId: this.sessionId,
+             status: "pending",
+             iteration: 0,
+             createdAt: now,
+             updatedAt: now,
+         });
+         await this.executor.scheduleNextIteration(runId);
+         return { runId };
+     }
+     /**
+      * One iteration of the loop. Public so a Lambda entry-point can route
+      * { action: 'continue', runId } straight here.
+      *
+      * Phases — mirrors run/attempt.ts:
+      *   1. Load state + history.
+      *   2. Compact if oversized.
+      *   3. Build system prompt + payload.
+      *   4. Open model stream; collect blocks.
+      *   5. Persist assistant message.
+      *   6. If tool_use: execute tools, persist tool_result messages,
+      *      schedule next iteration. Else: finalize.
+      */
+     async continueRun(runId) {
+         const state = await this.orm.getRunState(runId);
+         if (!state || state.status === "done" || state.status === "failed")
+             return;
+         if (state.iteration >= this.maxIterations) {
+             await this.fail(state, `max iterations (${this.maxIterations}) exceeded`);
+             return;
+         }
+         // Session-bound handlers see every prior turn in the session; otherwise
+         // history is scoped to this single run.
+         let history = state.sessionId
+             ? await this.orm.getSessionHistory(state.sessionId)
+             : await this.orm.getHistory(runId);
+         history = await this.maybeCompact(history, state);
+         const system = this.buildSystemPrompt();
+         await this.updateState(state, { status: "streaming" });
+         let result;
+         try {
+             result = await this.llm.stream({
+                 system,
+                 messages: history,
+                 tools: this.tools,
+                 callbacks: this.callbacks,
+                 runId,
+             });
+         }
+         catch (err) {
+             console.error("[PromptHandler] stream error:", err);
+             await this.fail(state, err.message);
+             return;
+         }
+         const assistantMsg = {
+             id: randomUUID(),
+             runId,
+             sessionId: state.sessionId,
+             role: "assistant",
+             blocks: result.blocks,
+             createdAt: new Date(),
+             iteration: state.iteration,
+         };
+         await this.orm.saveMessage(assistantMsg);
+         const toolCalls = result.blocks.filter((b) => b.type === "tool_call");
+         if (result.stopReason === "tool_use" && toolCalls.length > 0) {
+             await this.updateState(state, { status: "awaiting_tools" });
+             for (const call of toolCalls) {
+                 this.callbacks.onToolStart?.(call, runId);
+                 const resultBlock = await this.executeTool(call);
+                 this.callbacks.onToolResult?.(resultBlock, runId);
+                 await this.orm.saveMessage({
+                     id: randomUUID(),
+                     runId,
+                     sessionId: state.sessionId,
+                     role: "tool",
+                     blocks: [resultBlock],
+                     createdAt: new Date(),
+                     iteration: state.iteration,
+                 });
+             }
+             await this.updateState(state, {
+                 status: "pending",
+                 iteration: state.iteration + 1,
+                 lastStopReason: "tool_use",
+             });
+             await this.executor.scheduleNextIteration(runId);
+             return;
+         }
+         await this.updateState(state, { status: "done", lastStopReason: result.stopReason });
+         this.callbacks.onDone?.(runId);
+     }
+     // -------------------------------------------------------------------------
+     // Helpers
+     // -------------------------------------------------------------------------
+     buildSystemPrompt() {
+         if (this.tools.length === 0)
+             return this.context;
+         const skills = this.tools
+             .map((t) => `## ${t.key}\n${t.description}\n\n${t.content}`)
+             .join("\n\n---\n\n");
+         return `${this.context}\n\n# Available Tools / Skills\n\n${skills}`;
+     }
+     async executeTool(call) {
+         const tool = this.tools.find((t) => t.key === call.name) ??
+             this.tools.find((t) => normalizeToolName(t.key) === call.name);
+         if (!tool) {
+             return {
+                 type: "tool_result",
+                 toolCallId: call.id,
+                 output: `Error: tool "${call.name}" not registered`,
+                 isError: true,
+             };
+         }
+         // Function-style tools: run the handler and return its result.
+         if (tool.handler) {
+             try {
+                 const output = await tool.handler({ params: call.input });
+                 return { type: "tool_result", toolCallId: call.id, output };
+             }
+             catch (err) {
+                 return {
+                     type: "tool_result",
+                     toolCallId: call.id,
+                     output: `Error executing tool "${tool.key}": ${err.message}`,
+                     isError: true,
+                 };
+             }
+         }
+         // Skill-style tools: returning the skill content lets the next iteration
+         // act on its instructions.
+         return { type: "tool_result", toolCallId: call.id, output: tool.content };
+     }
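+     // The two tool shapes executeTool() distinguishes (illustrative examples;
+     // the weather tool and its fields are invented for the sketch):
+     //
+     //   const tools = [
+     //       {   // function-style: has a handler, result is its return value
+     //           key: "get weather",
+     //           description: "Look up current weather for a city.",
+     //           parameters: { type: "object", properties: { city: { type: "string" } } },
+     //           handler: async ({ params }) => `Sunny in ${params.city}`,
+     //       },
+     //       {   // skill-style: no handler, the content itself is the result
+     //           key: "release checklist",
+     //           description: "Step-by-step release instructions.",
+     //           content: "1. Tag the commit\n2. Push the tag\n3. ...",
+     //       },
+     //   ];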
+     /**
+      * Cheap stand-in for pi-embedded-runner compaction. If the transcript is
+      * over the char budget, summarize older messages into a single system note
+      * and keep the tail. Real implementation would call the model to summarize.
+      */
+     async maybeCompact(history, state) {
+         const size = history.reduce((n, m) => n + JSON.stringify(m.blocks).length, 0);
+         if (size <= this.compactionCharThreshold)
+             return history;
+         await this.updateState(state, { status: "compacting" });
+         const keep = history.slice(-4);
+         const dropped = history.slice(0, -4);
+         const summary = {
+             id: randomUUID(),
+             runId: state.runId,
+             sessionId: state.sessionId,
+             role: "system",
+             blocks: [
+                 {
+                     type: "text",
+                     text: `[compacted ${dropped.length} earlier messages, ~${size} chars]`,
+                 },
+             ],
+             createdAt: new Date(),
+             iteration: state.iteration,
+         };
+         await this.orm.saveMessage(summary);
+         return [summary, ...keep];
+     }
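+     // What the model-backed version could look like (illustrative sketch; it
+     // reuses this.llm with no tools and folds the answer into the note above):
+     //
+     //   const { blocks } = await this.llm.stream({
+     //       system: "Summarize this transcript in a few sentences.",
+     //       messages: dropped,
+     //       tools: [],
+     //       callbacks: {},
+     //       runId: state.runId,
+     //   });
+     //   const text = blocks.filter((b) => b.type === "text")
+     //       .map((b) => b.text).join("\n");
+     //   summary.blocks = [{ type: "text", text: `[summary] ${text}` }];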
+     async updateState(state, patch) {
+         const next = { ...state, ...patch, updatedAt: new Date() };
+         Object.assign(state, next);
+         await this.orm.saveRunState(next);
+     }
+     async fail(state, error) {
+         await this.updateState(state, { status: "failed", lastError: error });
+     }
+ }
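+ // End-to-end smoke run of the loop with the deterministic MockProvider and
+ // the in-memory ORM (illustrative; the skill key is an example):
+ //
+ //   const handler = new PromptHandler({
+ //       context: "You are a demo agent.",
+ //       tools: [{ key: "demo skill", description: "A demo.", content: "Say Done." }],
+ //       model: { provider: "mock", name: "mock" },
+ //       db: { type: "memory" },
+ //       callbacks: { onDone: (runId) => console.log("finished", runId) },
+ //   });
+ //   const { runId } = await handler.handlePrompt({ text: "hello" });
+ //   // iteration 0: tool_call -> tool_result; iteration 1: "Done." -> onDone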
+ export function makeLambdaHandler(handler) {
+     return async (event) => {
+         if (event.action === "start")
+             return handler.handlePrompt({ text: event.text, files: event.files });
+         if (event.action === "continue") {
+             await handler.continueRun(event.runId);
+             return { runId: event.runId };
+         }
+     };
+ }
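+ // Sketch of a Lambda entry module tying the pieces together (illustrative;
+ // the env var names, model name, and the mongodb config shape are all
+ // assumptions, and `invokeSelf` is the InvokeCommand sketch shown earlier):
+ //
+ //   const handler = new PromptHandler({
+ //       context: process.env.AGENT_CONTEXT ?? "",
+ //       tools: [],
+ //       model: { provider: "openai", name: "gpt-4o-mini", apiKey: process.env.OPENAI_API_KEY },
+ //       db: { type: "mongodb", url: process.env.MONGO_URL },
+ //       executor: new LambdaExecutor(invokeSelf),
+ //   });
+ //   export const lambdaHandler = makeLambdaHandler(handler);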
+ //# sourceMappingURL=PromptHandler.js.map