@devxiyang/agent-kernel 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/README.md +525 -0
  2. package/dist/core/agent/agent.d.ts +132 -0
  3. package/dist/core/agent/agent.d.ts.map +1 -0
  4. package/dist/core/agent/agent.js +301 -0
  5. package/dist/core/agent/agent.js.map +1 -0
  6. package/dist/core/agent/index.d.ts +13 -0
  7. package/dist/core/agent/index.d.ts.map +1 -0
  8. package/dist/core/agent/index.js +10 -0
  9. package/dist/core/agent/index.js.map +1 -0
  10. package/dist/core/agent/loop.d.ts +30 -0
  11. package/dist/core/agent/loop.d.ts.map +1 -0
  12. package/dist/core/agent/loop.js +378 -0
  13. package/dist/core/agent/loop.js.map +1 -0
  14. package/dist/core/agent/types.d.ts +231 -0
  15. package/dist/core/agent/types.d.ts.map +1 -0
  16. package/dist/core/agent/types.js +9 -0
  17. package/dist/core/agent/types.js.map +1 -0
  18. package/dist/core/agent/wrap-tool.d.ts +12 -0
  19. package/dist/core/agent/wrap-tool.d.ts.map +1 -0
  20. package/dist/core/agent/wrap-tool.js +37 -0
  21. package/dist/core/agent/wrap-tool.js.map +1 -0
  22. package/dist/core/kernel/index.d.ts +12 -0
  23. package/dist/core/kernel/index.d.ts.map +1 -0
  24. package/dist/core/kernel/index.js +10 -0
  25. package/dist/core/kernel/index.js.map +1 -0
  26. package/dist/core/kernel/kernel.d.ts +15 -0
  27. package/dist/core/kernel/kernel.d.ts.map +1 -0
  28. package/dist/core/kernel/kernel.js +320 -0
  29. package/dist/core/kernel/kernel.js.map +1 -0
  30. package/dist/core/kernel/session-store.d.ts +24 -0
  31. package/dist/core/kernel/session-store.d.ts.map +1 -0
  32. package/dist/core/kernel/session-store.js +90 -0
  33. package/dist/core/kernel/session-store.js.map +1 -0
  34. package/dist/core/kernel/types.d.ts +215 -0
  35. package/dist/core/kernel/types.d.ts.map +1 -0
  36. package/dist/core/kernel/types.js +11 -0
  37. package/dist/core/kernel/types.js.map +1 -0
  38. package/dist/event-stream.d.ts +25 -0
  39. package/dist/event-stream.d.ts.map +1 -0
  40. package/dist/event-stream.js +58 -0
  41. package/dist/event-stream.js.map +1 -0
  42. package/dist/index.d.ts +12 -0
  43. package/dist/index.d.ts.map +1 -0
  44. package/dist/index.js +12 -0
  45. package/dist/index.js.map +1 -0
  46. package/package.json +57 -0
package/README.md ADDED
@@ -0,0 +1,525 @@
1
+ # agent-kernel
2
+
3
+ `agent-kernel` is a TypeScript library that provides a provider-agnostic agent runtime:
4
+ - a persistent/in-memory conversation kernel
5
+ - an event-driven agent loop with tool execution and parameter validation
6
+ - a reusable async event stream primitive
7
+
8
+ ## Core Concepts
9
+
10
+ - `Agent`: stateful runtime that orchestrates model calls, tool execution, and event emission.
11
+ - `Kernel`: conversation state store (in-memory or persisted), with branching and compaction support.
12
+ - `StreamFn`: provider adapter contract — you implement it once for any LLM backend.
13
+ - `AgentTool`: executable unit with a TypeBox schema for parameter validation and provider schema generation.
14
+
15
+ ## Project Structure
16
+
17
+ ```text
18
+ src/
19
+ core/
20
+ agent/
21
+ kernel/
22
+ event-stream.ts
23
+ index.ts
24
+ ```
25
+
26
+ ## Feature Map
27
+
28
+ - Provider-agnostic runtime via `StreamFn`
29
+ - Real-time events (`text_delta`, `tool_call`, `tool_result`, `step_done`, etc.)
30
+ - Tool execution loop with automatic parameter validation (TypeBox + `Value.Parse`)
31
+ - Validation errors returned as `tool_result` so the LLM can self-correct
32
+ - Persistent sessions via `createAgent({ session: { dir, sessionId } })`
33
+ - Conversation compaction via `kernel.compact(fromId, toId, summaryText)`
34
+ - Strong TypeScript types — `execute` input is inferred from the TypeBox schema
35
+ - **Parallel tool execution** — run all tool calls in a single turn concurrently
36
+ - **Tool timeout** — per-call deadline; timed-out tools return an error result automatically
37
+ - **Auto-compaction hook** — `onContextFull` fires when the context window is full
38
+ - **Stream error retry** — automatic retry with fixed delay for transient LLM errors
39
+ - **Session metadata** — attach a `title` (or any custom field) to a session; read it back from `listSessions`
40
+
41
+ ## Install
42
+
43
+ ```bash
44
+ npm install @devxiyang/agent-kernel
45
+ npm install @sinclair/typebox
46
+ npm install openai # if using OpenAI SDK adapter
47
+ npm install ai @ai-sdk/openai # if using Vercel AI SDK adapter
48
+ ```
49
+
50
+ ## Module Index
51
+
52
+ - `@devxiyang/agent-kernel` — root export (agent APIs + `EventStream`)
53
+ - `@devxiyang/agent-kernel/agent` — direct agent module
54
+ - `@devxiyang/agent-kernel/kernel` — kernel module (`createKernel`, kernel types)
55
+ - `@devxiyang/agent-kernel/event-stream` — `EventStream`
56
+
57
+ ---
58
+
59
+ ## Quick Start
60
+
61
+ ```ts
62
+ import { Type } from '@sinclair/typebox'
63
+ import { createAgent, type StreamFn, type AgentTool } from '@devxiyang/agent-kernel'
64
+
65
+ // Minimal echo stream (replace with a real provider adapter)
66
+ const stream: StreamFn = async (messages, _tools, onEvent) => {
67
+ const last = messages.filter((m) => m.role === 'user').at(-1)
68
+ const reply = `Echo: ${typeof last?.content === 'string' ? last.content : '[multi-part]'}`
69
+ onEvent({ type: 'text-delta', delta: reply })
70
+ return {
71
+ text: reply, toolCalls: [], stopReason: 'stop',
72
+ usage: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, totalTokens: 0,
73
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 } },
74
+ }
75
+ }
76
+
77
+ const getTimeSchema = Type.Object({})
78
+
79
+ const tools: AgentTool[] = [
80
+ {
81
+ name: 'get_time',
82
+ description: 'Returns the current UTC time as an ISO string.',
83
+ parameters: getTimeSchema,
84
+ execute: async () => ({ content: new Date().toISOString(), isError: false }),
85
+ },
86
+ ]
87
+
88
+ const agent = createAgent({ stream, tools, maxSteps: 8 })
89
+
90
+ agent.subscribe((event) => {
91
+ if (event.type === 'text_delta') process.stdout.write(event.delta)
92
+ })
93
+
94
+ agent.prompt({ type: 'user', payload: { parts: [{ type: 'text', text: 'What time is it?' }] } })
95
+ await agent.waitForIdle()
96
+ ```
97
+
98
+ ---
99
+
100
+ ## Defining Tools
101
+
102
+ Tools carry their TypeBox schema in `parameters`. The loop validates and coerces LLM-supplied
103
+ arguments before calling `execute`; validation errors are returned as `tool_result` so the LLM
104
+ can retry with corrected parameters.
105
+
106
+ ```ts
107
+ import { Type } from '@sinclair/typebox'
108
+ import type { AgentTool } from '@devxiyang/agent-kernel'
109
+
110
+ const searchSchema = Type.Object({
111
+ query: Type.String({ description: 'Search query string' }),
112
+ limit: Type.Optional(Type.Number({ description: 'Max results (default 10)' })),
113
+ })
114
+
115
+ // typeof searchSchema drives the input type — no manual annotation needed
116
+ const searchTool: AgentTool<typeof searchSchema> = {
117
+ name: 'search_docs',
118
+ description: 'Search project documentation by query.',
119
+ parameters: searchSchema,
120
+ execute: async (_toolCallId, input) => {
121
+ // input: { query: string; limit?: number }
122
+ return {
123
+ content: `Found results for: ${input.query}`,
124
+ isError: false,
125
+ details: { hits: 3 },
126
+ }
127
+ },
128
+ }
129
+ ```
130
+
131
+ `parameters` is a standard JSON Schema at runtime (TypeBox schemas are JSON Schema), so
132
+ provider adapters can pass `tool.parameters` directly to any LLM API.
133
+
134
+ ---
135
+
136
+ ## Implementing a `StreamFn`
137
+
138
+ `StreamFn` receives the current conversation messages and the full tool list on every call.
139
+ Use `tools` to generate the provider-specific schema — no hardcoding needed.
140
+
141
+ ```ts
142
+ type StreamFn = (
143
+ messages: AgentMessage[],
144
+ tools: AgentTool[],
145
+ onEvent: (event: LLMStreamEvent) => void,
146
+ signal?: AbortSignal,
147
+ ) => Promise<LLMStepResult>
148
+ ```
149
+
150
+ ---
151
+
152
+ ## Example: OpenAI SDK Adapter
153
+
154
+ Uses the OpenAI Responses API. Tools are converted from TypeBox schemas on every call.
155
+
156
+ ```ts
157
+ import OpenAI from 'openai'
158
+ import type { AgentMessage, StreamFn, ToolCallInfo } from '@devxiyang/agent-kernel'
159
+
160
+ const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
161
+
162
+ function toOpenAIMessages(messages: AgentMessage[]) {
163
+ return messages.map((m) => ({
164
+ role: m.role as 'user' | 'assistant' | 'tool',
165
+ content: typeof m.content === 'string' ? m.content : JSON.stringify(m.content),
166
+ }))
167
+ }
168
+
169
+ export const openaiStream: StreamFn = async (messages, tools, onEvent, signal) => {
170
+ const response = await client.responses.create({
171
+ model: 'gpt-4o',
172
+ input: toOpenAIMessages(messages),
173
+ // TypeBox schemas are plain JSON Schema — pass them directly
174
+ tools: tools.map((t) => ({
175
+ type: 'function' as const,
176
+ name: t.name,
177
+ description: t.description,
178
+ parameters: t.parameters ?? { type: 'object', properties: {} },
179
+ })),
180
+ signal,
181
+ })
182
+
183
+ let text = ''
184
+ const toolCalls: ToolCallInfo[] = []
185
+
186
+ for (const item of response.output ?? []) {
187
+ if (item.type === 'message') {
188
+ for (const part of item.content ?? []) {
189
+ if (part.type === 'output_text') {
190
+ text += part.text
191
+ onEvent({ type: 'text-delta', delta: part.text })
192
+ }
193
+ }
194
+ }
195
+ if (item.type === 'function_call') {
196
+ let input: Record<string, unknown> = {}
197
+ try { input = JSON.parse(item.arguments ?? '{}') } catch { /* ignore */ }
198
+ const tc = { toolCallId: item.call_id ?? item.id, toolName: item.name, input }
199
+ toolCalls.push(tc)
200
+ onEvent({ type: 'tool-call', ...tc })
201
+ }
202
+ }
203
+
204
+ const inputTokens = response.usage?.input_tokens ?? 0
205
+ const outputTokens = response.usage?.output_tokens ?? 0
206
+
207
+ return {
208
+ text,
209
+ toolCalls,
210
+ stopReason: toolCalls.length > 0 ? 'tool_use' : 'stop',
211
+ usage: {
212
+ input: inputTokens,
213
+ output: outputTokens,
214
+ cacheRead: 0,
215
+ cacheWrite: 0,
216
+ totalTokens: response.usage?.total_tokens ?? inputTokens + outputTokens,
217
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
218
+ },
219
+ }
220
+ }
221
+
222
+ // Usage
223
+ import { Type } from '@sinclair/typebox'
224
+ import { createAgent } from '@devxiyang/agent-kernel'
225
+
226
+ const searchSchema = Type.Object({
227
+ query: Type.String({ description: 'Search query string' }),
228
+ })
229
+
230
+ const agent = createAgent({
231
+ stream: openaiStream,
232
+ tools: [
233
+ {
234
+ name: 'search_docs',
235
+ description: 'Search project documentation by query.',
236
+ parameters: searchSchema,
237
+ execute: async (_id, input) => ({
238
+ content: `Results for: ${input.query}`,
239
+ isError: false,
240
+ }),
241
+ },
242
+ ],
243
+ maxSteps: 10,
244
+ })
245
+
246
+ agent.subscribe((e) => { if (e.type === 'text_delta') process.stdout.write(e.delta) })
247
+ agent.prompt({ type: 'user', payload: { parts: [{ type: 'text', text: 'Find compact API docs' }] } })
248
+ await agent.waitForIdle()
249
+ ```
250
+
251
+ ---
252
+
253
+ ## Example: Vercel AI SDK v6 Adapter
254
+
255
+ Uses `streamText` from `ai`. Tools without an `execute` function are returned as tool calls
256
+ for our loop to handle. `jsonSchema()` wraps TypeBox schemas as AI SDK-compatible schemas.
257
+
258
+ ```ts
259
+ import { streamText, jsonSchema, tool } from 'ai'
260
+ import { openai } from '@ai-sdk/openai'
261
+ import type { AgentMessage, StreamFn, ToolCallInfo } from '@devxiyang/agent-kernel'
262
+
263
+ function toAISDKMessages(messages: AgentMessage[]) {
264
+ return messages.map((m) => {
265
+ if (m.role === 'tool') {
266
+ // tool_result entries — AI SDK expects role 'tool'
267
+ const payload = m.content as { toolCallId: string; content: string }
268
+ return { role: 'tool' as const, content: [{ type: 'tool-result' as const, toolCallId: payload.toolCallId, result: payload.content }] }
269
+ }
270
+ return {
271
+ role: m.role as 'user' | 'assistant',
272
+ content: typeof m.content === 'string' ? m.content : JSON.stringify(m.content),
273
+ }
274
+ })
275
+ }
276
+
277
+ export const aiSdkStream: StreamFn = async (messages, tools, onEvent, signal) => {
278
+ // Build AI SDK tool definitions — no execute, our loop handles execution
279
+ const aiTools = Object.fromEntries(
280
+ tools.map((t) => [
281
+ t.name,
282
+ tool({
283
+ description: t.description,
284
+ // jsonSchema() accepts any plain JSON Schema — TypeBox schemas qualify
285
+ inputSchema: t.parameters ? jsonSchema(t.parameters) : jsonSchema({ type: 'object', properties: {} }),
286
+ }),
287
+ ]),
288
+ )
289
+
290
+ const result = streamText({
291
+ model: openai('gpt-4o'),
292
+ messages: toAISDKMessages(messages),
293
+ tools: aiTools,
294
+ // no `maxSteps` in AI SDK v5+ — the default `stopWhen` is one step; our kernel loops
295
+ abortSignal: signal,
296
+ })
297
+
298
+ let text = ''
299
+ const toolCalls: ToolCallInfo[] = []
300
+
301
+ for await (const chunk of result.fullStream) {
302
+ if (chunk.type === 'text-delta') {
303
+ text += chunk.text
304
+ onEvent({ type: 'text-delta', delta: chunk.text })
305
+ }
306
+ if (chunk.type === 'tool-call') {
307
+ const tc = { toolCallId: chunk.toolCallId, toolName: chunk.toolName, input: chunk.input as Record<string, unknown> }
308
+ toolCalls.push(tc)
309
+ onEvent({ type: 'tool-call', ...tc })
310
+ }
311
+ }
312
+
313
+ const usage = await result.usage
314
+
315
+ return {
316
+ text,
317
+ toolCalls,
318
+ stopReason: toolCalls.length > 0 ? 'tool_use' : 'stop',
319
+ usage: {
320
+ input: usage.inputTokens ?? 0,
321
+ output: usage.outputTokens ?? 0,
322
+ cacheRead: 0,
323
+ cacheWrite: 0,
324
+ totalTokens: usage.totalTokens ?? 0,
325
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
326
+ },
327
+ }
328
+ }
329
+
330
+ // Usage
331
+ import { Type } from '@sinclair/typebox'
332
+ import { createAgent } from '@devxiyang/agent-kernel'
333
+
334
+ const searchSchema = Type.Object({
335
+ query: Type.String({ description: 'Search query string' }),
336
+ })
337
+
338
+ const agent = createAgent({
339
+ stream: aiSdkStream,
340
+ tools: [
341
+ {
342
+ name: 'search_docs',
343
+ description: 'Search project documentation by query.',
344
+ parameters: searchSchema,
345
+ execute: async (_id, input) => ({
346
+ content: `Results for: ${input.query}`,
347
+ isError: false,
348
+ }),
349
+ },
350
+ ],
351
+ maxSteps: 10,
352
+ })
353
+
354
+ agent.subscribe((e) => { if (e.type === 'text_delta') process.stdout.write(e.delta) })
355
+ agent.prompt({ type: 'user', payload: { parts: [{ type: 'text', text: 'Find compact API docs' }] } })
356
+ await agent.waitForIdle()
357
+ ```
358
+
359
+ ---
360
+
361
+ ## Advanced Agent Options
362
+
363
+ ### Parallel Tool Execution
364
+
365
+ By default tools run sequentially. Set `parallelTools: true` to run all tool calls in a turn concurrently with `Promise.allSettled`. If a steering message arrives after all tools complete, their results are discarded and replaced with skipped markers.
366
+
367
+ ```ts
368
+ const agent = createAgent({
369
+ stream, tools, maxSteps: 10,
370
+ parallelTools: true,
371
+ })
372
+ ```
373
+
374
+ ### Tool Timeout
375
+
376
+ Set a per-tool execution deadline in milliseconds. Tools that exceed it return an `isError: true` result so the LLM can handle the failure gracefully.
377
+
378
+ ```ts
379
+ const agent = createAgent({
380
+ stream, tools, maxSteps: 10,
381
+ toolTimeout: 15_000, // 15 s per tool call
382
+ })
383
+ ```
384
+
385
+ ### Auto-Compaction Hook (`onContextFull`)
386
+
387
+ Fires after a step when `kernel.contextSize >= kernel.budget.limit`. The callback is responsible for compacting the kernel; the loop just provides the hook.
388
+
389
+ ```ts
390
+ const agent = createAgent({
391
+ stream, tools, maxSteps: 10,
392
+ onContextFull: async (kernel) => {
393
+ const entries = kernel.read()
394
+ const from = entries[0].id
395
+ const to = entries[Math.floor(entries.length / 2)].id
396
+ kernel.compact(from, to, 'Earlier context summarised.')
397
+ },
398
+ })
399
+
400
+ agent.kernel.budget.set(80_000) // tokens — trigger at 80 k input tokens
401
+ ```
402
+
403
+ Only fires when `budget.limit` is explicitly set (the default is `Infinity`).
404
+
405
+ ### Stream Error Retry
406
+
407
+ Automatically retry transient LLM errors with a fixed delay. Abort signals are respected — no retry happens after abort.
408
+
409
+ ```ts
410
+ const agent = createAgent({
411
+ stream, tools, maxSteps: 10,
412
+ retryOnError: {
413
+ maxAttempts: 3, // total attempts including the first
414
+ delayMs: 500,
415
+ },
416
+ })
417
+ ```
418
+
419
+ Only `stream()` calls are retried; tool execution is not affected.
420
+
421
+ ---
422
+
423
+ ## Persistent Session + Kernel Compaction
424
+
425
+ ```ts
426
+ import { createAgent } from '@devxiyang/agent-kernel'
427
+
428
+ const agent = createAgent({
429
+ stream: openaiStream, // or aiSdkStream
430
+ tools: [],
431
+ maxSteps: 8,
432
+ session: {
433
+ dir: './.agent-sessions',
434
+ sessionId: 'demo-session-001',
435
+ },
436
+ })
437
+
438
+ agent.prompt({ type: 'user', payload: { parts: [{ type: 'text', text: 'Summarize our last discussion.' }] } })
439
+ await agent.waitForIdle()
440
+
441
+ // Compact old entries when context grows
442
+ const entries = agent.kernel.read()
443
+ if (entries.length > 12) {
444
+ const fromId = entries[0].id
445
+ const toId = entries[Math.min(8, entries.length - 1)].id
446
+ agent.kernel.compact(fromId, toId, 'Summary of earlier context and decisions.')
447
+ }
448
+ ```
449
+
450
+ Session files are written under `./.agent-sessions/<sessionId>/` (`kernel.jsonl`, `log.jsonl`).
451
+
452
+ ---
453
+
454
+ ## Session Management
455
+
456
+ `listSessions`, `deleteSession`, and `updateSessionMeta` are standalone utilities for CLI and Web API use cases.
457
+
458
+ ```ts
459
+ import {
460
+ listSessions,
461
+ deleteSession,
462
+ updateSessionMeta,
463
+ } from '@devxiyang/agent-kernel/kernel'
464
+
465
+ // List all sessions, sorted by most recently updated
466
+ const sessions = listSessions('./.agent-sessions')
467
+ // [
468
+ // { sessionId: 'demo-001', updatedAt: 1740000000000, messageCount: 12,
469
+ // meta: { createdAt: 1739999000000, title: 'My first session' } },
470
+ // { sessionId: 'demo-002', updatedAt: 1739000000000, messageCount: 4,
471
+ // meta: { createdAt: 1738999000000 } },
472
+ // ]
473
+
474
+ // Delete a session
475
+ deleteSession('./.agent-sessions', 'demo-001')
476
+
477
+ // Update a session's metadata (merge — never overwrites createdAt)
478
+ updateSessionMeta('./.agent-sessions', 'demo-002', { title: 'Renamed session' })
479
+ ```
480
+
481
+ ### Session Metadata
482
+
483
+ Pass `meta` when creating a session to set an initial title or other fields. `createdAt` is set automatically on first creation and is never overwritten.
484
+
485
+ ```ts
486
+ const agent = createAgent({
487
+ stream, tools, maxSteps: 8,
488
+ session: {
489
+ dir: './.agent-sessions',
490
+ sessionId: 'my-session',
491
+ meta: { title: 'Code review assistant' },
492
+ },
493
+ })
494
+ ```
495
+
496
+ `SessionMeta` and `SessionInfo` types:
497
+
498
+ ```ts
499
+ type SessionMeta = {
500
+ createdAt: number // Unix ms — set once at creation
501
+ title?: string
502
+ }
503
+
504
+ type SessionInfo = {
505
+ sessionId: string // directory name used as session ID
506
+ updatedAt: number // log.jsonl mtime in milliseconds
507
+ messageCount: number // number of entries in log.jsonl
508
+ meta: SessionMeta | null
509
+ }
510
+ ```
511
+
512
+ All functions are safe to call on non-existent paths — `listSessions` returns `[]`,
513
+ `deleteSession` and `updateSessionMeta` are silent no-ops when the session does not exist.
514
+
515
+ ---
516
+
517
+ ## Build Output
518
+
519
+ Compiled files and type declarations are generated into `dist/`.
520
+
521
+ ```bash
522
+ npm run build # compile TypeScript to dist/
523
+ npm run typecheck # type-check without emitting
524
+ npm test # run unit tests (vitest)
525
+ ```
@@ -0,0 +1,132 @@
1
+ /**
2
+ * Agent — stateful wrapper around kernel + runLoop.
3
+ *
4
+ * Owns:
5
+ * - AbortController lifecycle (per-prompt)
6
+ * - Steering / follow-up message queues
7
+ * - Event fan-out to subscribers
8
+ * - Mutable config (stream, tools, maxSteps)
9
+ * - Runtime state (streaming message, pending tool calls, error)
10
+ *
11
+ * Does not own:
12
+ * - Kernel lifecycle (caller creates and passes in)
13
+ * - Conversation log (kernel handles both kernel.jsonl and log.jsonl)
14
+ * - Compaction decisions (caller checks kernel.contextSize and calls kernel.compact())
15
+ */
16
+ import type { AgentKernel, KernelOptions } from '../kernel';
17
+ import type { AgentEntry, AgentEvent, AgentOptions, AgentTool, StreamFn, QueueMode } from './types';
18
+ export interface AgentState {
19
+ /** Whether the agent is currently running a loop. */
20
+ isRunning: boolean;
21
+ /** The partial assistant entry being streamed (null when idle). */
22
+ streamEntry: AgentEntry | null;
23
+ /** Tool call IDs currently executing. */
24
+ pendingToolCalls: Set<string>;
25
+ /** Last error message (cleared on next prompt/continue). */
26
+ error: string | null;
27
+ }
28
+ /**
29
+ * High-level stateful agent.
30
+ *
31
+ * Owns the AbortController lifecycle, steering/follow-up queues, event fan-out
32
+ * to subscribers, and mutable config. The underlying kernel (conversation store)
33
+ * is created externally and injected — the Agent never touches persistence directly.
34
+ */
35
+ export declare class Agent {
36
+ private readonly _kernel;
37
+ private _stream;
38
+ private _tools;
39
+ private _maxSteps;
40
+ private _transformContext;
41
+ private _onStepEnd;
42
+ private _steeringMode;
43
+ private _followUpMode;
44
+ private _parallelTools;
45
+ private _onContextFull;
46
+ private _toolTimeout;
47
+ private _retryOnError;
48
+ private _abortController;
49
+ private _runningPromise;
50
+ private readonly _steeringQueue;
51
+ private readonly _followUpQueue;
52
+ private readonly _listeners;
53
+ private _streamEntry;
54
+ private _streamText;
55
+ private _streamReasoning;
56
+ private _streamToolCalls;
57
+ private _pendingToolCalls;
58
+ private _error;
59
+ constructor(kernel: AgentKernel, options: AgentOptions);
60
+ /** The underlying kernel that owns conversation history and persistence. */
61
+ get kernel(): AgentKernel;
62
+ /** Snapshot of the current runtime state (read-only). */
63
+ get state(): AgentState;
64
+ /** Replace the LLM streaming function (takes effect on the next run). */
65
+ setStream(stream: StreamFn): void;
66
+ /** Replace the tool set (takes effect on the next run). */
67
+ setTools(tools: AgentTool[]): void;
68
+ /** Update the maximum number of loop steps (takes effect on the next run). */
69
+ setMaxSteps(maxSteps: number): void;
70
+ /** Change how many steering messages are dequeued per check. */
71
+ setSteeringMode(mode: QueueMode): void;
72
+ /** Change how many follow-up messages are dequeued per check. */
73
+ setFollowUpMode(mode: QueueMode): void;
74
+ /**
75
+ * Register a listener for all agent events.
76
+ * Returns an unsubscribe function — call it to stop receiving events.
77
+ */
78
+ subscribe(fn: (event: AgentEvent) => void): () => void;
79
+ /**
80
+ * Append one or more user entries to the kernel and start a new agent run.
81
+ * Throws if the agent is already running — use steer() or followUp() instead.
82
+ */
83
+ prompt(entries: AgentEntry | AgentEntry[]): void;
84
+ /**
85
+ * Resume execution after an error, abort, or when steering/follow-up messages
86
+ * are queued but the loop has already exited.
87
+ * Throws if already running or if there is nothing to continue from.
88
+ */
89
+ continue(): void;
90
+ /**
91
+ * Queue a steering message that interrupts the current run between tool calls.
92
+ * Safe to call while the agent is running. The loop picks it up on the next
93
+ * steering check and skips any remaining tool calls in the current batch.
94
+ */
95
+ steer(entries: AgentEntry | AgentEntry[]): void;
96
+ /**
97
+ * Queue a follow-up message to be processed after the current run completes.
98
+ * Causes the outer loop to continue rather than stop when the agent would
99
+ * otherwise go idle.
100
+ */
101
+ followUp(entries: AgentEntry | AgentEntry[]): void;
102
+ /** Cancel the current run. No-op if not running. */
103
+ abort(): void;
104
+ /**
105
+ * Clear all queues and transient runtime state (stream entry, pending tool calls, error).
106
+ * Does NOT touch the kernel or conversation history.
107
+ * Throws if called while running — abort() first.
108
+ */
109
+ reset(): void;
110
+ /** Resolves when the agent finishes its current run (or immediately if idle). */
111
+ waitForIdle(): Promise<void>;
112
+ private _run;
113
+ private _consume;
114
+ private _handleEvent;
115
+ private _updateStreamEntry;
116
+ private _drainSteering;
117
+ private _drainFollowUp;
118
+ }
119
+ export interface AgentSessionOptions extends AgentOptions {
120
+ /** Session persistence. Omit for in-memory mode (testing). */
121
+ session?: {
122
+ dir: string;
123
+ sessionId: string;
124
+ meta?: KernelOptions['meta'];
125
+ };
126
+ }
127
+ /**
128
+ * Convenience factory that creates a kernel (optionally with persistence) and
129
+ * wraps it in an Agent. Prefer this over constructing Agent directly.
130
+ */
131
+ export declare function createAgent(options: AgentSessionOptions): Agent;
132
+ //# sourceMappingURL=agent.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"agent.d.ts","sourceRoot":"","sources":["../../../src/core/agent/agent.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;GAcG;AAEH,OAAO,KAAK,EAAE,WAAW,EAAE,aAAa,EAAE,MAAM,WAAW,CAAA;AAG3D,OAAO,KAAK,EACV,UAAU,EACV,UAAU,EAEV,YAAY,EACZ,SAAS,EACT,QAAQ,EAER,SAAS,EACV,MAAM,SAAS,CAAA;AAIhB,MAAM,WAAW,UAAU;IACzB,qDAAqD;IACrD,SAAS,EAAS,OAAO,CAAA;IACzB,mEAAmE;IACnE,WAAW,EAAO,UAAU,GAAG,IAAI,CAAA;IACnC,yCAAyC;IACzC,gBAAgB,EAAE,GAAG,CAAC,MAAM,CAAC,CAAA;IAC7B,4DAA4D;IAC5D,KAAK,EAAa,MAAM,GAAG,IAAI,CAAA;CAChC;AAID;;;;;;GAMG;AACH,qBAAa,KAAK;IAChB,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAa;IAErC,OAAO,CAAC,OAAO,CAAoB;IACnC,OAAO,CAAC,MAAM,CAAwB;IACtC,OAAO,CAAC,SAAS,CAAgB;IACjC,OAAO,CAAC,iBAAiB,CAAkC;IAC3D,OAAO,CAAC,UAAU,CAAkC;IACpD,OAAO,CAAC,aAAa,CAAe;IACpC,OAAO,CAAC,aAAa,CAAe;IACpC,OAAO,CAAC,cAAc,CAAkC;IACxD,OAAO,CAAC,cAAc,CAAkC;IACxD,OAAO,CAAC,YAAY,CAAkC;IACtD,OAAO,CAAC,aAAa,CAAkC;IAEvD,OAAO,CAAC,gBAAgB,CAA+B;IACvD,OAAO,CAAC,eAAe,CAAiD;IAExE,OAAO,CAAC,QAAQ,CAAC,cAAc,CAAmB;IAClD,OAAO,CAAC,QAAQ,CAAC,cAAc,CAAmB;IAClD,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAyC;IAGpE,OAAO,CAAC,YAAY,CAA8B;IAClD,OAAO,CAAC,WAAW,CAAW;IAC9B,OAAO,CAAC,gBAAgB,CAAO;IAC/B,OAAO,CAAC,gBAAgB,CAAqB;IAC7C,OAAO,CAAC,iBAAiB,CAAoB;IAC7C,OAAO,CAAC,MAAM,CAAgC;gBAElC,MAAM,EAAE,WAAW,EAAE,OAAO,EAAE,YAAY;IAiBtD,4EAA4E;IAC5E,IAAI,MAAM,IAAI,WAAW,CAAwB;IAEjD,yDAAyD;IACzD,IAAI,KAAK,IAAI,UAAU,CAOtB;IAID,yEAAyE;IACzE,SAAS,CAAC,MAAM,EAAE,QAAQ,GAAG,IAAI;IACjC,2DAA2D;IAC3D,QAAQ,CAAC,KAAK,EAAE,SAAS,EAAE,GAAG,IAAI;IAClC,8EAA8E;IAC9E,WAAW,CAAC,QAAQ,EAAE,MAAM,GAAG,IAAI;IACnC,gEAAgE;IAChE,eAAe,CAAC,IAAI,EAAE,SAAS,GAAG,IAAI;IACtC,iEAAiE;IACjE,eAAe,CAAC,IAAI,EAAE,SAAS,GAAG,IAAI;IAItC;;;OAGG;IACH,SAAS,CAAC,EAAE,EAAE,CAAC,KAAK,EAAE,UAAU,KAAK,IAAI,GAAG,MAAM,IAAI;IAOtD;;;OAGG;IACH,MAAM,CAAC,OAAO,EAAE,UAAU,GAAG,UAAU,EAAE,GAAG,IAAI;IAahD;;;;OAIG;IACH,QAAQ,IAAI,IAAI;IA0BhB;;;;OAIG;IACH,KAAK,CAAC,OAAO,EAAE,UAAU,GAAG,UAAU,EAAE,GAAG,IAAI;IAI/C;;;;OAIG;IACH,QAAQ,CAAC,OAAO,EAAE,UAAU,GAAG,UAAU,EAAE,GAAG,IAAI;IAMlD,oDAAoD;IACpD,KAAK,IAAI,IAAI;IAMb;;;;OAIG;IACH,KAAK,
IAAI,IAAI;IAiBb,iFAAiF;IAC3E,WAAW,IAAI,OAAO,CAAC,IAAI,CAAC;IAQlC,OAAO,CAAC,IAAI;YAqBE,QAAQ;IAmBtB,OAAO,CAAC,YAAY;IAmDpB,OAAO,CAAC,kBAAkB;YAWZ,cAAc;YAOd,cAAc;CAM7B;AAID,MAAM,WAAW,mBAAoB,SAAQ,YAAY;IACvD,8DAA8D;IAC9D,OAAO,CAAC,EAAE;QAAE,GAAG,EAAE,MAAM,CAAC;QAAC,SAAS,EAAE,MAAM,CAAC;QAAC,IAAI,CAAC,EAAE,aAAa,CAAC,MAAM,CAAC,CAAA;KAAE,CAAA;CAC3E;AAED;;;GAGG;AACH,wBAAgB,WAAW,CAAC,OAAO,EAAE,mBAAmB,GAAG,KAAK,CAI/D"}