@soederpop/luca 0.0.26 → 0.0.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,6 +7,7 @@ import type { AGIContainer } from '../container.server.js'
7
7
  import type { ContentDb } from '@soederpop/luca/node'
8
8
  import type { ConversationHistory, ConversationMeta } from './conversation-history'
9
9
  import hashObject from '../../hash-object.js'
10
+ import { InterceptorChain, type InterceptorFn, type InterceptorPoints, type InterceptorPoint } from '../lib/interceptor-chain.js'
10
11
 
11
12
  declare module '@soederpop/luca/feature' {
12
13
  interface AvailableFeatures {
@@ -28,6 +29,7 @@ export const AssistantEventsSchema = FeatureEventsSchema.extend({
28
29
  toolResult: z.tuple([z.string().describe('Tool name'), z.any().describe('Result value')]).describe('Emitted when a tool returns a result'),
29
30
  toolError: z.tuple([z.string().describe('Tool name'), z.any().describe('Error')]).describe('Emitted when a tool call fails'),
30
31
  hookFired: z.tuple([z.string().describe('Hook/event name')]).describe('Emitted when a hook function is called'),
32
+ systemPromptExtensionsChanged: z.tuple([]).describe('Emitted when system prompt extensions are added or removed'),
31
33
  })
32
34
 
33
35
  export const AssistantStateSchema = FeatureStateSchema.extend({
@@ -39,6 +41,7 @@ export const AssistantStateSchema = FeatureStateSchema.extend({
39
41
  conversationId: z.string().optional().describe('The active conversation persistence ID'),
40
42
  threadId: z.string().optional().describe('The active thread ID'),
41
43
  systemPrompt: z.string().describe('The loaded system prompt text'),
44
+ systemPromptExtensions: z.record(z.string(), z.string()).describe('Named extensions appended to the system prompt'),
42
45
  meta: z.record(z.string(), z.any()).describe('Parsed YAML frontmatter from CORE.md'),
43
46
  tools: z.record(z.string(), z.any()).describe('Registered tool implementations'),
44
47
  hooks: z.record(z.string(), z.any()).describe('Loaded event hook functions'),
@@ -111,6 +114,26 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
111
114
 
112
115
  static { Feature.register(this, 'assistant') }
113
116
 
117
+ readonly interceptors = {
118
+ beforeAsk: new InterceptorChain<InterceptorPoints['beforeAsk']>(),
119
+ beforeTurn: new InterceptorChain<InterceptorPoints['beforeTurn']>(),
120
+ beforeToolCall: new InterceptorChain<InterceptorPoints['beforeToolCall']>(),
121
+ afterToolCall: new InterceptorChain<InterceptorPoints['afterToolCall']>(),
122
+ beforeResponse: new InterceptorChain<InterceptorPoints['beforeResponse']>(),
123
+ }
124
+
125
+ /**
126
+ * Register an interceptor at a given point in the pipeline.
127
+ *
128
+ * @param point - The interception point
129
+ * @param fn - Middleware function receiving (ctx, next)
130
+ * @returns this, for chaining
131
+ */
132
+ intercept<K extends InterceptorPoint>(point: K, fn: InterceptorFn<InterceptorPoints[K]>): this {
133
+ this.interceptors[point].add(fn as any)
134
+ return this
135
+ }
136
+
114
137
  /** @returns Default state with the assistant not started, zero conversations, and the resolved folder path. */
115
138
  override get initialState(): AssistantState {
116
139
  return {
@@ -120,6 +143,7 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
120
143
  lastResponse: '',
121
144
  folder: this.resolvedFolder,
122
145
  systemPrompt: '',
146
+ systemPromptExtensions: {},
123
147
  meta: {},
124
148
  tools: {},
125
149
  hooks: {},
@@ -228,7 +252,7 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
228
252
  api: 'chat',
229
253
  ...(this.options.maxTokens ? { maxTokens: this.options.maxTokens } : {}),
230
254
  history: [
231
- { role: 'system', content: this.systemPrompt || this.loadSystemPrompt() },
255
+ { role: 'system', content: this.effectiveSystemPrompt },
232
256
  ],
233
257
  })
234
258
  this.state.set('conversation', conv)
@@ -254,6 +278,60 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
254
278
  return this.state.get('systemPrompt') || ''
255
279
  }
256
280
 
281
+ /** The named extensions appended to the system prompt. */
282
+ get systemPromptExtensions(): Record<string, string> {
283
+ return (this.state.get('systemPromptExtensions') || {}) as Record<string, string>
284
+ }
285
+
286
+ /** The system prompt with all extensions appended. This is the value passed to the conversation. */
287
+ get effectiveSystemPrompt(): string {
288
+ const base = this.systemPrompt
289
+ const extensions = Object.values(this.systemPromptExtensions)
290
+ if (!extensions.length) return base
291
+ return [base, ...extensions].join('\n\n')
292
+ }
293
+
294
/**
 * Add or update a named system prompt extension. The value is appended
 * to the base system prompt when passed to the conversation.
 *
 * @param key - A unique identifier for this extension
 * @param value - The text to append
 * @returns this, for chaining
 */
addSystemPromptExtension(key: string, value: string): this {
  const next = { ...this.systemPromptExtensions }
  next[key] = value
  this.state.set('systemPromptExtensions', next)
  this.syncSystemPromptToConversation()
  this.emit('systemPromptExtensionsChanged')
  return this
}
308
+
309
/**
 * Remove a named system prompt extension.
 *
 * No-op when the key is not registered, so unconditional removal does not
 * trigger a spurious state write, conversation rewrite, or change event
 * (the previous implementation emitted `systemPromptExtensionsChanged`
 * even when nothing changed).
 *
 * @param key - The identifier of the extension to remove
 * @returns this, for chaining
 */
removeSystemPromptExtension(key: string): this {
  const current = this.systemPromptExtensions
  if (!(key in current)) return this
  const next = { ...current }
  delete next[key]
  this.state.set('systemPromptExtensions', next)
  this.syncSystemPromptToConversation()
  this.emit('systemPromptExtensionsChanged')
  return this
}
323
+
324
/** Update the conversation's system message to reflect the current effective prompt. */
private syncSystemPromptToConversation() {
  const conv = this.state.get('conversation') as Conversation | null
  if (!conv) return

  // Only a leading system/developer message is rewritten; anything else is left alone.
  const [head, ...rest] = conv.messages
  if (!head) return
  if (head.role !== 'system' && head.role !== 'developer') return

  conv.state.set('messages', [{ ...head, content: this.effectiveSystemPrompt }, ...rest])
}
334
+
257
335
  /** The tools registered with this assistant. */
258
336
  get tools(): Record<string, ConversationTool> {
259
337
  return (this.state.get('tools') || {}) as Record<string, ConversationTool>
@@ -287,6 +365,9 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
287
365
  }
288
366
  } else if (fnOrHelper && typeof (fnOrHelper as any).toTools === 'function') {
289
367
  this._registerTools((fnOrHelper as any).toTools())
368
+ if (typeof (fnOrHelper as any).setupToolsConsumer === 'function') {
369
+ (fnOrHelper as any).setupToolsConsumer(this)
370
+ }
290
371
  } else if (fnOrHelper && 'schemas' in fnOrHelper && 'handlers' in fnOrHelper) {
291
372
  this._registerTools(fnOrHelper as { schemas: Record<string, z.ZodType>, handlers: Record<string, Function> })
292
373
  }
@@ -768,7 +849,7 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
768
849
 
769
850
  // Swap in fresh system prompt if it changed
770
851
  if (messages.length > 0 && (messages[0]!.role === 'system' || messages[0]!.role === 'developer')) {
771
- messages[0] = { role: messages[0]!.role, content: this.systemPrompt }
852
+ messages[0] = { role: messages[0]!.role, content: this.effectiveSystemPrompt }
772
853
  }
773
854
 
774
855
  this.conversation.state.set('id', existing.id)
@@ -851,6 +932,38 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
851
932
  this.conversation.on('toolResult', (name: string, result: any) => this.emit('toolResult', name, result))
852
933
  this.conversation.on('toolError', (name: string, error: any) => this.emit('toolError', name, error))
853
934
 
935
+ // Install interceptor-aware tool executor on the conversation
936
+ this.conversation.toolExecutor = async (name: string, args: Record<string, any>, handler: (...a: any[]) => Promise<any>) => {
937
+ const ctx = { name, args, result: undefined as string | undefined, error: undefined, skip: false }
938
+
939
+ await this.interceptors.beforeToolCall.run(ctx, async () => {})
940
+
941
+ if (ctx.skip) {
942
+ const result = ctx.result ?? JSON.stringify({ skipped: true })
943
+ this.emit('toolResult', ctx.name, result)
944
+ return result
945
+ }
946
+
947
+ try {
948
+ this.emit('toolCall', ctx.name, ctx.args)
949
+ const output = await handler(ctx.args)
950
+ ctx.result = typeof output === 'string' ? output : JSON.stringify(output)
951
+ } catch (err: any) {
952
+ ctx.error = err
953
+ ctx.result = JSON.stringify({ error: err.message || String(err) })
954
+ }
955
+
956
+ await this.interceptors.afterToolCall.run(ctx, async () => {})
957
+
958
+ if (ctx.error && !ctx.result?.includes('"error"')) {
959
+ this.emit('toolError', ctx.name, ctx.error)
960
+ } else {
961
+ this.emit('toolResult', ctx.name, ctx.result!)
962
+ }
963
+
964
+ return ctx.result!
965
+ }
966
+
854
967
  // Load conversation history for non-lifecycle modes
855
968
  await this.loadConversationHistory()
856
969
 
@@ -901,7 +1014,23 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
901
1014
  question = this.prependTimestamp(question)
902
1015
  }
903
1016
 
904
- const result = await this.conversation.ask(question, options)
1017
+ // Run beforeAsk interceptors — they can rewrite the question or short-circuit
1018
+ if (this.interceptors.beforeAsk.hasInterceptors) {
1019
+ const ctx = { question, options } as InterceptorPoints['beforeAsk']
1020
+ await this.interceptors.beforeAsk.run(ctx, async () => {})
1021
+ if (ctx.result !== undefined) return ctx.result
1022
+ question = ctx.question
1023
+ options = ctx.options
1024
+ }
1025
+
1026
+ let result = await this.conversation.ask(question, options)
1027
+
1028
+ // Run beforeResponse interceptors — they can rewrite the final text
1029
+ if (this.interceptors.beforeResponse.hasInterceptors) {
1030
+ const ctx = { text: result }
1031
+ await this.interceptors.beforeResponse.run(ctx, async () => {})
1032
+ result = ctx.text
1033
+ }
905
1034
 
906
1035
  // Auto-save for non-lifecycle modes
907
1036
  if (this.options.historyMode !== 'lifecycle' && this.state.get('threadId')) {
@@ -125,6 +125,44 @@ export type ConversationState = z.infer<typeof ConversationStateSchema>
125
125
 
126
126
  export type AskOptions = {
127
127
  maxTokens?: number
128
+ /**
129
+ * When provided, enables OpenAI Structured Outputs. The model is constrained
130
+ * to return JSON matching this Zod schema. The return value of ask() will be
131
+ * the parsed object instead of a raw string.
132
+ */
133
+ schema?: z.ZodType
134
+ }
135
+
136
+ /**
137
+ * Recursively set `additionalProperties: false` on every object-type node
138
+ * in a JSON Schema tree. OpenAI strict mode requires this at every level.
139
+ * Also ensures every object has a `required` array listing all its property keys.
140
+ */
141
+ function strictifySchema(schema: Record<string, any>): Record<string, any> {
142
+ const clone = { ...schema }
143
+
144
+ if (clone.type === 'object' && clone.properties) {
145
+ clone.additionalProperties = false
146
+ clone.required = Object.keys(clone.properties)
147
+ const props: Record<string, any> = {}
148
+ for (const [key, val] of Object.entries(clone.properties)) {
149
+ props[key] = strictifySchema(val as Record<string, any>)
150
+ }
151
+ clone.properties = props
152
+ }
153
+
154
+ if (clone.items) {
155
+ clone.items = strictifySchema(clone.items)
156
+ }
157
+
158
+ // anyOf / oneOf / allOf
159
+ for (const combiner of ['anyOf', 'oneOf', 'allOf'] as const) {
160
+ if (Array.isArray(clone[combiner])) {
161
+ clone[combiner] = clone[combiner].map((s: Record<string, any>) => strictifySchema(s))
162
+ }
163
+ }
164
+
165
+ return clone
128
166
  }
129
167
 
130
168
  /**
@@ -151,6 +189,16 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
151
189
 
152
190
  static { Feature.register(this, 'conversation') }
153
191
 
192
+ /**
193
+ * Pluggable tool executor. Called for each tool invocation with the tool
194
+ * name, parsed args, and the default handler. Return the serialized result string.
195
+ * The Assistant replaces this to wire in beforeToolCall/afterToolCall interceptors.
196
+ */
197
+ toolExecutor: ((name: string, args: Record<string, any>, handler: (...args: any[]) => Promise<any>) => Promise<string>) | null = null
198
+
199
+ /** The active structured output schema for the current ask() call, if any. */
200
+ private _activeSchema: z.ZodType | null = null
201
+
154
202
  /** Resolved max tokens: per-call override > options-level > undefined (no limit). */
155
203
  private get maxTokens(): number | undefined {
156
204
  return (this.state.get('callMaxTokens') as number | null) ?? this.options.maxTokens ?? undefined
@@ -419,6 +467,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
419
467
  */
420
468
  async ask(content: string | ContentPart[], options?: AskOptions): Promise<string> {
421
469
  this.state.set('callMaxTokens', options?.maxTokens ?? null)
470
+ this._activeSchema = options?.schema ?? null
422
471
 
423
472
  // Auto-compact before adding the new message
424
473
  if (this.options.autoCompact) {
@@ -436,6 +485,8 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
436
485
  this.emit('userMessage', content)
437
486
 
438
487
  try {
488
+ let raw: string
489
+
439
490
  if (this.apiMode === 'responses') {
440
491
  const previousResponseId = this.state.get('lastResponseId') || undefined
441
492
  let input: OpenAI.Responses.ResponseInput
@@ -449,17 +500,31 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
449
500
  input = this.messagesToResponsesInput()
450
501
  }
451
502
 
452
- return await this.runResponsesLoop({
503
+ raw = await this.runResponsesLoop({
453
504
  turn: 1,
454
505
  accumulated: '',
455
506
  input,
456
507
  previousResponseId,
457
508
  })
509
+ } else {
510
+ raw = await this.runChatCompletionLoop({ turn: 1, accumulated: '' })
458
511
  }
459
512
 
460
- return await this.runChatCompletionLoop({ turn: 1, accumulated: '' })
513
+ // When a structured output schema is active, parse the JSON response
514
+ if (this._activeSchema) {
515
+ try {
516
+ const parsed = JSON.parse(raw)
517
+ return parsed
518
+ } catch {
519
+ // Model returned something that isn't valid JSON — return raw
520
+ return raw
521
+ }
522
+ }
523
+
524
+ return raw
461
525
  } finally {
462
526
  this.state.set('callMaxTokens', null)
527
+ this._activeSchema = null
463
528
  }
464
529
  }
465
530
 
@@ -545,6 +610,28 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
545
610
  return input
546
611
  }
547
612
 
613
+ /**
614
+ * Build the OpenAI response_format / text.format config from the active Zod schema.
615
+ * Returns undefined when no schema is active.
616
+ */
617
+ private get structuredOutputConfig(): { name: string; schema: Record<string, any>; strict: true } | undefined {
618
+ if (!this._activeSchema) return undefined
619
+
620
+ const raw = (this._activeSchema as any).toJSONSchema() as Record<string, any>
621
+ const strict = strictifySchema(raw)
622
+
623
+ // Derive a name from the schema description or fall back to a default.
624
+ // OpenAI requires [a-zA-Z0-9_-] max 64 chars.
625
+ const desc = raw.description || 'structured_output'
626
+ const name = desc.replace(/[^a-zA-Z0-9_-]/g, '_').slice(0, 64)
627
+
628
+ return {
629
+ name,
630
+ schema: { type: strict.type || 'object', properties: strict.properties, required: strict.required, additionalProperties: false },
631
+ strict: true,
632
+ }
633
+ }
634
+
548
635
  /** Returns the OpenAI client instance from the container. */
549
636
  get openai() {
550
637
  let baseURL = this.options.clientOptions?.baseURL ? this.options.clientOptions.baseURL : undefined
@@ -603,6 +690,40 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
603
690
  })
604
691
  }
605
692
 
693
+ /**
694
+ * Execute a single tool call, routing through the pluggable toolExecutor
695
+ * if one is set (e.g. by the Assistant's interceptor chain).
696
+ */
697
+ private async executeTool(toolName: string, rawArgs: string): Promise<string> {
698
+ const tool = this.tools[toolName]
699
+ const callCount = (this.state.get('toolCalls') || 0) + 1
700
+ this.state.set('toolCalls', callCount)
701
+
702
+ if (!tool) {
703
+ const result = JSON.stringify({ error: `Unknown tool: ${toolName}` })
704
+ this.emit('toolError', toolName, result)
705
+ return result
706
+ }
707
+
708
+ if (this.toolExecutor) {
709
+ const args = rawArgs ? JSON.parse(rawArgs) : {}
710
+ return this.toolExecutor(toolName, args, tool.handler)
711
+ }
712
+
713
+ try {
714
+ const args = rawArgs ? JSON.parse(rawArgs) : {}
715
+ this.emit('toolCall', toolName, args)
716
+ const output = await tool.handler(args)
717
+ const result = typeof output === 'string' ? output : JSON.stringify(output)
718
+ this.emit('toolResult', toolName, result)
719
+ return result
720
+ } catch (err: any) {
721
+ const result = JSON.stringify({ error: err.message || String(err) })
722
+ this.emit('toolError', toolName, err)
723
+ return result
724
+ }
725
+ }
726
+
606
727
  /**
607
728
  * Runs the streaming Responses API loop. Handles local function calls by
608
729
  * executing handlers and submitting `function_call_output` items until
@@ -625,6 +746,10 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
625
746
  this.state.set('streaming', true)
626
747
  this.emit('turnStart', { turn, isFollowUp: turn > 1 })
627
748
 
749
+ const textFormat = this.structuredOutputConfig
750
+ ? { text: { format: { type: 'json_schema' as const, ...this.structuredOutputConfig } } }
751
+ : {}
752
+
628
753
  try {
629
754
  const stream = await this.openai.raw.responses.create({
630
755
  model: this.model as OpenAI.Responses.ResponseCreateParams['model'],
@@ -634,6 +759,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
634
759
  ...(toolsParam ? { tools: toolsParam, tool_choice: 'auto', parallel_tool_calls: true } : {}),
635
760
  ...(this.responsesInstructions ? { instructions: this.responsesInstructions } : {}),
636
761
  ...(this.maxTokens ? { max_output_tokens: this.maxTokens } : {}),
762
+ ...textFormat,
637
763
  })
638
764
 
639
765
  for await (const event of stream) {
@@ -690,27 +816,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
690
816
 
691
817
  const functionOutputs: OpenAI.Responses.ResponseInputItem.FunctionCallOutput[] = []
692
818
  for (const call of functionCalls) {
693
- const toolName = call.name
694
- const tool = this.tools[toolName]
695
- const callCount = (this.state.get('toolCalls') || 0) + 1
696
- this.state.set('toolCalls', callCount)
697
-
698
- let result: string
699
- if (!tool) {
700
- result = JSON.stringify({ error: `Unknown tool: ${toolName}` })
701
- this.emit('toolError', toolName, result)
702
- } else {
703
- try {
704
- const args = call.arguments ? JSON.parse(call.arguments) : {}
705
- this.emit('toolCall', toolName, args)
706
- const output = await tool.handler(args)
707
- result = typeof output === 'string' ? output : JSON.stringify(output)
708
- this.emit('toolResult', toolName, result)
709
- } catch (err: any) {
710
- result = JSON.stringify({ error: err.message || String(err) })
711
- this.emit('toolError', toolName, err)
712
- }
713
- }
819
+ const result = await this.executeTool(call.name, call.arguments || '{}')
714
820
 
715
821
  this.pushMessage({
716
822
  role: 'tool',
@@ -785,6 +891,10 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
785
891
  let turnContent = ''
786
892
  let toolCalls: Array<{ id: string; function: { name: string; arguments: string }; type: 'function' }> = []
787
893
 
894
+ const responseFormat = this.structuredOutputConfig
895
+ ? { response_format: { type: 'json_schema' as const, json_schema: this.structuredOutputConfig } }
896
+ : {}
897
+
788
898
  try {
789
899
  const stream = await this.openai.raw.chat.completions.create({
790
900
  model: this.model,
@@ -792,6 +902,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
792
902
  stream: true,
793
903
  ...(toolsParam ? { tools: toolsParam, tool_choice: 'auto' } : {}),
794
904
  ...(this.maxTokens ? { max_tokens: this.maxTokens } : {}),
905
+ ...responseFormat,
795
906
  })
796
907
 
797
908
  for await (const chunk of stream) {
@@ -850,28 +961,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
850
961
  this.emit('toolCallsStart', toolCalls)
851
962
 
852
963
  for (const tc of toolCalls) {
853
- const toolName = tc.function.name
854
- const tool = this.tools[toolName]
855
- const callCount = (this.state.get('toolCalls') || 0) + 1
856
- this.state.set('toolCalls', callCount)
857
-
858
- let result: string
859
-
860
- if (!tool) {
861
- result = JSON.stringify({ error: `Unknown tool: ${toolName}` })
862
- this.emit('toolError', toolName, result)
863
- } else {
864
- try {
865
- const args = JSON.parse(tc.function.arguments)
866
- this.emit('toolCall', toolName, args)
867
- const output = await tool.handler(args)
868
- result = typeof output === 'string' ? output : JSON.stringify(output)
869
- this.emit('toolResult', toolName, result)
870
- } catch (err: any) {
871
- result = JSON.stringify({ error: err.message || String(err) })
872
- this.emit('toolError', toolName, err)
873
- }
874
- }
964
+ const result = await this.executeTool(tc.function.name, tc.function.arguments)
875
965
 
876
966
  const toolMessage: OpenAI.Chat.Completions.ChatCompletionToolMessageParam = {
877
967
  role: 'tool',
@@ -0,0 +1,79 @@
1
+ /**
2
+ * A composable middleware chain. Each interceptor receives a mutable
3
+ * context and a `next` function. Calling `next()` continues the chain;
4
+ * skipping it short-circuits.
5
+ */
6
+
7
+ export type InterceptorFn<T> = (ctx: T, next: () => Promise<void>) => Promise<void>
8
+
9
+ export class InterceptorChain<T> {
10
+ private fns: InterceptorFn<T>[] = []
11
+
12
+ add(fn: InterceptorFn<T>): void {
13
+ this.fns.push(fn)
14
+ }
15
+
16
+ remove(fn: InterceptorFn<T>): void {
17
+ const idx = this.fns.indexOf(fn)
18
+ if (idx !== -1) this.fns.splice(idx, 1)
19
+ }
20
+
21
+ get hasInterceptors(): boolean {
22
+ return this.fns.length > 0
23
+ }
24
+
25
+ get size(): number {
26
+ return this.fns.length
27
+ }
28
+
29
+ async run(ctx: T, final: () => Promise<void>): Promise<void> {
30
+ let index = 0
31
+ const fns = this.fns
32
+
33
+ const next = async (): Promise<void> => {
34
+ if (index < fns.length) {
35
+ const fn = fns[index++]!
36
+ await fn(ctx, next)
37
+ } else {
38
+ await final()
39
+ }
40
+ }
41
+
42
+ await next()
43
+ }
44
+ }
45
+
46
+ export interface BeforeAskCtx {
47
+ question: string | any[]
48
+ options?: any
49
+ result?: string
50
+ }
51
+
52
+ export interface ToolCallCtx {
53
+ name: string
54
+ args: Record<string, any>
55
+ result?: string
56
+ error?: any
57
+ skip?: boolean
58
+ }
59
+
60
+ export interface BeforeResponseCtx {
61
+ text: string
62
+ }
63
+
64
+ export interface BeforeTurnCtx {
65
+ turn: number
66
+ isFollowUp: boolean
67
+ messages: any[]
68
+ skip?: boolean
69
+ }
70
+
71
+ export interface InterceptorPoints {
72
+ beforeAsk: BeforeAskCtx
73
+ beforeTurn: BeforeTurnCtx
74
+ beforeToolCall: ToolCallCtx
75
+ afterToolCall: ToolCallCtx
76
+ beforeResponse: BeforeResponseCtx
77
+ }
78
+
79
+ export type InterceptorPoint = keyof InterceptorPoints