@soederpop/luca 0.0.28 → 0.0.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,144 @@
1
+ ---
2
+ title: "Structured Output with Assistants"
3
+ tags: [assistant, conversation, structured-output, zod, openai]
4
+ lastTested: null
5
+ lastTestPassed: null
6
+ ---
7
+
8
+ # Structured Output with Assistants
9
+
10
+ Get typed, schema-validated JSON responses from OpenAI instead of raw text strings.
11
+
12
+ ## Overview
13
+
14
+ OpenAI's Structured Outputs feature constrains the model to return JSON that exactly matches a schema you provide. Combined with Zod, this means `ask()` can return parsed objects instead of strings — no regex parsing, no "please respond in JSON", no malformed output.
15
+
16
+ Pass a `schema` option to `ask()` and the response comes back as a parsed object guaranteed to match your schema.
17
+
18
+ ## Basic: Extract Structured Data
19
+
20
+ The simplest use case — ask a question and get structured data back.
21
+
22
+ ```ts
23
+ const { z } = container
24
+ const conversation = container.feature('conversation', {
25
+ model: 'gpt-4.1-mini',
26
+ history: [{ role: 'system', content: 'You are a helpful data extraction assistant.' }]
27
+ })
28
+
29
+ const result = await conversation.ask('The founders of Apple are Steve Jobs, Steve Wozniak, and Ronald Wayne. They started it in 1976 in Los Altos, California.', {
30
+ schema: z.object({
31
+ company: z.string(),
32
+ foundedYear: z.number(),
33
+ location: z.string(),
34
+ founders: z.array(z.string()),
35
+ }).describe('CompanyInfo')
36
+ })
37
+
38
+ console.log('Company:', result.company)
39
+ console.log('Founded:', result.foundedYear)
40
+ console.log('Location:', result.location)
41
+ console.log('Founders:', result.founders)
42
+ ```
43
+
44
+ The `.describe()` text is used as the schema name sent to OpenAI — keep it short and descriptive (it is sanitized to letters, digits, underscores, and hyphens, and truncated to 64 characters).
45
+
46
+ ## Enums and Categorization
47
+
48
+ Structured outputs work great for classification tasks where you want the model to pick from a fixed set of values.
49
+
50
+ ```ts
51
+ const { z } = container
52
+ const conversation = container.feature('conversation', {
53
+ model: 'gpt-4.1-mini',
54
+ history: [{ role: 'system', content: 'You are a helpful assistant.' }]
55
+ })
56
+
57
+ const sentiment = await conversation.ask('I absolutely love this product, it changed my life!', {
58
+ schema: z.object({
59
+ sentiment: z.enum(['positive', 'negative', 'neutral', 'mixed']),
60
+ confidence: z.number(),
61
+ reasoning: z.string(),
62
+ }).describe('SentimentAnalysis')
63
+ })
64
+
65
+ console.log('Sentiment:', sentiment.sentiment)
66
+ console.log('Confidence:', sentiment.confidence)
67
+ console.log('Reasoning:', sentiment.reasoning)
68
+ ```
69
+
70
+ Because the model is constrained by the schema, `sentiment` will always be one of the four allowed values.
71
+
72
+ ## Nested Objects and Arrays
73
+
74
+ Schemas can be as complex as you need. Here we extract a structured analysis with nested objects.
75
+
76
+ ```ts
77
+ const { z } = container
78
+ const conversation = container.feature('conversation', {
79
+ model: 'gpt-4.1-mini',
80
+ history: [{ role: 'system', content: 'You are a technical analyst.' }]
81
+ })
82
+
83
+ const analysis = await conversation.ask(
84
+ 'TypeScript 5.5 introduced inferred type predicates, which automatically narrow types in filter callbacks. It also added isolated declarations for faster builds in monorepos, and a new regex syntax checking feature.',
85
+ {
86
+ schema: z.object({
87
+ subject: z.string(),
88
+ version: z.string(),
89
+ features: z.array(z.object({
90
+ name: z.string(),
91
+ category: z.enum(['type-system', 'performance', 'developer-experience', 'syntax', 'other']),
92
+ summary: z.string(),
93
+ })),
94
+ featureCount: z.number(),
95
+ }).describe('ReleaseAnalysis')
96
+ }
97
+ )
98
+
99
+ console.log('Subject:', analysis.subject, analysis.version)
100
+ console.log('Features:')
101
+ for (const f of analysis.features) {
102
+ console.log(` [${f.category}] ${f.name}: ${f.summary}`)
103
+ }
104
+ console.log('Total features:', analysis.featureCount)
105
+ ```
106
+
107
+ Every level of nesting is validated — the model cannot return a feature without a category or skip required fields.
108
+
109
+ ## With an Assistant
110
+
111
+ Structured outputs work the same way through the assistant API. The schema passes straight through to the underlying conversation.
112
+
113
+ ```ts
114
+ const { z } = container
115
+ const assistant = container.feature('assistant', {
116
+ systemPrompt: 'You are a code review assistant. You analyze code snippets and provide structured feedback.',
117
+ model: 'gpt-4.1-mini',
118
+ })
119
+
120
+ const review = await assistant.ask(
121
+ 'Review this: function add(a, b) { return a + b }',
122
+ {
123
+ schema: z.object({
124
+ issues: z.array(z.object({
125
+ severity: z.enum(['info', 'warning', 'error']),
126
+ message: z.string(),
127
+ })),
128
+ suggestion: z.string(),
129
+ score: z.number(),
130
+ }).describe('CodeReview')
131
+ }
132
+ )
133
+
134
+ console.log('Score:', review.score)
135
+ console.log('Suggestion:', review.suggestion)
136
+ console.log('Issues:')
137
+ for (const issue of review.issues) {
138
+ console.log(` [${issue.severity}] ${issue.message}`)
139
+ }
140
+ ```
141
+
142
+ ## Summary
143
+
144
+ This demo covered extracting structured data, classification with enums, nested schema validation, and using structured outputs through both the conversation and assistant APIs. The key is passing a Zod schema via `{ schema }` in the options to `ask()` — OpenAI guarantees the response matches, and you get a parsed object back.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@soederpop/luca",
3
- "version": "0.0.28",
3
+ "version": "0.0.29",
4
4
  "website": "https://luca.soederpop.com",
5
5
  "description": "lightweight universal conversational architecture AKA Le Ultimate Component Architecture AKA Last Universal Common Ancestor, part AI part Human",
6
6
  "author": "jon soeder aka the people's champ <jon@soederpop.com>",
@@ -7,6 +7,7 @@ import type { AGIContainer } from '../container.server.js'
7
7
  import type { ContentDb } from '@soederpop/luca/node'
8
8
  import type { ConversationHistory, ConversationMeta } from './conversation-history'
9
9
  import hashObject from '../../hash-object.js'
10
+ import { InterceptorChain, type InterceptorFn, type InterceptorPoints, type InterceptorPoint } from '../lib/interceptor-chain.js'
10
11
 
11
12
  declare module '@soederpop/luca/feature' {
12
13
  interface AvailableFeatures {
@@ -113,6 +114,26 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
113
114
 
114
115
  static { Feature.register(this, 'assistant') }
115
116
 
117
/** One InterceptorChain per interception point in the ask/tool pipeline. */
readonly interceptors = {
  beforeAsk: new InterceptorChain<InterceptorPoints['beforeAsk']>(),
  beforeTurn: new InterceptorChain<InterceptorPoints['beforeTurn']>(),
  beforeToolCall: new InterceptorChain<InterceptorPoints['beforeToolCall']>(),
  afterToolCall: new InterceptorChain<InterceptorPoints['afterToolCall']>(),
  beforeResponse: new InterceptorChain<InterceptorPoints['beforeResponse']>(),
}

/**
 * Register an interceptor at a given point in the pipeline.
 *
 * @param point - The interception point
 * @param fn - Middleware function receiving (ctx, next)
 * @returns this, for chaining
 */
intercept<K extends InterceptorPoint>(point: K, fn: InterceptorFn<InterceptorPoints[K]>): this {
  // Cast required: TS cannot correlate the indexed chain's context type
  // with K across the `interceptors` record lookup.
  this.interceptors[point].add(fn as any)
  return this
}
136
+
116
137
  /** @returns Default state with the assistant not started, zero conversations, and the resolved folder path. */
117
138
  override get initialState(): AssistantState {
118
139
  return {
@@ -911,6 +932,38 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
911
932
  this.conversation.on('toolResult', (name: string, result: any) => this.emit('toolResult', name, result))
912
933
  this.conversation.on('toolError', (name: string, error: any) => this.emit('toolError', name, error))
913
934
 
935
+ // Install interceptor-aware tool executor on the conversation
936
+ this.conversation.toolExecutor = async (name: string, args: Record<string, any>, handler: (...a: any[]) => Promise<any>) => {
937
+ const ctx = { name, args, result: undefined as string | undefined, error: undefined, skip: false }
938
+
939
+ await this.interceptors.beforeToolCall.run(ctx, async () => {})
940
+
941
+ if (ctx.skip) {
942
+ const result = ctx.result ?? JSON.stringify({ skipped: true })
943
+ this.emit('toolResult', ctx.name, result)
944
+ return result
945
+ }
946
+
947
+ try {
948
+ this.emit('toolCall', ctx.name, ctx.args)
949
+ const output = await handler(ctx.args)
950
+ ctx.result = typeof output === 'string' ? output : JSON.stringify(output)
951
+ } catch (err: any) {
952
+ ctx.error = err
953
+ ctx.result = JSON.stringify({ error: err.message || String(err) })
954
+ }
955
+
956
+ await this.interceptors.afterToolCall.run(ctx, async () => {})
957
+
958
+ if (ctx.error && !ctx.result?.includes('"error"')) {
959
+ this.emit('toolError', ctx.name, ctx.error)
960
+ } else {
961
+ this.emit('toolResult', ctx.name, ctx.result!)
962
+ }
963
+
964
+ return ctx.result!
965
+ }
966
+
914
967
  // Load conversation history for non-lifecycle modes
915
968
  await this.loadConversationHistory()
916
969
 
@@ -961,7 +1014,23 @@ export class Assistant extends Feature<AssistantState, AssistantOptions> {
961
1014
  question = this.prependTimestamp(question)
962
1015
  }
963
1016
 
964
- const result = await this.conversation.ask(question, options)
1017
+ // Run beforeAsk interceptors — they can rewrite the question or short-circuit
1018
+ if (this.interceptors.beforeAsk.hasInterceptors) {
1019
+ const ctx = { question, options } as InterceptorPoints['beforeAsk']
1020
+ await this.interceptors.beforeAsk.run(ctx, async () => {})
1021
+ if (ctx.result !== undefined) return ctx.result
1022
+ question = ctx.question
1023
+ options = ctx.options
1024
+ }
1025
+
1026
+ let result = await this.conversation.ask(question, options)
1027
+
1028
+ // Run beforeResponse interceptors — they can rewrite the final text
1029
+ if (this.interceptors.beforeResponse.hasInterceptors) {
1030
+ const ctx = { text: result }
1031
+ await this.interceptors.beforeResponse.run(ctx, async () => {})
1032
+ result = ctx.text
1033
+ }
965
1034
 
966
1035
  // Auto-save for non-lifecycle modes
967
1036
  if (this.options.historyMode !== 'lifecycle' && this.state.get('threadId')) {
@@ -125,6 +125,44 @@ export type ConversationState = z.infer<typeof ConversationStateSchema>
125
125
 
126
126
export type AskOptions = {
  /** Per-call cap on response tokens; overrides the feature-level maxTokens option. */
  maxTokens?: number
  /**
   * When provided, enables OpenAI Structured Outputs. The model is constrained
   * to return JSON matching this Zod schema. The return value of ask() will be
   * the parsed object instead of a raw string.
   *
   * NOTE(review): ask() is still declared as Promise<string> while the parsed
   * object flows through that signature — consider an overload keyed on schema.
   */
  schema?: z.ZodType
}
135
+
136
+ /**
137
+ * Recursively set `additionalProperties: false` on every object-type node
138
+ * in a JSON Schema tree. OpenAI strict mode requires this at every level.
139
+ * Also ensures every object has a `required` array listing all its property keys.
140
+ */
141
+ function strictifySchema(schema: Record<string, any>): Record<string, any> {
142
+ const clone = { ...schema }
143
+
144
+ if (clone.type === 'object' && clone.properties) {
145
+ clone.additionalProperties = false
146
+ clone.required = Object.keys(clone.properties)
147
+ const props: Record<string, any> = {}
148
+ for (const [key, val] of Object.entries(clone.properties)) {
149
+ props[key] = strictifySchema(val as Record<string, any>)
150
+ }
151
+ clone.properties = props
152
+ }
153
+
154
+ if (clone.items) {
155
+ clone.items = strictifySchema(clone.items)
156
+ }
157
+
158
+ // anyOf / oneOf / allOf
159
+ for (const combiner of ['anyOf', 'oneOf', 'allOf'] as const) {
160
+ if (Array.isArray(clone[combiner])) {
161
+ clone[combiner] = clone[combiner].map((s: Record<string, any>) => strictifySchema(s))
162
+ }
163
+ }
164
+
165
+ return clone
128
166
  }
129
167
 
130
168
  /**
@@ -151,6 +189,16 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
151
189
 
152
190
  static { Feature.register(this, 'conversation') }
153
191
 
192
/**
 * Pluggable tool executor. Called for each tool invocation with the tool
 * name, parsed args, and the default handler. Return the serialized result string.
 * The Assistant replaces this to wire in beforeToolCall/afterToolCall interceptors.
 *
 * NOTE(review): despite the variadic handler type, executeTool invokes the
 * handler with a single parsed-args object — consider tightening the signature.
 */
toolExecutor: ((name: string, args: Record<string, any>, handler: (...args: any[]) => Promise<any>) => Promise<string>) | null = null

/** The active structured output schema for the current ask() call, if any. */
private _activeSchema: z.ZodType | null = null
201
+
154
202
  /** Resolved max tokens: per-call override > options-level > undefined (no limit). */
155
203
  private get maxTokens(): number | undefined {
156
204
  return (this.state.get('callMaxTokens') as number | null) ?? this.options.maxTokens ?? undefined
@@ -419,6 +467,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
419
467
  */
420
468
  async ask(content: string | ContentPart[], options?: AskOptions): Promise<string> {
421
469
  this.state.set('callMaxTokens', options?.maxTokens ?? null)
470
+ this._activeSchema = options?.schema ?? null
422
471
 
423
472
  // Auto-compact before adding the new message
424
473
  if (this.options.autoCompact) {
@@ -436,6 +485,8 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
436
485
  this.emit('userMessage', content)
437
486
 
438
487
  try {
488
+ let raw: string
489
+
439
490
  if (this.apiMode === 'responses') {
440
491
  const previousResponseId = this.state.get('lastResponseId') || undefined
441
492
  let input: OpenAI.Responses.ResponseInput
@@ -449,17 +500,31 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
449
500
  input = this.messagesToResponsesInput()
450
501
  }
451
502
 
452
- return await this.runResponsesLoop({
503
+ raw = await this.runResponsesLoop({
453
504
  turn: 1,
454
505
  accumulated: '',
455
506
  input,
456
507
  previousResponseId,
457
508
  })
509
+ } else {
510
+ raw = await this.runChatCompletionLoop({ turn: 1, accumulated: '' })
458
511
  }
459
512
 
460
- return await this.runChatCompletionLoop({ turn: 1, accumulated: '' })
513
+ // When a structured output schema is active, parse the JSON response
514
+ if (this._activeSchema) {
515
+ try {
516
+ const parsed = JSON.parse(raw)
517
+ return parsed
518
+ } catch {
519
+ // Model returned something that isn't valid JSON — return raw
520
+ return raw
521
+ }
522
+ }
523
+
524
+ return raw
461
525
  } finally {
462
526
  this.state.set('callMaxTokens', null)
527
+ this._activeSchema = null
463
528
  }
464
529
  }
465
530
 
@@ -545,6 +610,28 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
545
610
  return input
546
611
  }
547
612
 
613
+ /**
614
+ * Build the OpenAI response_format / text.format config from the active Zod schema.
615
+ * Returns undefined when no schema is active.
616
+ */
617
+ private get structuredOutputConfig(): { name: string; schema: Record<string, any>; strict: true } | undefined {
618
+ if (!this._activeSchema) return undefined
619
+
620
+ const raw = (this._activeSchema as any).toJSONSchema() as Record<string, any>
621
+ const strict = strictifySchema(raw)
622
+
623
+ // Derive a name from the schema description or fall back to a default.
624
+ // OpenAI requires [a-zA-Z0-9_-] max 64 chars.
625
+ const desc = raw.description || 'structured_output'
626
+ const name = desc.replace(/[^a-zA-Z0-9_-]/g, '_').slice(0, 64)
627
+
628
+ return {
629
+ name,
630
+ schema: { type: strict.type || 'object', properties: strict.properties, required: strict.required, additionalProperties: false },
631
+ strict: true,
632
+ }
633
+ }
634
+
548
635
  /** Returns the OpenAI client instance from the container. */
549
636
  get openai() {
550
637
  let baseURL = this.options.clientOptions?.baseURL ? this.options.clientOptions.baseURL : undefined
@@ -603,6 +690,40 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
603
690
  })
604
691
  }
605
692
 
693
+ /**
694
+ * Execute a single tool call, routing through the pluggable toolExecutor
695
+ * if one is set (e.g. by the Assistant's interceptor chain).
696
+ */
697
+ private async executeTool(toolName: string, rawArgs: string): Promise<string> {
698
+ const tool = this.tools[toolName]
699
+ const callCount = (this.state.get('toolCalls') || 0) + 1
700
+ this.state.set('toolCalls', callCount)
701
+
702
+ if (!tool) {
703
+ const result = JSON.stringify({ error: `Unknown tool: ${toolName}` })
704
+ this.emit('toolError', toolName, result)
705
+ return result
706
+ }
707
+
708
+ if (this.toolExecutor) {
709
+ const args = rawArgs ? JSON.parse(rawArgs) : {}
710
+ return this.toolExecutor(toolName, args, tool.handler)
711
+ }
712
+
713
+ try {
714
+ const args = rawArgs ? JSON.parse(rawArgs) : {}
715
+ this.emit('toolCall', toolName, args)
716
+ const output = await tool.handler(args)
717
+ const result = typeof output === 'string' ? output : JSON.stringify(output)
718
+ this.emit('toolResult', toolName, result)
719
+ return result
720
+ } catch (err: any) {
721
+ const result = JSON.stringify({ error: err.message || String(err) })
722
+ this.emit('toolError', toolName, err)
723
+ return result
724
+ }
725
+ }
726
+
606
727
  /**
607
728
  * Runs the streaming Responses API loop. Handles local function calls by
608
729
  * executing handlers and submitting `function_call_output` items until
@@ -625,6 +746,10 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
625
746
  this.state.set('streaming', true)
626
747
  this.emit('turnStart', { turn, isFollowUp: turn > 1 })
627
748
 
749
+ const textFormat = this.structuredOutputConfig
750
+ ? { text: { format: { type: 'json_schema' as const, ...this.structuredOutputConfig } } }
751
+ : {}
752
+
628
753
  try {
629
754
  const stream = await this.openai.raw.responses.create({
630
755
  model: this.model as OpenAI.Responses.ResponseCreateParams['model'],
@@ -634,6 +759,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
634
759
  ...(toolsParam ? { tools: toolsParam, tool_choice: 'auto', parallel_tool_calls: true } : {}),
635
760
  ...(this.responsesInstructions ? { instructions: this.responsesInstructions } : {}),
636
761
  ...(this.maxTokens ? { max_output_tokens: this.maxTokens } : {}),
762
+ ...textFormat,
637
763
  })
638
764
 
639
765
  for await (const event of stream) {
@@ -690,27 +816,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
690
816
 
691
817
  const functionOutputs: OpenAI.Responses.ResponseInputItem.FunctionCallOutput[] = []
692
818
  for (const call of functionCalls) {
693
- const toolName = call.name
694
- const tool = this.tools[toolName]
695
- const callCount = (this.state.get('toolCalls') || 0) + 1
696
- this.state.set('toolCalls', callCount)
697
-
698
- let result: string
699
- if (!tool) {
700
- result = JSON.stringify({ error: `Unknown tool: ${toolName}` })
701
- this.emit('toolError', toolName, result)
702
- } else {
703
- try {
704
- const args = call.arguments ? JSON.parse(call.arguments) : {}
705
- this.emit('toolCall', toolName, args)
706
- const output = await tool.handler(args)
707
- result = typeof output === 'string' ? output : JSON.stringify(output)
708
- this.emit('toolResult', toolName, result)
709
- } catch (err: any) {
710
- result = JSON.stringify({ error: err.message || String(err) })
711
- this.emit('toolError', toolName, err)
712
- }
713
- }
819
+ const result = await this.executeTool(call.name, call.arguments || '{}')
714
820
 
715
821
  this.pushMessage({
716
822
  role: 'tool',
@@ -785,6 +891,10 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
785
891
  let turnContent = ''
786
892
  let toolCalls: Array<{ id: string; function: { name: string; arguments: string }; type: 'function' }> = []
787
893
 
894
+ const responseFormat = this.structuredOutputConfig
895
+ ? { response_format: { type: 'json_schema' as const, json_schema: this.structuredOutputConfig } }
896
+ : {}
897
+
788
898
  try {
789
899
  const stream = await this.openai.raw.chat.completions.create({
790
900
  model: this.model,
@@ -792,6 +902,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
792
902
  stream: true,
793
903
  ...(toolsParam ? { tools: toolsParam, tool_choice: 'auto' } : {}),
794
904
  ...(this.maxTokens ? { max_tokens: this.maxTokens } : {}),
905
+ ...responseFormat,
795
906
  })
796
907
 
797
908
  for await (const chunk of stream) {
@@ -850,28 +961,7 @@ export class Conversation extends Feature<ConversationState, ConversationOptions
850
961
  this.emit('toolCallsStart', toolCalls)
851
962
 
852
963
  for (const tc of toolCalls) {
853
- const toolName = tc.function.name
854
- const tool = this.tools[toolName]
855
- const callCount = (this.state.get('toolCalls') || 0) + 1
856
- this.state.set('toolCalls', callCount)
857
-
858
- let result: string
859
-
860
- if (!tool) {
861
- result = JSON.stringify({ error: `Unknown tool: ${toolName}` })
862
- this.emit('toolError', toolName, result)
863
- } else {
864
- try {
865
- const args = JSON.parse(tc.function.arguments)
866
- this.emit('toolCall', toolName, args)
867
- const output = await tool.handler(args)
868
- result = typeof output === 'string' ? output : JSON.stringify(output)
869
- this.emit('toolResult', toolName, result)
870
- } catch (err: any) {
871
- result = JSON.stringify({ error: err.message || String(err) })
872
- this.emit('toolError', toolName, err)
873
- }
874
- }
964
+ const result = await this.executeTool(tc.function.name, tc.function.arguments)
875
965
 
876
966
  const toolMessage: OpenAI.Chat.Completions.ChatCompletionToolMessageParam = {
877
967
  role: 'tool',
@@ -0,0 +1,79 @@
1
+ /**
2
+ * A composable middleware chain. Each interceptor receives a mutable
3
+ * context and a `next` function. Calling `next()` continues the chain;
4
+ * skipping it short-circuits.
5
+ */
6
+
7
+ export type InterceptorFn<T> = (ctx: T, next: () => Promise<void>) => Promise<void>
8
+
9
+ export class InterceptorChain<T> {
10
+ private fns: InterceptorFn<T>[] = []
11
+
12
+ add(fn: InterceptorFn<T>): void {
13
+ this.fns.push(fn)
14
+ }
15
+
16
+ remove(fn: InterceptorFn<T>): void {
17
+ const idx = this.fns.indexOf(fn)
18
+ if (idx !== -1) this.fns.splice(idx, 1)
19
+ }
20
+
21
+ get hasInterceptors(): boolean {
22
+ return this.fns.length > 0
23
+ }
24
+
25
+ get size(): number {
26
+ return this.fns.length
27
+ }
28
+
29
+ async run(ctx: T, final: () => Promise<void>): Promise<void> {
30
+ let index = 0
31
+ const fns = this.fns
32
+
33
+ const next = async (): Promise<void> => {
34
+ if (index < fns.length) {
35
+ const fn = fns[index++]!
36
+ await fn(ctx, next)
37
+ } else {
38
+ await final()
39
+ }
40
+ }
41
+
42
+ await next()
43
+ }
44
+ }
45
+
46
/** Context for `beforeAsk` interceptors: rewrite the question/options in place, or set `result` to short-circuit. */
export interface BeforeAskCtx {
  /** The outgoing question; interceptors may rewrite it. */
  question: string | any[]
  /** Per-call ask options, passed through to the conversation. */
  options?: any
  /** When set by an interceptor, ask() returns this value without calling the model. */
  result?: string
}

/** Context shared by `beforeToolCall` and `afterToolCall` interceptors. */
export interface ToolCallCtx {
  /** Tool name as requested by the model. */
  name: string
  /** Parsed tool-call arguments. */
  args: Record<string, any>
  /** Serialized tool result; afterToolCall interceptors may replace it. */
  result?: string
  /** Error thrown by the tool handler, if any. */
  error?: any
  /** Set true in beforeToolCall to skip executing the tool. */
  skip?: boolean
}

/** Context for `beforeResponse` interceptors: the final response text, rewritable. */
export interface BeforeResponseCtx {
  text: string
}

/** Context for `beforeTurn` interceptors. */
export interface BeforeTurnCtx {
  turn: number
  isFollowUp: boolean
  messages: any[]
  // NOTE(review): `skip` is declared but no call site in the visible source
  // honors it — confirm it is actually consulted before documenting further.
  skip?: boolean
}

/** Maps each interception point name to its context type. */
export interface InterceptorPoints {
  beforeAsk: BeforeAskCtx
  beforeTurn: BeforeTurnCtx
  beforeToolCall: ToolCallCtx
  afterToolCall: ToolCallCtx
  beforeResponse: BeforeResponseCtx
}

/** Union of all interception point names. */
export type InterceptorPoint = keyof InterceptorPoints