@codex-native/sdk 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,4 +1,8 @@
1
1
  import { ContentBlock } from '@modelcontextprotocol/sdk/types.js';
2
+ import { Diagnostic } from 'vscode-languageserver-types';
3
+ import { ModelProvider, Model, StreamEvent } from '@openai/agents-core';
4
+ import { tool } from '@openai/agents';
5
+ import { OpencodeClient, Event } from '@opencode-ai/sdk';
2
6
 
3
7
  /** The status of a command execution. */
4
8
  type CommandExecutionStatus = "in_progress" | "completed" | "failed";
@@ -116,7 +120,7 @@ type TurnStartedEvent = {
116
120
  type: "turn.started";
117
121
  };
118
122
  /** Describes the usage of tokens during a turn. */
119
- type Usage$1 = {
123
+ type Usage = {
120
124
  /** The number of input tokens used during the turn. */
121
125
  input_tokens: number;
122
126
  /** The number of cached input tokens used during the turn. */
@@ -127,13 +131,18 @@ type Usage$1 = {
127
131
  /** Emitted when a turn is completed. Typically right after the assistant's response. */
128
132
  type TurnCompletedEvent = {
129
133
  type: "turn.completed";
130
- usage: Usage$1;
134
+ usage: Usage;
131
135
  };
132
136
  /** Indicates that a turn failed with an error. */
133
137
  type TurnFailedEvent = {
134
138
  type: "turn.failed";
135
139
  error: ThreadError;
136
140
  };
141
+ /** Background notification emitted during an active turn. */
142
+ type BackgroundEvent = {
143
+ type: "background_event";
144
+ message: string;
145
+ };
137
146
  /** Emitted when a new item is added to the thread. Typically the item is initially "in progress". */
138
147
  type ItemStartedEvent = {
139
148
  type: "item.started";
@@ -158,19 +167,294 @@ type ThreadErrorEvent = {
158
167
  type: "error";
159
168
  message: string;
160
169
  };
170
+ /** Review finding with code location */
171
+ type ReviewFinding = {
172
+ title: string;
173
+ body: string;
174
+ confidence_score: number;
175
+ priority: number;
176
+ code_location: {
177
+ absolute_file_path: string;
178
+ line_range: {
179
+ start: number;
180
+ end: number;
181
+ };
182
+ };
183
+ };
184
+ /** Structured review output */
185
+ type ReviewOutputEvent = {
186
+ findings: ReviewFinding[];
187
+ overall_correctness: string;
188
+ overall_explanation: string;
189
+ overall_confidence_score: number;
190
+ };
191
+ /** Emitted when exiting review mode with optional structured results */
192
+ type ExitedReviewModeEvent = {
193
+ type: "exited_review_mode";
194
+ review_output: ReviewOutputEvent | null;
195
+ };
161
196
  /** Top-level JSONL events emitted by codex exec. */
162
- type ThreadEvent = ThreadStartedEvent | TurnStartedEvent | TurnCompletedEvent | TurnFailedEvent | ItemStartedEvent | ItemUpdatedEvent | ItemCompletedEvent | ThreadErrorEvent;
197
+ type ThreadEvent = ThreadStartedEvent | TurnStartedEvent | TurnCompletedEvent | TurnFailedEvent | BackgroundEvent | ItemStartedEvent | ItemUpdatedEvent | ItemCompletedEvent | ExitedReviewModeEvent | ThreadErrorEvent | RawThreadEvent;
198
+ /** Raw protocol event forwarded without transformation. */
199
+ type RawThreadEvent = {
200
+ type: "raw_event";
201
+ raw: unknown;
202
+ };
203
+
204
+ type ApprovalMode = "never" | "on-request" | "on-failure" | "untrusted";
205
+ type SandboxMode = "read-only" | "workspace-write" | "danger-full-access";
206
+ /**
207
+ * Reasoning effort level for reasoning-capable models (e.g., o1, o3).
208
+ * See https://platform.openai.com/docs/guides/reasoning
209
+ *
210
+ * @default "medium" - When undefined, codex uses "medium" as the default
211
+ */
212
+ type ReasoningEffort = "minimal" | "low" | "medium" | "high" | "xhigh";
213
+ /**
214
+ * Controls whether reasoning summaries are included for reasoning-capable models.
215
+ * See https://platform.openai.com/docs/guides/reasoning#reasoning-summaries
216
+ *
217
+ * @default "auto" - When undefined, codex uses "auto" as the default
218
+ */
219
+ type ReasoningSummary = "auto" | "concise" | "detailed" | "none";
220
+ type WorkspaceWriteOptions = {
221
+ /** Enable network access in workspace-write mode. Default: false */
222
+ networkAccess?: boolean;
223
+ /** Additional directories that should be writable */
224
+ writableRoots?: string[];
225
+ /** Exclude the TMPDIR environment variable from writable roots. Default: false */
226
+ excludeTmpdirEnvVar?: boolean;
227
+ /** Exclude /tmp from writable roots on Unix. Default: false */
228
+ excludeSlashTmp?: boolean;
229
+ };
230
+ type ThreadOptions = {
231
+ model?: string;
232
+ /** Override the model provider declared in config.toml */
233
+ modelProvider?: string;
234
+ /** Use local OSS provider via Ollama (pulls models as needed) */
235
+ oss?: boolean;
236
+ sandboxMode?: SandboxMode;
237
+ /** Approval policy for command execution */
238
+ approvalMode?: ApprovalMode;
239
+ /** Options for workspace-write sandbox mode */
240
+ workspaceWriteOptions?: WorkspaceWriteOptions;
241
+ workingDirectory?: string;
242
+ skipGitRepoCheck?: boolean;
243
+ /** Reasoning effort level (only honored for reasoning-capable models). Defaults to "medium" when undefined. */
244
+ reasoningEffort?: ReasoningEffort;
245
+ /** Reasoning summary preference (only honored for reasoning-capable models). Defaults to "auto" when undefined. */
246
+ reasoningSummary?: ReasoningSummary;
247
+ /** @deprecated Use sandboxMode and approvalMode instead */
248
+ fullAuto?: boolean;
249
+ };
163
250
 
164
251
  type TurnOptions = {
165
252
  /** JSON schema describing the expected agent output. */
166
253
  outputSchema?: unknown;
254
+ /** Whether to use OSS mode with Ollama models */
255
+ oss?: boolean;
256
+ };
257
+
258
+ type NativeConversationSummary = {
259
+ id: string;
260
+ path: string;
261
+ createdAt?: string;
262
+ updatedAt?: string;
263
+ };
264
+ type NativeConversationListPage = {
265
+ conversations: NativeConversationSummary[];
266
+ nextCursor?: string;
267
+ numScannedFiles: number;
268
+ reachedScanCap: boolean;
269
+ };
270
+ type NativeTuiRequest = {
271
+ prompt?: string;
272
+ images?: string[];
273
+ model?: string;
274
+ oss?: boolean;
275
+ sandboxMode?: SandboxMode;
276
+ approvalMode?: ApprovalMode;
277
+ resumeSessionId?: string;
278
+ resumeLast?: boolean;
279
+ resumePicker?: boolean;
280
+ fullAuto?: boolean;
281
+ dangerouslyBypassApprovalsAndSandbox?: boolean;
282
+ workingDirectory?: string;
283
+ configProfile?: string;
284
+ configOverrides?: string[];
285
+ addDir?: string[];
286
+ webSearch?: boolean;
287
+ linuxSandboxPath?: string;
288
+ baseUrl?: string;
289
+ apiKey?: string;
290
+ reasoningEffort?: ReasoningEffort;
291
+ reasoningSummary?: ReasoningSummary;
292
+ };
293
+ type NativeTokenUsage = {
294
+ inputTokens: number;
295
+ cachedInputTokens: number;
296
+ outputTokens: number;
297
+ reasoningOutputTokens: number;
298
+ totalTokens: number;
299
+ };
300
+ type NativeUpdateActionKind = "npmGlobalLatest" | "bunGlobalLatest" | "brewUpgrade";
301
+ type NativeUpdateActionInfo = {
302
+ kind: NativeUpdateActionKind;
303
+ command: string;
304
+ };
305
+ type NativeTuiExitInfo = {
306
+ tokenUsage: NativeTokenUsage;
307
+ conversationId?: string;
308
+ updateAction?: NativeUpdateActionInfo;
309
+ };
310
+ type RepoDiffFileChange = {
311
+ path: string;
312
+ status: string;
313
+ diff: string;
314
+ truncated: boolean;
315
+ previousPath?: string | null;
316
+ };
317
+ type RepoDiffSummary = {
318
+ repoPath: string;
319
+ branch: string;
320
+ baseBranch: string;
321
+ upstreamRef?: string | null;
322
+ mergeBase: string;
323
+ statusSummary: string;
324
+ diffStat: string;
325
+ recentCommits: string;
326
+ changedFiles: RepoDiffFileChange[];
327
+ totalChangedFiles: number;
328
+ };
329
+ type RepoDiffSummaryOptions = {
330
+ cwd?: string;
331
+ baseBranchOverride?: string;
332
+ maxFiles?: number;
333
+ diffContextLines?: number;
334
+ diffCharLimit?: number;
335
+ };
336
+ type ReverieConversation = {
337
+ id: string;
338
+ path: string;
339
+ createdAt?: string;
340
+ updatedAt?: string;
341
+ headRecords: string[];
342
+ tailRecords: string[];
343
+ headRecordsToon: string[];
344
+ tailRecordsToon: string[];
345
+ };
346
+ type ReverieSearchResult = {
347
+ conversation: ReverieConversation;
348
+ relevanceScore: number;
349
+ matchingExcerpts: string[];
350
+ insights: string[];
351
+ rerankerScore?: number;
352
+ };
353
+ type FastEmbedRerankerModelCode = "BAAI/bge-reranker-base" | "rozgo/bge-reranker-v2-m3" | "jinaai/jina-reranker-v1-turbo-en" | "jinaai/jina-reranker-v2-base-multilingual";
354
+ type ReverieSemanticSearchOptions = {
355
+ limit?: number;
356
+ maxCandidates?: number;
357
+ projectRoot?: string;
358
+ batchSize?: number;
359
+ normalize?: boolean;
360
+ cache?: boolean;
361
+ rerankerModel?: FastEmbedRerankerModelCode;
362
+ rerankerCacheDir?: string;
363
+ rerankerMaxLength?: number;
364
+ rerankerShowProgress?: boolean;
365
+ rerankerBatchSize?: number;
366
+ rerankerTopK?: number;
367
+ };
368
+ type ReverieSemanticIndexStats = {
369
+ conversationsIndexed: number;
370
+ documentsEmbedded: number;
371
+ batches: number;
372
+ };
373
+ type FastEmbedInitOptions = {
374
+ model?: string;
375
+ cacheDir?: string;
376
+ maxLength?: number;
377
+ showDownloadProgress?: boolean;
378
+ };
379
+ type FastEmbedEmbedRequest = {
380
+ inputs: string[];
381
+ batchSize?: number;
382
+ normalize?: boolean;
383
+ projectRoot?: string;
384
+ cache?: boolean;
385
+ };
386
+ type TokenizerOptions = {
387
+ model?: string;
388
+ encoding?: "o200k_base" | "cl100k_base";
389
+ };
390
+ type TokenizerEncodeOptions = TokenizerOptions & {
391
+ withSpecialTokens?: boolean;
392
+ };
393
+ type NativeToolInfo = {
394
+ name: string;
395
+ description?: string;
396
+ parameters?: unknown;
397
+ strict?: boolean;
398
+ supportsParallel?: boolean;
399
+ };
400
+ type NativeToolInvocation = {
401
+ toolName: string;
402
+ callId: string;
403
+ arguments?: string;
404
+ input?: string;
167
405
  };
406
+ type NativeToolResult = {
407
+ output?: string;
408
+ success?: boolean;
409
+ error?: string;
410
+ };
411
+ type NativeForkResult = {
412
+ threadId: string;
413
+ rolloutPath: string;
414
+ };
415
+ type ApprovalRequest = {
416
+ type: "shell" | "file_write" | "network_access";
417
+ details?: unknown;
418
+ context?: string;
419
+ };
420
+ declare function reverieListConversations(codexHomePath: string, limit?: number, offset?: number): Promise<ReverieConversation[]>;
421
+ declare function reverieSearchConversations(codexHomePath: string, query: string, limit?: number): Promise<ReverieSearchResult[]>;
422
+ declare function reverieSearchSemantic(codexHomePath: string, context: string, options?: ReverieSemanticSearchOptions): Promise<ReverieSearchResult[]>;
423
+ declare function reverieIndexSemantic(codexHomePath: string, options?: ReverieSemanticSearchOptions): Promise<ReverieSemanticIndexStats>;
424
+ declare function reverieGetConversationInsights(conversationPath: string, query?: string): Promise<string[]>;
425
+ declare function encodeToToon(value: unknown): string;
426
+ declare function fastEmbedInit(options: FastEmbedInitOptions): Promise<void>;
427
+ declare function fastEmbedEmbed(request: FastEmbedEmbedRequest): Promise<number[][]>;
428
+ declare function tokenizerCount(text: string, options?: TokenizerOptions): number;
429
+ declare function tokenizerEncode(text: string, options?: TokenizerEncodeOptions): number[];
430
+ declare function tokenizerDecode(tokens: number[], options?: TokenizerOptions): string;
431
+ declare function collectRepoDiffSummary(options?: RepoDiffSummaryOptions): Promise<RepoDiffSummary>;
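A minimal usage sketch of the standalone helpers declared above (it assumes they are re-exported from the package root; the Codex home path, query, and options are illustrative):

import { tokenizerCount, reverieSearchConversations, collectRepoDiffSummary } from "@codex-native/sdk";

// Pull prior-conversation context plus a repo diff summary, and estimate the diff-stat token cost.
async function gatherContext(codexHome: string) {
  const pastWork = await reverieSearchConversations(codexHome, "flaky integration tests", 5);
  const repo = await collectRepoDiffSummary({ cwd: process.cwd(), maxFiles: 20 });
  const diffStatTokens = tokenizerCount(repo.diffStat, { encoding: "o200k_base" });
  return { pastWork, changedFiles: repo.totalChangedFiles, diffStatTokens };
}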
432
+
433
+ interface TuiSession {
434
+ wait(): Promise<NativeTuiExitInfo>;
435
+ shutdown(): void;
436
+ readonly closed: boolean;
437
+ }
438
+ interface RunTuiOptions {
439
+ signal?: AbortSignal;
440
+ }
441
+ /**
442
+ * Starts the Codex TUI (Terminal User Interface) and returns a controllable session handle.
443
+ *
444
+ * Use {@link TuiSession.wait} to await completion or {@link TuiSession.shutdown} to
445
+ * request a graceful exit from another part of your program.
446
+ */
447
+ declare function startTui(request: NativeTuiRequest): TuiSession;
448
+ /**
449
+ * Launches the Codex TUI and waits for it to exit. Supports optional cancellation via AbortSignal.
450
+ */
451
+ declare function runTui(request: NativeTuiRequest, options?: RunTuiOptions): Promise<NativeTuiExitInfo>;
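A sketch of launching the TUI with cancellation via AbortSignal, assuming `runTui` is exported from the package root; the prompt and timeout are illustrative:

import { runTui } from "@codex-native/sdk";

async function interactiveSession() {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 10 * 60 * 1000); // arbitrary 10-minute cap
  try {
    const exit = await runTui(
      { prompt: "Review my staged changes", sandboxMode: "workspace-write" },
      { signal: controller.signal },
    );
    console.log(`conversation ${exit.conversationId ?? "n/a"} used ${exit.tokenUsage.totalTokens} tokens`);
  } finally {
    clearTimeout(timer);
  }
}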
168
452
 
169
453
  /** Completed turn. */
170
454
  type Turn = {
171
455
  items: ThreadItem[];
172
456
  finalResponse: string;
173
- usage: Usage$1 | null;
457
+ usage: Usage | null;
174
458
  };
175
459
  /** Alias for `Turn` to describe the result of `run()`. */
176
460
  type RunResult = Turn;
@@ -189,49 +473,166 @@ type UserInput = {
189
473
  path: string;
190
474
  };
191
475
  type Input = string | UserInput[];
476
+ type ForkOptions = {
477
+ nthUserMessage: number;
478
+ threadOptions?: Partial<ThreadOptions>;
479
+ };
192
480
  /** Represents a thread of conversation with the agent. One thread can have multiple consecutive turns. */
193
481
  declare class Thread {
194
482
  private _exec;
195
483
  private _options;
196
484
  private _id;
197
485
  private _threadOptions;
486
+ private _eventListeners;
487
+ private _approvalHandler;
198
488
  /** Returns the ID of the thread. Populated after the first turn starts. */
199
489
  get id(): string | null;
490
+ /**
491
+ * Register an event listener for thread events.
492
+ * @param listener Callback function that receives ThreadEvent objects
493
+ * @returns Unsubscribe function to remove the listener
494
+ */
495
+ onEvent(listener: (event: ThreadEvent) => void): () => void;
496
+ /**
497
+ * Remove an event listener.
498
+ * @param listener The listener function to remove
499
+ */
500
+ offEvent(listener: (event: ThreadEvent) => void): void;
501
+ /**
502
+ * Register a callback to handle approval requests from the agent.
503
+ * The handler should return true to approve the action, false to deny it.
504
+ *
505
+ * @param handler Callback function that receives ApprovalRequest and returns approval decision
506
+ * @example
507
+ * ```typescript
508
+ * thread.onApprovalRequest(async (request) => {
509
+ * console.log(`Approval requested for ${request.type}`);
510
+ * return true; // Auto-approve
511
+ * });
512
+ * ```
513
+ */
514
+ onApprovalRequest(handler: (request: ApprovalRequest) => boolean | Promise<boolean>): void;
515
+ /**
516
+ * Emit a background notification while the agent is running the current turn.
517
+ * The message is surfaced to event subscribers but does not modify the user input queue.
518
+ *
519
+ * @throws Error if the thread has not been started yet.
520
+ */
521
+ sendBackgroundEvent(message: string): Promise<void>;
522
+ /**
523
+ * Programmatically update the agent's plan/todo list.
524
+ * The plan will be applied at the start of the next turn.
525
+ *
526
+ * @param args The plan update arguments
527
+ * @throws Error if no thread ID is available
528
+ */
529
+ updatePlan(args: {
530
+ explanation?: string;
531
+ plan: Array<{
532
+ step: string;
533
+ status: "pending" | "in_progress" | "completed";
534
+ }>;
535
+ }): void;
536
+ /**
537
+ * Modify the agent's plan/todo list with granular operations.
538
+ * Changes will be applied at the start of the next turn.
539
+ *
540
+ * @param operations Array of operations to perform on the plan
541
+ * @throws Error if no thread ID is available
542
+ */
543
+ modifyPlan(operations: Array<{
544
+ type: "add";
545
+ item: {
546
+ step: string;
547
+ status?: "pending" | "in_progress" | "completed";
548
+ };
549
+ } | {
550
+ type: "update";
551
+ index: number;
552
+ updates: Partial<{
553
+ step: string;
554
+ status: "pending" | "in_progress" | "completed";
555
+ }>;
556
+ } | {
557
+ type: "remove";
558
+ index: number;
559
+ } | {
560
+ type: "reorder";
561
+ newOrder: number[];
562
+ }>): void;
563
+ /**
564
+ * Add a new todo item to the agent's plan.
565
+ *
566
+ * @param step The todo step description
567
+ * @param status The initial status (defaults to "pending")
568
+ */
569
+ addTodo(step: string, status?: "pending" | "in_progress" | "completed"): void;
570
+ /**
571
+ * Update an existing todo item.
572
+ *
573
+ * @param index The index of the todo item to update
574
+ * @param updates The updates to apply
575
+ */
576
+ updateTodo(index: number, updates: Partial<{
577
+ step: string;
578
+ status: "pending" | "in_progress" | "completed";
579
+ }>): void;
580
+ /**
581
+ * Remove a todo item from the plan.
582
+ *
583
+ * @param index The index of the todo item to remove
584
+ */
585
+ removeTodo(index: number): void;
586
+ /**
587
+ * Reorder the todo items in the plan.
588
+ *
589
+ * @param newOrder Array of indices representing the new order
590
+ */
591
+ reorderTodos(newOrder: number[]): void;
592
+ /** Compacts the conversation history for this thread using Codex's builtin compaction. */
593
+ compact(): Promise<void>;
594
+ /**
595
+ * Fork this thread at the specified user message, returning a new thread that starts
596
+ * from the conversation history prior to that message.
597
+ *
598
+ * @param options Fork configuration including which user message to branch before and optional thread overrides.
599
+ */
600
+ fork(options: ForkOptions): Promise<Thread>;
200
601
  /** Provides the input to the agent and streams events as they are produced during the turn. */
201
602
  runStreamed(input: Input, turnOptions?: TurnOptions): Promise<StreamedTurn>;
202
603
  private runStreamedInternal;
203
604
  /** Provides the input to the agent and returns the completed turn. */
204
605
  run(input: Input, turnOptions?: TurnOptions): Promise<Turn>;
606
+ private buildTuiRequest;
607
+ /**
608
+ * Launches the interactive Codex TUI (Terminal User Interface) for this thread and returns a session handle.
609
+ *
610
+ * The handle allows advanced workflows where the TUI can be started and stopped programmatically,
611
+ * while preserving the underlying conversation state.
612
+ */
613
+ launchTui(overrides?: Partial<NativeTuiRequest>): TuiSession;
614
+ /**
615
+ * Launches the interactive Codex TUI (Terminal User Interface) for this thread.
616
+ *
617
+ * This method enables seamless transition from programmatic agent interaction to
618
+ * interactive terminal chat within the same session. The TUI takes over the terminal
619
+ * and allows you to continue the conversation interactively.
620
+ *
621
+ * @param overrides - Optional configuration to override thread defaults. Supports all TUI options
622
+ * including prompt, sandbox mode, approval mode, and resume options.
623
+ * @param options - Optional run options including an AbortSignal to request shutdown.
624
+ * @returns A Promise that resolves to TUI exit information including:
625
+ * - tokenUsage: Token consumption statistics
626
+ * - conversationId: Session ID for resuming later
627
+ * - updateAction: Optional suggested update command
628
+ * @throws {Error} If not in a trusted git repository (unless skipGitRepoCheck is set)
629
+ * @throws {Error} If the terminal is not interactive (TTY required)
630
+ */
631
+ tui(overrides?: Partial<NativeTuiRequest>, options?: RunTuiOptions): Promise<NativeTuiExitInfo>;
632
+ private wrapTuiSession;
633
+ private attachDefaultLspBridge;
205
634
  }
206
635
 
207
- type ApprovalMode = "never" | "on-request" | "on-failure" | "untrusted";
208
- type SandboxMode = "read-only" | "workspace-write" | "danger-full-access";
209
- type ThreadOptions = {
210
- model?: string;
211
- sandboxMode?: SandboxMode;
212
- workingDirectory?: string;
213
- skipGitRepoCheck?: boolean;
214
- };
215
-
216
- type NativeToolInfo = {
217
- name: string;
218
- description?: string;
219
- parameters?: unknown;
220
- strict?: boolean;
221
- supportsParallel?: boolean;
222
- };
223
- type NativeToolInvocation = {
224
- toolName: string;
225
- callId: string;
226
- arguments?: string;
227
- input?: string;
228
- };
229
- type NativeToolResult = {
230
- output?: string;
231
- success?: boolean;
232
- error?: string;
233
- };
234
-
235
636
  type NativeToolDefinition = NativeToolInfo & {
236
637
  handler: (call: NativeToolInvocation) => Promise<NativeToolResult> | NativeToolResult;
237
638
  };
@@ -239,7 +640,52 @@ type CodexOptions = {
239
640
  codexPathOverride?: string;
240
641
  baseUrl?: string;
241
642
  apiKey?: string;
643
+ /** Optional model provider override to use instead of the default */
644
+ modelProvider?: string;
645
+ /** Default model to use when a thread omits an explicit choice */
646
+ defaultModel?: string;
242
647
  tools?: NativeToolDefinition[];
648
+ /**
649
+ * When true, constructor will not clear already-registered tools on the native binding.
650
+ * Useful when other code (CLI, plugins) pre-registers tools before instantiating Codex.
651
+ */
652
+ preserveRegisteredTools?: boolean;
653
+ };
654
+
655
+ type CurrentChangesReview = {
656
+ type: "current_changes";
657
+ };
658
+ type BranchReview = {
659
+ type: "branch";
660
+ baseBranch: string;
661
+ };
662
+ type CommitReview = {
663
+ type: "commit";
664
+ sha: string;
665
+ subject?: string;
666
+ };
667
+ type CustomReview = {
668
+ type: "custom";
669
+ prompt: string;
670
+ hint?: string;
671
+ };
672
+ type ReviewTarget = CurrentChangesReview | BranchReview | CommitReview | CustomReview;
673
+ type ReviewInvocationOptions = {
674
+ target: ReviewTarget;
675
+ threadOptions?: ThreadOptions;
676
+ turnOptions?: TurnOptions;
677
+ };
678
+
679
+ type NativeToolInterceptorContext = {
680
+ invocation: NativeToolInvocation;
681
+ callBuiltin: (invocation?: NativeToolInvocation) => Promise<NativeToolResult>;
682
+ };
683
+ type ConversationSummary = NativeConversationSummary;
684
+ type ConversationListPage = NativeConversationListPage;
685
+ type ConversationListOptions = ThreadOptions & {
686
+ pageSize?: number;
687
+ cursor?: string;
688
+ modelProviders?: string[];
243
689
  };
244
690
 
245
691
  /**
@@ -253,8 +699,30 @@ declare class Codex {
253
699
  private exec;
254
700
  private options;
255
701
  private readonly nativeBinding;
702
+ private readonly lspForTools;
256
703
  constructor(options?: CodexOptions);
704
+ /**
705
+ * Register a tool for Codex. When `tool.name` matches a built-in Codex tool,
706
+ * the native implementation is replaced for this Codex instance.
707
+ */
257
708
  registerTool(tool: NativeToolDefinition): void;
709
+ /**
710
+ * Register a tool interceptor for Codex. Interceptors can modify tool invocations
711
+ * and results, and can call the built-in implementation.
712
+ */
713
+ registerToolInterceptor(toolName: string, handler: (context: NativeToolInterceptorContext) => Promise<NativeToolResult> | NativeToolResult): void;
714
+ /**
715
+ * Clear all registered tools, restoring built-in defaults.
716
+ */
717
+ clearTools(): void;
718
+ private buildConversationConfig;
719
+ private createLspManagerForTools;
720
+ private registerDefaultReadFileInterceptor;
721
+ /**
722
+ * Register a programmatic approval callback that Codex will call before executing
723
+ * sensitive operations (e.g., shell commands, file writes).
724
+ */
725
+ setApprovalCallback(handler: (request: ApprovalRequest) => boolean | Promise<boolean>): void;
258
726
  /**
259
727
  * Starts a new conversation with an agent.
260
728
  * @returns A new thread instance.
@@ -268,295 +736,1499 @@ declare class Codex {
268
736
  * @returns A new thread instance.
269
737
  */
270
738
  resumeThread(id: string, options?: ThreadOptions): Thread;
739
+ listConversations(options?: ConversationListOptions): Promise<ConversationListPage>;
740
+ deleteConversation(id: string, options?: ThreadOptions): Promise<boolean>;
741
+ resumeConversationFromRollout(rolloutPath: string, options?: ThreadOptions): Promise<Thread>;
742
+ /**
743
+ * Starts a review task using the built-in Codex review flow.
744
+ */
745
+ review(options: ReviewInvocationOptions): Promise<Turn>;
746
+ /**
747
+ * Starts a review task and returns the event stream.
748
+ */
749
+ reviewStreamed(options: ReviewInvocationOptions): Promise<StreamedTurn>;
750
+ private reviewStreamedInternal;
751
+ }
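A sketch of the new review entry point, assuming `Codex` is exported from the package root and that `main` is the base branch of interest:

import { Codex } from "@codex-native/sdk";

async function reviewAgainstMain(): Promise<string> {
  const codex = new Codex();
  codex.setApprovalCallback(async (request) => request.type !== "network_access");
  const turn = await codex.review({
    target: { type: "branch", baseBranch: "main" },
    threadOptions: { sandboxMode: "read-only" },
  });
  return turn.finalResponse;
}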
752
+
753
+ type LspDiagnosticSeverity = "error" | "warning" | "info" | "hint";
754
+ type NormalizedDiagnostic = {
755
+ message: string;
756
+ severity: LspDiagnosticSeverity;
757
+ source?: string;
758
+ code?: string | number;
759
+ range: Diagnostic["range"];
760
+ };
761
+ type FileDiagnostics = {
762
+ path: string;
763
+ diagnostics: NormalizedDiagnostic[];
764
+ };
765
+ type WorkspaceLocator = {
766
+ type: "markers";
767
+ include: string[];
768
+ exclude?: string[];
769
+ } | {
770
+ type: "fixed";
771
+ path: string;
772
+ };
773
+ type LspServerConfig = {
774
+ id: string;
775
+ displayName: string;
776
+ command: string[];
777
+ extensions: string[];
778
+ env?: NodeJS.ProcessEnv;
779
+ initializationOptions?: Record<string, unknown>;
780
+ workspace?: WorkspaceLocator;
781
+ };
782
+ type LspManagerOptions = {
783
+ workingDirectory: string;
784
+ waitForDiagnostics?: boolean;
785
+ };
786
+
787
+ declare class LspDiagnosticsBridge {
788
+ private readonly options;
789
+ private readonly manager;
790
+ private readonly attached;
791
+ constructor(options: LspManagerOptions);
792
+ attach(thread: Thread): () => void;
793
+ dispose(): Promise<void>;
794
+ private processDiagnostics;
795
+ }
796
+
797
+ /**
798
+ * Attaches the LSP diagnostics bridge to a thread.
799
+ * Returns a cleanup function that detaches the bridge and disposes shared resources.
800
+ */
801
+ declare function attachLspDiagnostics(thread: Thread, options: LspManagerOptions): () => void;
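A sketch of pairing the bridge with a single turn so diagnostics stop flowing once the work finishes; the prompt is illustrative:

async function runWithDiagnostics(thread: Thread, workingDirectory: string) {
  const detach = attachLspDiagnostics(thread, { workingDirectory, waitForDiagnostics: true });
  try {
    return await thread.run("Fix the type errors reported by the language server");
  } finally {
    detach(); // detaches the bridge and disposes shared LSP resources, per the doc comment above
  }
}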
802
+
803
+ declare class LspManager {
804
+ private readonly options;
805
+ private clients;
806
+ constructor(options: LspManagerOptions);
807
+ collectDiagnostics(files: string[]): Promise<FileDiagnostics[]>;
808
+ dispose(): Promise<void>;
809
+ private getClient;
810
+ private createClient;
271
811
  }
272
812
 
813
+ declare const DEFAULT_SERVERS: LspServerConfig[];
814
+ declare function findServerForFile(filePath: string): LspServerConfig | undefined;
815
+ declare function resolveWorkspaceRoot(filePath: string, locator: WorkspaceLocator | undefined, fallbackDir: string): string;
816
+
817
+ type DiagnosticSeverity = "error" | "warning" | "info" | "hint";
818
+ declare function formatDiagnosticsForTool(diagnostics: FileDiagnostics[]): string;
819
+ declare function formatDiagnosticsForBackgroundEvent(diagnostics: FileDiagnostics[], cwd: string): string;
820
+ /**
821
+ * Filter diagnostics by minimum severity level
822
+ */
823
+ declare function filterBySeverity(diagnostics: FileDiagnostics[], minSeverity?: DiagnosticSeverity): FileDiagnostics[];
824
+ /**
825
+ * Generate summary statistics for diagnostics
826
+ */
827
+ declare function summarizeDiagnostics(diagnostics: FileDiagnostics[]): {
828
+ fileCount: number;
829
+ errorCount: number;
830
+ warningCount: number;
831
+ infoCount: number;
832
+ hintCount: number;
833
+ totalCount: number;
834
+ };
835
+ /**
836
+ * Format diagnostics with summary (concise format for post-merge validation)
837
+ */
838
+ declare function formatDiagnosticsWithSummary(diagnostics: FileDiagnostics[], cwd: string, options?: {
839
+ minSeverity?: DiagnosticSeverity;
840
+ maxPerFile?: number;
841
+ }): string;
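A sketch tying the diagnostics helpers together: collect, filter to errors, then emit the concise summary format:

async function reportErrors(files: string[], cwd: string) {
  const manager = new LspManager({ workingDirectory: cwd });
  try {
    const diagnostics = await manager.collectDiagnostics(files);
    const errorsOnly = filterBySeverity(diagnostics, "error");
    const stats = summarizeDiagnostics(errorsOnly);
    console.log(formatDiagnosticsWithSummary(errorsOnly, cwd, { maxPerFile: 5 }));
    console.log(`${stats.errorCount} errors across ${stats.fileCount} files`);
  } finally {
    await manager.dispose();
  }
}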
842
+
273
843
  /**
274
- * Type definitions for OpenAI Agents JS framework compatibility
275
- * Based on @openai/agents-core package
844
+ * Options for creating a CodexProvider
276
845
  */
277
- interface ModelProvider {
846
+ interface CodexProviderOptions extends CodexOptions {
278
847
  /**
279
- * Get a model by name
280
- * @param modelName - The name of the model to get.
848
+ * Default model to use when none is specified
281
849
  */
282
- getModel(modelName?: string): Promise<Model> | Model;
283
- }
284
- interface Model {
850
+ defaultModel?: string;
285
851
  /**
286
- * Get a response from the model (buffered)
852
+ * Approval policy forwarded to threads created by this provider.
287
853
  */
288
- getResponse(request: ModelRequest): Promise<ModelResponse>;
854
+ approvalMode?: ThreadOptions["approvalMode"];
289
855
  /**
290
- * Get a streamed response from the model
856
+ * Use local OSS provider via Ollama (pulls models as needed)
291
857
  */
292
- getStreamedResponse(request: ModelRequest): AsyncIterable<StreamEvent>;
293
- }
294
- interface ModelRequest {
295
- systemInstructions?: string;
296
- input: string | AgentInputItem[];
297
- previousResponseId?: string;
298
- conversationId?: string;
299
- modelSettings: ModelSettings;
300
- tools: SerializedTool[];
301
- outputType: SerializedOutputType;
302
- handoffs: SerializedHandoff[];
303
- tracing: ModelTracing;
304
- signal?: AbortSignal;
305
- prompt?: Prompt;
306
- overridePromptModel?: boolean;
307
- }
308
- interface ModelSettings {
309
- temperature?: number;
310
- topP?: number;
311
- frequencyPenalty?: number;
312
- presencePenalty?: number;
313
- toolChoice?: ModelSettingsToolChoice;
314
- parallelToolCalls?: boolean;
315
- truncation?: "auto" | "disabled";
316
- maxTokens?: number;
317
- store?: boolean;
318
- reasoning?: ModelSettingsReasoning;
319
- text?: ModelSettingsText;
320
- providerData?: Record<string, unknown>;
321
- }
322
- interface ModelSettingsToolChoice {
323
- type: "auto" | "required" | "none" | "function";
324
- functionName?: string;
325
- }
326
- interface ModelSettingsReasoning {
327
- type: "internal" | "explicit";
328
- maxTokens?: number;
329
- }
330
- interface ModelSettingsText {
331
- format?: {
332
- type: string;
333
- schema?: Record<string, unknown>;
334
- };
335
- }
336
- type AgentInputItem = InputText | InputImage | InputFile | InputAudio | InputFunctionCallResult | InputRefusal;
337
- interface InputText {
338
- type: "input_text";
339
- text: string;
340
- }
341
- interface InputImage {
342
- type: "input_image";
343
- image: string | {
344
- url: string;
345
- } | {
346
- fileId: string;
347
- };
348
- detail?: "auto" | "low" | "high";
349
- }
350
- interface InputFile {
351
- type: "input_file";
352
- file: {
353
- fileId: string;
354
- };
355
- }
356
- interface InputAudio {
357
- type: "input_audio";
358
- audio: string | {
359
- data: string;
360
- format: string;
361
- };
362
- transcript?: string;
363
- }
364
- interface InputFunctionCallResult {
365
- type: "function_call_result";
366
- callId: string;
367
- name: string;
368
- result: string;
369
- }
370
- interface InputRefusal {
371
- type: "input_refusal";
372
- refusal: string;
858
+ oss?: boolean;
859
+ /**
860
+ * Working directory for Codex operations
861
+ * @default process.cwd()
862
+ */
863
+ workingDirectory?: string;
864
+ /**
865
+ * Skip git repository check
866
+ * @default false
867
+ */
868
+ skipGitRepoCheck?: boolean;
869
+ /**
870
+ * Sandbox policy to use when executing shell commands
871
+ * @default "danger-full-access"
872
+ */
873
+ sandboxMode?: ThreadOptions["sandboxMode"];
874
+ /**
875
+ * Reasoning effort level for reasoning-capable models
876
+ * @default "medium"
877
+ */
878
+ reasoningEffort?: ThreadOptions["reasoningEffort"];
879
+ /**
880
+ * Reasoning summary preference for reasoning-capable models
881
+ * @default "auto"
882
+ */
883
+ reasoningSummary?: ThreadOptions["reasoningSummary"];
884
+ /**
885
+ * Enable LSP diagnostics for threads created by this provider
886
+ * @default true
887
+ */
888
+ enableLsp?: boolean;
373
889
  }
374
- interface ModelResponse {
375
- usage: Usage;
376
- output: AgentOutputItem[];
377
- responseId?: string;
378
- providerData?: Record<string, unknown>;
379
- }
380
- interface Usage {
381
- inputTokens: number;
382
- outputTokens: number;
383
- totalTokens: number;
384
- inputTokensDetails?: Array<Record<string, number>>;
385
- outputTokensDetails?: Array<Record<string, number>>;
386
- }
387
- type AgentOutputItem = AssistantMessageItem | FunctionCallItem | Refusal | AudioContent | ImageContent | Reasoning;
388
- interface AssistantMessageItem {
389
- type: "assistant_message";
390
- role: "assistant";
391
- status: "in_progress" | "completed";
392
- content: OutputContent[];
393
- }
394
- interface FunctionCallItem {
395
- type: "function_call";
396
- callId: string;
397
- name: string;
398
- arguments: string;
890
+ /**
891
+ * Provider implementation that uses Codex as the backend for OpenAI Agents
892
+ *
893
+ * @example
894
+ * ```typescript
895
+ * import { CodexProvider } from '@openai/codex-native/agents';
896
+ * import { Agent, Runner } from '@openai/agents';
897
+ *
898
+ * const provider = new CodexProvider({
+ * defaultModel: 'gpt-5-codex'
899
+ * });
900
+ *
901
+ * const agent = new Agent({
902
+ * name: 'CodeAssistant',
903
+ * instructions: 'You are a helpful coding assistant'
904
+ * });
905
+ *
906
+ * const runner = new Runner({ modelProvider: provider });
907
+ * const result = await runner.run(agent, 'Fix the failing tests');
908
+ * ```
909
+ */
910
+ declare class CodexProvider implements ModelProvider {
911
+ private codex;
912
+ private options;
913
+ constructor(options?: CodexProviderOptions);
914
+ /**
915
+ * Lazy initialization of Codex instance
916
+ */
917
+ private getCodex;
918
+ getModel(modelName?: string): Model;
919
+ /**
920
+ * Register a programmatic approval callback on the underlying Codex instance.
921
+ */
922
+ setApprovalCallback(callback: (request: ApprovalRequest) => boolean | Promise<boolean>): void;
399
923
  }
400
- interface Refusal {
401
- type: "refusal";
402
- refusal: string;
924
+
925
+ type BaseToolOptions = Parameters<typeof tool>[0];
926
+ type AgentTool = ReturnType<typeof tool>;
927
+ type CodexToolOptions = BaseToolOptions & {
928
+ codexExecute: (input: unknown) => Promise<unknown> | unknown;
929
+ };
930
+ declare function codexTool(options: CodexToolOptions): AgentTool;
931
+
932
+ type ToolCallEvent = {
933
+ name?: string;
934
+ input?: unknown;
935
+ output?: unknown;
936
+ status?: "started" | "completed";
937
+ };
938
+ type FormattedStream = {
939
+ text: string;
940
+ reasoning: string;
941
+ toolCalls: ToolCallEvent[];
942
+ usage?: {
943
+ requests?: number;
944
+ inputTokens: number;
945
+ outputTokens: number;
946
+ totalTokens: number;
947
+ inputTokensDetails?: Record<string, number>;
948
+ outputTokensDetails?: Record<string, number>;
949
+ };
950
+ /**
951
+ * Convenience field when providers report cached tokens (e.g. via inputTokensDetails.cachedTokens)
952
+ */
953
+ cachedTokens?: number;
954
+ responseId?: string;
955
+ /**
956
+ * Raw provider-specific data (e.g., costs, cache hit ratios, rate limit info)
957
+ */
958
+ providerData?: Record<string, unknown>;
959
+ errors: {
960
+ message: string;
961
+ }[];
962
+ };
963
+ type FormatStreamOptions = {
964
+ onUpdate?: (partial: Partial<FormattedStream>) => void;
965
+ };
966
+ /**
967
+ * Consume a stream of StreamEvent and aggregate into a coherent object:
968
+ * - Concatenates output_text deltas into `text`
969
+ * - Concatenates reasoning deltas into `reasoning`
970
+ * - Captures usage and responseId on response_done
971
+ * - Prepares space for tool call events (future-friendly; empty for now)
972
+ *
973
+ * Optionally invokes `onUpdate` with partial snapshots as data arrives.
974
+ */
975
+ declare function formatStream(stream: AsyncIterable<StreamEvent>, options?: FormatStreamOptions): Promise<FormattedStream>;
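A sketch of consuming a stream with incremental progress reporting; the `StreamEvent` iterable would typically come from a streamed model response:

async function collectStream(stream: AsyncIterable<StreamEvent>): Promise<FormattedStream> {
  return formatStream(stream, {
    onUpdate: (partial) => {
      if (partial.text) process.stdout.write(`\rreceived ${partial.text.length} chars`);
    },
  });
}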
976
+
977
+ type PermissionDecision = boolean | "once" | "always" | "reject" | {
978
+ response: "once" | "always" | "reject";
979
+ };
980
+ interface PermissionRequest {
981
+ id: string;
982
+ type: string;
983
+ title: string;
984
+ sessionId: string;
985
+ metadata: Record<string, unknown>;
986
+ pattern?: string | string[];
403
987
  }
404
- interface AudioContent {
405
- type: "audio";
406
- audio: string;
407
- transcript?: string;
988
+ interface OpenCodeAgentOptions {
989
+ /** Fully qualified base URL for an existing opencode server. When omitted the agent will start its own server. */
990
+ baseUrl?: string;
991
+ /** Hostname passed to `createOpencode` when auto-starting the server. */
992
+ hostname?: string;
993
+ /** Port passed to `createOpencode` when auto-starting the server. */
994
+ port?: number;
995
+ /** Additional configuration forwarded to `createOpencode`. */
996
+ config?: Record<string, unknown>;
997
+ /** Preferred model string in the form `provider/model`. */
998
+ model?: string;
999
+ /** Directory the OpenCode session should operate within. Defaults to the current working directory. */
1000
+ workingDirectory?: string;
1001
+ /** Optional user-friendly session title. */
1002
+ title?: string;
1003
+ /** Callback invoked whenever opencode asks for a permission decision. */
1004
+ onApprovalRequest?: (request: PermissionRequest) => PermissionDecision | Promise<PermissionDecision>;
1005
+ /** Override for tests – returns a hydrated opencode client. */
1006
+ clientFactory?: () => Promise<{
1007
+ client: OpencodeClient;
1008
+ close?: () => void;
1009
+ }>;
408
1010
  }
409
- interface ImageContent {
410
- type: "image";
411
- image: string;
1011
+ interface DelegationResult {
1012
+ sessionId: string;
1013
+ /** Deprecated alias retained for backwards compatibility. */
1014
+ threadId?: string;
1015
+ output: string;
1016
+ success: boolean;
1017
+ error?: string;
1018
+ usage?: Usage | null;
412
1019
  }
413
- interface Reasoning {
414
- type: "reasoning";
415
- reasoning: string;
1020
+ declare class OpenCodeAgent {
1021
+ private readonly options;
1022
+ private readonly approvalHandler?;
1023
+ private clientPromise?;
1024
+ private closeCallback?;
1025
+ constructor(options?: OpenCodeAgentOptions);
1026
+ /**
1027
+ * Cleanup method to shut down the OpenCode server if one was started.
1028
+ * Should be called when done using the agent to prevent zombie processes.
1029
+ */
1030
+ close(): Promise<void>;
1031
+ delegate(task: string): Promise<DelegationResult>;
1032
+ delegateStreaming(task: string, onEvent?: (event: Event) => void, sessionId?: string): Promise<DelegationResult>;
1033
+ resume(sessionId: string, task: string): Promise<DelegationResult>;
1034
+ workflow(steps: string[]): Promise<DelegationResult[]>;
1035
+ private executeTask;
1036
+ private ensureClient;
1037
+ private ensureSession;
1038
+ private createSessionTitle;
1039
+ private parseModel;
1040
+ private collectText;
1041
+ private toUsage;
1042
+ private extractData;
1043
+ private describeError;
1044
+ private watchEvents;
1045
+ private extractSessionId;
1046
+ private respondToPermission;
1047
+ private normalizeDecision;
1048
+ private getWorkingDirectory;
416
1049
  }
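A sketch of delegating one task and tearing the auto-started server down afterwards; the model string is illustrative:

async function delegateOnce(task: string): Promise<string> {
  const agent = new OpenCodeAgent({
    model: "openai/gpt-5-codex", // illustrative "provider/model" value
    onApprovalRequest: () => "once",
  });
  try {
    const result = await agent.delegate(task);
    if (!result.success) throw new Error(result.error ?? "delegation failed");
    return result.output;
  } finally {
    await agent.close(); // shut down the auto-started opencode server
  }
}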
417
- type OutputContent = OutputText | OutputImage | OutputAudio | OutputRefusal;
418
- interface OutputText {
419
- type: "output_text";
420
- text: string;
1050
+
1051
+ type CloudTaskStatus = "pending" | "ready" | "applied" | "error";
1052
+ type DiffSummary = {
1053
+ files_changed: number;
1054
+ lines_added: number;
1055
+ lines_removed: number;
1056
+ };
1057
+ type CloudTaskSummary = {
1058
+ id: string;
1059
+ title: string;
1060
+ status: CloudTaskStatus;
1061
+ updated_at: string;
1062
+ environment_id?: string | null;
1063
+ environment_label?: string | null;
1064
+ summary: DiffSummary;
1065
+ is_review?: boolean;
1066
+ attempt_total?: number | null;
1067
+ };
1068
+ type CloudApplyStatus = "success" | "partial" | "error";
1069
+ type CloudApplyOutcome = {
1070
+ applied: boolean;
1071
+ status: CloudApplyStatus;
1072
+ message: string;
1073
+ skipped_paths: string[];
1074
+ conflict_paths: string[];
1075
+ };
1076
+ type CloudTaskCreateResult = {
1077
+ id: string;
1078
+ };
1079
+ type CloudTasksOptions = {
1080
+ baseUrl?: string;
1081
+ apiKey?: string;
1082
+ };
1083
+ declare class CloudTasks {
1084
+ private readonly options;
1085
+ constructor(options?: CloudTasksOptions);
1086
+ private binding;
1087
+ list(env?: string): Promise<CloudTaskSummary[]>;
1088
+ getDiff(taskId: string): Promise<string | null>;
1089
+ applyPreflight(taskId: string, diffOverride?: string): Promise<CloudApplyOutcome>;
1090
+ apply(taskId: string, diffOverride?: string): Promise<CloudApplyOutcome>;
1091
+ create(envId: string, prompt: string, opts?: {
1092
+ gitRef?: string;
1093
+ qaMode?: boolean;
1094
+ bestOfN?: number;
1095
+ }): Promise<CloudTaskCreateResult>;
421
1096
  }
422
- interface OutputImage {
423
- type: "output_image";
424
- image: string;
1097
+
1098
+ /**
1099
+ * Log level enumeration
1100
+ */
1101
+ declare enum LogLevel {
1102
+ DEBUG = 0,
1103
+ INFO = 1,
1104
+ WARN = 2,
1105
+ ERROR = 3,
1106
+ SILENT = 4
425
1107
  }
426
- interface OutputAudio {
427
- type: "output_audio";
428
- audio: string;
429
- transcript?: string;
1108
+ /**
1109
+ * Log scopes for different subsystems
1110
+ */
1111
+ type LogScope = "thread" | "merge" | "git" | "coordinator" | "worker" | "supervisor" | "reviewer" | "validation" | "lsp" | "agent" | "provider" | "ci" | "test" | "system";
1112
+ /**
1113
+ * Configuration for logger instances
1114
+ */
1115
+ interface LoggerConfig {
1116
+ /** Minimum log level to output */
1117
+ level?: LogLevel;
1118
+ /** Enable colored output (default: true for TTY) */
1119
+ colors?: boolean;
1120
+ /** Include timestamps in output (default: false) */
1121
+ timestamps?: boolean;
1122
+ /** Prefix for all log messages */
1123
+ prefix?: string;
1124
+ /** Enable structured JSON output instead of formatted text */
1125
+ json?: boolean;
1126
+ /** Custom output stream (default: console) */
1127
+ output?: LogOutput;
430
1128
  }
431
- interface OutputRefusal {
432
- type: "output_refusal";
433
- refusal: string;
1129
+ /**
1130
+ * Output interface for log messages
1131
+ */
1132
+ interface LogOutput {
1133
+ debug(message: string): void;
1134
+ info(message: string): void;
1135
+ warn(message: string): void;
1136
+ error(message: string): void;
434
1137
  }
435
- type StreamEvent = ResponseStartedEvent | OutputTextDeltaEvent | OutputTextDoneEvent | OutputAudioDeltaEvent | OutputAudioDoneEvent | FunctionCallArgumentsDeltaEvent | FunctionCallArgumentsDoneEvent | ReasoningDeltaEvent | ReasoningDoneEvent | ResponseDoneEvent | ErrorEvent;
436
- interface ResponseStartedEvent {
437
- type: "response_started";
1138
+ /**
1139
+ * Thread logging sink interface
1140
+ */
1141
+ interface ThreadLoggingSink {
1142
+ info(message: string): void;
1143
+ warn(message: string): void;
1144
+ recordUsage?(usage: Usage): void;
438
1145
  }
439
- interface OutputTextDeltaEvent {
440
- type: "output_text_delta";
441
- delta: string;
1146
+ /**
1147
+ * Structured log entry for JSON output
1148
+ */
1149
+ interface LogEntry {
1150
+ timestamp: string;
1151
+ level: string;
1152
+ scope?: string;
1153
+ subject?: string;
1154
+ message: string;
1155
+ data?: Record<string, unknown>;
442
1156
  }
443
- interface OutputTextDoneEvent {
444
- type: "output_text_done";
445
- text: string;
1157
+
1158
+ /**
1159
+ * Centralized logger with support for scopes, levels, and structured output
1160
+ */
1161
+ declare class Logger {
1162
+ private level;
1163
+ private colors;
1164
+ private timestamps;
1165
+ private prefix;
1166
+ private json;
1167
+ private output;
1168
+ constructor(config?: LoggerConfig);
1169
+ /**
1170
+ * Create a new logger with modified configuration
1171
+ */
1172
+ configure(config: Partial<LoggerConfig>): Logger;
1173
+ /**
1174
+ * Create a scoped logger
1175
+ */
1176
+ scope(scope: LogScope, subject?: string): ScopedLogger;
1177
+ /**
1178
+ * Log a debug message
1179
+ */
1180
+ debug(message: string, data?: Record<string, unknown>): void;
1181
+ /**
1182
+ * Log an info message
1183
+ */
1184
+ info(message: string, data?: Record<string, unknown>): void;
1185
+ /**
1186
+ * Log a warning message
1187
+ */
1188
+ warn(message: string, data?: Record<string, unknown>): void;
1189
+ /**
1190
+ * Log an error message
1191
+ */
1192
+ error(message: string, data?: Record<string, unknown>): void;
1193
+ /**
1194
+ * Internal log method
1195
+ */
1196
+ private log;
1197
+ /**
1198
+ * Log in JSON format
1199
+ */
1200
+ private logJson;
1201
+ /**
1202
+ * Log in formatted text
1203
+ */
1204
+ private logFormatted;
1205
+ /**
1206
+ * Internal scoped log method (used by ScopedLogger)
1207
+ */
1208
+ logScoped(level: LogLevel, message: string, scope: LogScope, subject?: string, data?: Record<string, unknown>): void;
446
1209
  }
447
- interface OutputAudioDeltaEvent {
448
- type: "output_audio_delta";
449
- delta: string;
1210
+ /**
1211
+ * Scoped logger for a specific subsystem
1212
+ */
1213
+ declare class ScopedLogger {
1214
+ private logger;
1215
+ private scope;
1216
+ private subject?;
1217
+ constructor(logger: Logger, scope: LogScope, subject?: string | undefined);
1218
+ /**
1219
+ * Log a debug message
1220
+ */
1221
+ debug(message: string, data?: Record<string, unknown>): void;
1222
+ /**
1223
+ * Log an info message
1224
+ */
1225
+ info(message: string, data?: Record<string, unknown>): void;
1226
+ /**
1227
+ * Log a warning message
1228
+ */
1229
+ warn(message: string, data?: Record<string, unknown>): void;
1230
+ /**
1231
+ * Log an error message
1232
+ */
1233
+ error(message: string, data?: Record<string, unknown>): void;
1234
+ /**
1235
+ * Create a ThreadLoggingSink adapter
1236
+ */
1237
+ asThreadSink(): ThreadLoggingSink;
450
1238
  }
451
- interface OutputAudioDoneEvent {
452
- type: "output_audio_done";
453
- audio: string;
454
- transcript?: string;
1239
+ /**
1240
+ * Global default logger instance
1241
+ */
1242
+ declare const logger: Logger;
1243
+
1244
+ /**
1245
+ * Create a thread logging sink from a scoped logger
1246
+ */
1247
+ declare function createThreadLogger(scopedLogger: ScopedLogger, onUsage?: (usage: Usage) => void): ThreadLoggingSink;
1248
+ /**
1249
+ * Run a thread turn with automatic event logging
1250
+ */
1251
+ declare function runThreadTurnWithLogs(thread: Thread, sink: ThreadLoggingSink, prompt: string, turnOptions?: TurnOptions): Promise<Turn>;
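A sketch combining the global logger with the turn helper; the scope and prompt are illustrative:

async function runLoggedTurn(thread: Thread, prompt: string): Promise<Turn> {
  const scoped = logger.scope("worker", thread.id ?? "new-thread");
  const sink = createThreadLogger(scoped, (usage) => scoped.info(`input tokens: ${usage.input_tokens}`));
  return runThreadTurnWithLogs(thread, sink, prompt);
}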
1252
+
1253
+ /**
1254
+ * Reverie System Constants
1255
+ *
1256
+ * Configuration constants for reverie search, filtering, and grading.
1257
+ * These values are tuned for optimal balance between result quality and performance.
1258
+ */
1259
+ /**
1260
+ * Default number of final reverie insights to return.
1261
+ * After all filtering and grading, this is the target result count.
1262
+ */
1263
+ declare const DEFAULT_REVERIE_LIMIT = 6;
1264
+ /**
1265
+ * Maximum number of candidate insights to fetch initially.
1266
+ * We fetch many candidates upfront and then filter aggressively.
1267
+ */
1268
+ declare const DEFAULT_REVERIE_MAX_CANDIDATES = 80;
1269
+ /**
1270
+ * Embedding model for semantic search.
1271
+ * Large model provides better semantic understanding at cost of memory/speed.
1272
+ */
1273
+ declare const REVERIE_EMBED_MODEL = "BAAI/bge-large-en-v1.5";
1274
+ /**
1275
+ * Reranker model for improving search precision.
1276
+ * Applied after initial embedding search to rerank top candidates.
1277
+ */
1278
+ declare const REVERIE_RERANKER_MODEL = "rozgo/bge-reranker-v2-m3";
1279
+ /**
1280
+ * Candidate multiplier for aggressive filtering.
1281
+ * Fetch 3x candidates since we'll filter heavily for quality.
1282
+ */
1283
+ declare const REVERIE_CANDIDATE_MULTIPLIER = 3;
1284
+ /**
1285
+ * Minimum relevance score threshold for LLM grading.
1286
+ * Only insights scoring >= 0.7 are sent for expensive LLM evaluation.
1287
+ * This optimizes API costs by skipping obvious low-quality candidates.
1288
+ */
1289
+ declare const REVERIE_LLM_GRADE_THRESHOLD = 0.7;
1290
+ /**
1291
+ * Default reranker top-k value.
1292
+ * Number of results to rerank after initial retrieval.
1293
+ */
1294
+ declare const DEFAULT_RERANKER_TOP_K = 20;
1295
+ /**
1296
+ * Default reranker batch size.
1297
+ * Number of candidates to process per reranker batch.
1298
+ */
1299
+ declare const DEFAULT_RERANKER_BATCH_SIZE = 8;
1300
+
1301
+ /**
1302
+ * Reverie Type Definitions
1303
+ *
1304
+ * Core types used throughout the reverie system.
1305
+ */
1306
+ /**
1307
+ * Represents a single reverie insight from past conversations.
1308
+ */
1309
+ interface ReverieInsight$1 {
1310
+ /** Unique identifier for the conversation */
1311
+ conversationId: string;
1312
+ /** ISO timestamp of when the conversation occurred */
1313
+ timestamp: string;
1314
+ /** Relevance score from semantic search (0-1) */
1315
+ relevance: number;
1316
+ /** Text excerpt from the conversation */
1317
+ excerpt: string;
1318
+ /** Extracted insights or key points from the excerpt */
1319
+ insights: string[];
455
1320
  }
456
- interface FunctionCallArgumentsDeltaEvent {
457
- type: "function_call_arguments_delta";
458
- callId: string;
459
- name: string;
460
- delta: string;
1321
+ interface ReverieEpisodeSummary {
1322
+ conversationId: string;
1323
+ episodeId: string;
1324
+ timestamp: string;
1325
+ summary: string;
1326
+ keyDecisions?: string[];
1327
+ importance?: number;
461
1328
  }
462
- interface FunctionCallArgumentsDoneEvent {
463
- type: "function_call_arguments_done";
464
- callId: string;
465
- name: string;
466
- arguments: string;
1329
+ /**
1330
+ * Options for reverie semantic search.
1331
+ */
1332
+ interface ReverieSearchOptions {
1333
+ /** Maximum number of final results to return (after all filtering) */
1334
+ limit?: number;
1335
+ /** Maximum number of candidates to fetch initially */
1336
+ maxCandidates?: number;
1337
+ /** Whether to use reranker for improving precision */
1338
+ useReranker?: boolean;
1339
+ /** Reranker model identifier */
1340
+ rerankerModel?: string;
1341
+ /** Number of results to rerank */
1342
+ rerankerTopK?: number;
1343
+ /** Batch size for reranking operations */
1344
+ rerankerBatchSize?: number;
1345
+ /** Multiplier for candidate fetching (fetch N × limit candidates) */
1346
+ candidateMultiplier?: number;
467
1347
  }
468
- interface ReasoningDeltaEvent {
469
- type: "reasoning_delta";
470
- delta: string;
1348
+ /**
1349
+ * Options for LLM-based relevance grading.
1350
+ */
1351
+ interface GradingOptions {
1352
+ /** Minimum relevance score to trigger LLM grading (default: 0.7) */
1353
+ minRelevanceForGrading?: number;
1354
+ /** Whether to grade insights in parallel (default: true) */
1355
+ parallel?: boolean;
471
1356
  }
472
- interface ReasoningDoneEvent {
473
- type: "reasoning_done";
474
- reasoning: string;
1357
+ /**
1358
+ * Statistics from reverie filtering pipeline.
1359
+ */
1360
+ interface ReverieFilterStats {
1361
+ /** Total raw results from search */
1362
+ total: number;
1363
+ /** Results after basic quality filtering */
1364
+ afterQuality: number;
1365
+ /** Results after embedding-based boilerplate filtering */
1366
+ afterBoilerplate?: number;
1367
+ /** Results after relevance score threshold */
1368
+ afterScore: number;
1369
+ /** Results after deduplication */
1370
+ afterDedup: number;
1371
+ /** Results after LLM grading */
1372
+ afterLLMGrade?: number;
1373
+ /** Final result count */
1374
+ final: number;
475
1375
  }
476
- interface ResponseDoneEvent {
477
- type: "response_done";
478
- response: ModelResponse;
1376
+ /**
1377
+ * Complete pipeline options combining search, filtering, and grading.
1378
+ */
1379
+ interface ReveriePipelineOptions extends ReverieSearchOptions, GradingOptions {
1380
+ /** Whether to skip LLM grading entirely (default: false) */
1381
+ skipLLMGrading?: boolean;
479
1382
  }
480
- interface ErrorEvent {
481
- type: "error";
482
- error: {
483
- message: string;
484
- code?: string;
485
- };
1383
+ /**
1384
+ * Reverie search level types for multi-level search hierarchy.
1385
+ */
1386
+ type ReverieSearchLevel = 'project' | 'branch' | 'file';
1387
+ /**
1388
+ * Project-level search context for repository-wide patterns.
1389
+ */
1390
+ interface ProjectLevelContext {
1391
+ /** Search level identifier */
1392
+ level: 'project';
1393
+ /** Repository root path */
1394
+ repoPath: string;
1395
+ /** Search query describing what to find */
1396
+ query: string;
1397
+ /** Optional file patterns to filter search scope (e.g., ["*.ts", "src/**"]) */
1398
+ filePatterns?: string[];
486
1399
  }
487
- interface SerializedTool {
488
- type: "function";
489
- name: string;
490
- description?: string;
491
- parameters?: Record<string, unknown>;
1400
+ /**
1401
+ * Branch-level search context for feature/branch-specific work.
1402
+ */
1403
+ interface BranchLevelContext {
1404
+ /** Search level identifier */
1405
+ level: 'branch';
1406
+ /** Repository root path */
1407
+ repoPath: string;
1408
+ /** Current branch name */
1409
+ branch: string;
1410
+ /** Base branch for comparison (e.g., "main") */
1411
+ baseBranch?: string;
1412
+ /** List of changed file paths in this branch */
1413
+ changedFiles: string[];
1414
+ /** Recent commit messages or summaries */
1415
+ recentCommits?: string;
492
1416
  }
493
- interface SerializedOutputType {
494
- type: "json_schema";
495
- schema: Record<string, unknown>;
1417
+ /**
1418
+ * File-level search context for individual file changes.
1419
+ */
1420
+ interface FileLevelContext {
1421
+ /** Search level identifier */
1422
+ level: 'file';
1423
+ /** Repository root path */
1424
+ repoPath: string;
1425
+ /** Path to the file being analyzed */
1426
+ filePath: string;
1427
+ /** Git diff or change content */
1428
+ diff?: string;
1429
+ /** Extracted symbols from the file (functions, classes, etc.) */
1430
+ symbols?: string[];
496
1431
  }
497
- interface SerializedHandoff {
498
- name: string;
499
- description?: string;
1432
+ /**
1433
+ * Union type representing any level of search context.
1434
+ */
1435
+ type ReverieContext = ProjectLevelContext | BranchLevelContext | FileLevelContext;
1436
+
1437
+ /**
1438
+ * Reverie Quality Utilities
1439
+ *
1440
+ * Provides filtering, deduplication, and quality assessment for reverie search results.
1441
+ * Ensures that only meaningful conversation excerpts are surfaced to agents and users.
1442
+ */
1443
+ /**
1444
+ * Represents a single reverie insight from past conversations.
1445
+ * This is a generic interface that can be extended with additional metadata.
1446
+ */
1447
+ interface ReverieInsight {
1448
+ /** Unique identifier for the conversation */
1449
+ conversationId: string;
1450
+ /** ISO timestamp of when the conversation occurred */
1451
+ timestamp: string;
1452
+ /** Relevance score from semantic search (0-1) */
1453
+ relevance: number;
1454
+ /** Text excerpt from the conversation */
1455
+ excerpt: string;
1456
+ /** Extracted insights or key points from the excerpt */
1457
+ insights: string[];
500
1458
  }
501
- interface ModelTracing {
502
- enabled: boolean;
503
- metadata?: Record<string, unknown>;
1459
+ /**
1460
+ * Type alias for reverie results (used for logging compatibility).
1461
+ */
1462
+ type ReverieResult = ReverieInsight;
1463
+ /**
1464
+ * Statistics from the quality filtering pipeline.
1465
+ */
1466
+ interface QualityFilterStats {
1467
+ /** Number of insights before filtering */
1468
+ initial: number;
1469
+ /** Number after validity filtering */
1470
+ afterValidityFilter: number;
1471
+ /** Number after deduplication */
1472
+ afterDeduplication: number;
1473
+ /** Final number of insights */
1474
+ final: number;
504
1475
  }
505
- interface Prompt {
506
- model?: string;
507
- template?: string;
1476
+ /**
1477
+ * Validates whether a reverie excerpt contains meaningful content worth indexing.
1478
+ *
1479
+ * Filters out:
1480
+ * - Very short excerpts (< 20 chars)
1481
+ * - System prompts and boilerplate text
1482
+ * - Tool outputs and structured data
1483
+ * - Excerpts with excessive XML/HTML tags
1484
+ * - JSON objects and configuration snippets
1485
+ *
1486
+ * @param excerpt - The text excerpt to validate
1487
+ * @returns true if the excerpt contains meaningful content, false otherwise
1488
+ *
1489
+ * @example
1490
+ * ```typescript
1491
+ * const excerpt = "Let's refactor the auth module to use async/await";
1492
+ * isValidReverieExcerpt(excerpt); // true
1493
+ *
1494
+ * const systemPrompt = "<INSTRUCTIONS>You are a coding assistant</INSTRUCTIONS>";
1495
+ * isValidReverieExcerpt(systemPrompt); // false
1496
+ * ```
1497
+ */
1498
+ declare function isValidReverieExcerpt(excerpt: string): boolean;
1499
+ /**
1500
+ * Removes duplicate or highly similar reverie insights based on content fingerprinting.
1501
+ *
1502
+ * Groups insights by fingerprint and keeps the one with the highest relevance score.
1503
+ * Earlier implementations kept the first occurrence, which could discard
1504
+ * higher-quality duplicates found later in the list.
1505
+ *
1506
+ * Uses the first 100 characters of each excerpt (normalized) as a fingerprint
1507
+ * to identify duplicates. This prevents redundant insights from being shown
1508
+ * to the user while preserving the most relevant unique insights.
1509
+ *
1510
+ * @param insights - Array of reverie insights to deduplicate
1511
+ * @returns Deduplicated array of reverie insights, sorted by relevance (highest first)
1512
+ *
1513
+ * @example
1514
+ * ```typescript
1515
+ * const insights = [
1516
+ * { excerpt: "We refactored the auth module...", relevance: 0.7, ... },
1517
+ * { excerpt: "We refactored the auth module to use async/await", relevance: 0.9, ... },
1518
+ * { excerpt: "Updated the database schema", relevance: 0.8, ... }
1519
+ * ];
1520
+ *
1521
+ * const deduplicated = deduplicateReverieInsights(insights);
1522
+ * // Returns 2 insights: the higher-scoring auth one (0.9) and the database one (0.8)
1523
+ * ```
1524
+ */
1525
+ declare function deduplicateReverieInsights<T extends ReverieInsight>(insights: T[]): T[];
1526
+ /**
1527
+ * Applies the complete quality pipeline to reverie insights.
1528
+ *
1529
+ * Pipeline steps:
1530
+ * 1. Filter out invalid excerpts (system prompts, boilerplate, etc.)
1531
+ * 2. Deduplicate similar insights, keeping highest relevance
1532
+ * 3. Sort by relevance score (highest first)
1533
+ * 4. Limit to top N results
1534
+ *
1535
+ * @param insights - Raw reverie insights from search
1536
+ * @param limit - Maximum number of insights to return (default: 10)
1537
+ * @returns Filtered, deduplicated, and sorted insights with statistics
1538
+ *
1539
+ * @example
1540
+ * ```typescript
1541
+ * const rawInsights = await reverieSearchSemantic(codexHome, query, options);
1542
+ * const { insights, stats } = applyQualityPipeline(rawInsights, 5);
1543
+ *
1544
+ * console.log(`Filtered ${stats.initial} → ${stats.final} insights`);
1545
+ * insights.forEach(insight => {
1546
+ * console.log(`[${insight.relevance.toFixed(2)}] ${insight.excerpt.slice(0, 100)}`);
1547
+ * });
1548
+ * ```
1549
+ */
1550
+ declare function applyQualityPipeline<T extends ReverieInsight>(insights: T[], limit?: number): {
1551
+ insights: T[];
1552
+ stats: QualityFilterStats;
1553
+ };
1554
+
1555
+ /**
1556
+ * LLM-Based Relevance Grading for Reverie Insights
1557
+ *
1558
+ * Uses an LLM to evaluate whether reverie excerpts contain specific technical details
1559
+ * relevant to the current work context. This provides a more sophisticated filter than
1560
+ * simple keyword matching or relevance scores.
1561
+ *
1562
+ * Key optimizations:
1563
+ * - Only grades high-scoring candidates (relevance >= 0.7) to minimize API costs
1564
+ * - Parallel grading for performance
1565
+ * - Strict filtering to reject boilerplate and generic content
1566
+ */
1567
+
1568
+ /**
1569
+ * Minimal interface for an agent runner that can execute prompts.
1570
+ * Compatible with @openai/agents Runner and similar implementations.
1571
+ */
1572
+ interface AgentRunner {
1573
+ run(agent: {
1574
+ name: string;
1575
+ instructions: string | ((...args: any[]) => any);
1576
+ outputType?: unknown;
1577
+ getEnabledHandoffs?: (...args: any[]) => Promise<unknown> | unknown;
1578
+ getAllTools?: (...args: any[]) => Promise<unknown> | unknown;
1579
+ }, prompt: string): Promise<{
1580
+ finalOutput?: unknown;
1581
+ }>;
508
1582
  }
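Any object with a compatible run() satisfies AgentRunner; a Runner from @openai/agents fits structurally, and so does a test double. A minimal stub, noting that what the grader actually reads out of finalOutput is not specified in this file:

```typescript
// Minimal test double for AgentRunner. The finalOutput value below is only a
// placeholder assumption; the grader's expected output shape is not documented here.
const stubRunner: AgentRunner = {
  async run(_agent, _prompt) {
    return { finalOutput: true };
  },
};
```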
1583
+ /**
1584
+ * Uses LLM to evaluate if a reverie excerpt contains specific technical details
1585
+ * relevant to the search context.
1586
+ *
1587
+ * The grader is extremely strict and only approves excerpts with:
1588
+ * - Specific code/file references
1589
+ * - Technical decisions and rationale
1590
+ * - Error messages and debugging details
1591
+ * - Implementation specifics
1592
+ *
1593
+ * It rejects:
1594
+ * - Greetings and pleasantries
1595
+ * - Thinking markers (**, ##)
1596
+ * - JSON objects and structured data
1597
+ * - Generic phrases ("Context from past work")
1598
+ * - Metadata and system information
1599
+ *
1600
+ * @param runner - Agent runner capable of executing LLM prompts
1601
+ * @param searchContext - Context describing what we're searching for
1602
+ * @param insight - Reverie insight to evaluate
1603
+ * @returns true if the excerpt contains valuable technical details, false otherwise
1604
+ *
1605
+ * @example
1606
+ * ```typescript
1607
+ * const context = "Implementing authentication with JWT tokens";
1608
+ * const insight = {
1609
+ * excerpt: "We decided to use RS256 for JWT signing because...",
1610
+ * relevance: 0.85,
1611
+ * // ...
1612
+ * };
1613
+ *
1614
+ * const isRelevant = await gradeReverieRelevance(runner, context, insight);
1615
+ * // Returns: true (contains specific technical decision)
1616
+ * ```
1617
+ */
1618
+ declare function gradeReverieRelevance(runner: AgentRunner, searchContext: string, insight: ReverieInsight$1): Promise<boolean>;
1619
+ /**
1620
+ * Grades multiple reverie insights in parallel using LLM evaluation.
1621
+ *
1622
+ * Pipeline:
1623
+ * 1. Filter insights by minimum relevance threshold (default: 0.7)
1624
+ * 2. Send high-scoring insights to LLM grader in parallel
1625
+ * 3. Return only insights that pass LLM evaluation
1626
+ *
1627
+ * This approach optimizes API costs by:
1628
+ * - Skipping low-scoring candidates entirely
1629
+ * - Running high-scoring evaluations in parallel for speed
1630
+ * - Using strict filtering to minimize false positives
1631
+ *
1632
+ * @param runner - Agent runner capable of executing LLM prompts
1633
+ * @param context - Search context describing what we're looking for
1634
+ * @param insights - Array of insights to grade
1635
+ * @param options - Grading configuration options
1636
+ * @returns Filtered array containing only LLM-approved insights
1637
+ *
1638
+ * @example
1639
+ * ```typescript
1640
+ * const allInsights = await searchReveries("authentication bug", repo);
1641
+ * const approved = await gradeReveriesInParallel(
1642
+ * runner,
1643
+ * "Fix authentication token validation",
1644
+ * allInsights,
1645
+ * { minRelevanceForGrading: 0.75, parallel: true }
1646
+ * );
1647
+ *
1648
+ * console.log(`${approved.length}/${allInsights.length} insights approved`);
1649
+ * ```
1650
+ */
1651
+ declare function gradeReveriesInParallel(runner: AgentRunner, context: string, insights: ReverieInsight$1[], options?: GradingOptions): Promise<ReverieInsight$1[]>;
509
1652
 
510
1653
  /**
511
- * Options for creating a CodexProvider
1654
+ * Complete Reverie Pipeline
1655
+ *
1656
+ * Orchestrates the full reverie search and filtering process:
1657
+ * 1. Search with 3x candidates for aggressive filtering headroom
1658
+ * 2. Basic quality filter (remove boilerplate and system prompts)
1659
+ * 3. Split by relevance threshold (high vs low scoring)
1660
+ * 4. LLM grade high-scoring candidates only (cost optimization)
1661
+ * 5. Deduplicate results (keep highest relevance)
1662
+ * 6. Log statistics at every stage (transparent operation)
1663
+ *
1664
+ * This pipeline matches diff-agent's sophistication while being fully generic
1665
+ * and reusable across different contexts.
512
1666
  */
513
- interface CodexProviderOptions extends CodexOptions {
514
- /**
515
- * Default model to use when none is specified
516
- */
517
- defaultModel?: string;
518
- /**
519
- * Working directory for Codex operations
520
- * @default process.cwd()
521
- */
522
- workingDirectory?: string;
523
- /**
524
- * Skip git repository check
525
- * @default false
526
- */
527
- skipGitRepoCheck?: boolean;
1667
+
1668
+ /**
1669
+ * Result from the complete reverie pipeline.
1670
+ */
1671
+ interface ReveriePipelineResult {
1672
+ /** Final filtered and graded insights */
1673
+ insights: ReverieInsight$1[];
1674
+ /** Statistics from each pipeline stage */
1675
+ stats: ReverieFilterStats;
528
1676
  }
529
1677
  /**
530
- * Provider implementation that uses Codex as the backend for OpenAI Agents
1678
+ * Applies the complete reverie pipeline, including every filtering and grading stage from diff-agent.
1679
+ *
1680
+ * Pipeline stages:
1681
+ * 1. **Search** - Fetch 3x candidates with optional reranking
1682
+ * 2. **Quality Filter** - Remove system prompts, boilerplate, JSON objects
1683
+ * 3. **Score Split** - Separate high-scoring (≥0.7) from low-scoring candidates
1684
+ * 4. **LLM Grading** - Grade only high-scoring candidates (cost optimization)
1685
+ * 5. **Deduplication** - Remove similar excerpts, keeping highest relevance
1686
+ * 6. **Logging** - Transparent statistics at each stage
1687
+ *
1688
+ * Key optimizations:
1689
+ * - 3x candidate multiplier provides headroom for aggressive filtering
1690
+ * - LLM grading only applied to high-scoring candidates (≥0.7)
1691
+ * - Parallel grading for performance
1692
+ * - Deduplication preserves highest-relevance duplicates
1693
+ * - Comprehensive logging for debugging and monitoring
1694
+ *
1695
+ * @param codexHome - Path to .codex directory containing conversation data
1696
+ * @param searchText - Search query describing what to look for
1697
+ * @param repo - Repository root path for filtering conversations
1698
+ * @param runner - Agent runner for LLM-based relevance grading (required unless skipLLMGrading is true)
1699
+ * @param options - Pipeline configuration options
1700
+ * @returns Pipeline result with filtered insights and statistics
531
1701
  *
532
1702
  * @example
533
1703
  * ```typescript
534
- * import { CodexProvider } from '@openai/codex-native/agents';
535
- * import { Agent, Runner } from '@openai/agents';
1704
+ * // Full pipeline with LLM grading
1705
+ * const result = await applyReveriePipeline(
1706
+ * "/Users/me/.codex",
1707
+ * "authentication bug with JWT tokens",
1708
+ * "/Users/me/my-project",
1709
+ * runner,
1710
+ * {
1711
+ * limit: 6,
1712
+ * useReranker: true,
1713
+ * minRelevanceForGrading: 0.7
1714
+ * }
1715
+ * );
1716
+ *
1717
+ * console.log(`Found ${result.insights.length} relevant insights`);
1718
+ * console.log(`Filtered: ${result.stats.total} → ${result.stats.final}`);
1719
+ *
1720
+ * // Without LLM grading (faster, lower quality)
1721
+ * const fastResult = await applyReveriePipeline(
1722
+ * codexHome,
1723
+ * query,
1724
+ * repo,
1725
+ * null,
1726
+ * { skipLLMGrading: true }
1727
+ * );
1728
+ * ```
1729
+ */
1730
+ declare function applyReveriePipeline(codexHome: string, searchText: string, repo: string, runner: AgentRunner | null, options?: ReveriePipelineOptions): Promise<ReveriePipelineResult>;
1731
+ /**
1732
+ * Simplified pipeline for file-specific searches.
1733
+ *
1734
+ * Similar to main pipeline but optimized for individual file contexts:
1735
+ * - Uses fewer candidates (maxCandidates / 2)
1736
+ * - Same filtering and grading logic
1737
+ * - Transparent logging
1738
+ *
1739
+ * @param codexHome - Path to .codex directory
1740
+ * @param filePath - File path being analyzed
1741
+ * @param fileContext - Contextual information about the file (symbols, changes, etc.)
1742
+ * @param repo - Repository root path
1743
+ * @param runner - Agent runner for LLM grading
1744
+ * @param options - Pipeline options
1745
+ * @returns Pipeline result with file-specific insights
1746
+ *
1747
+ * @example
1748
+ * ```typescript
1749
+ * const fileInsights = await applyFileReveriePipeline(
1750
+ * codexHome,
1751
+ * "src/auth/jwt.ts",
1752
+ * "File: src/auth/jwt.ts\nImplementing: validateToken, generateToken",
1753
+ * repo,
1754
+ * runner,
1755
+ * { limit: 3 }
1756
+ * );
1757
+ * ```
1758
+ */
1759
+ declare function applyFileReveriePipeline(codexHome: string, filePath: string, fileContext: string, repo: string, runner: AgentRunner | null, options?: ReveriePipelineOptions): Promise<ReveriePipelineResult>;
1760
+ /**
1761
+ * Multi-level reverie search pipeline.
1762
+ *
1763
+ * Executes searches at multiple levels (project, branch, file) and returns
1764
+ * results organized by level. This enables comprehensive context gathering
1765
+ * from different scopes in a single operation.
1766
+ *
1767
+ * @param codexHome - Path to .codex directory
1768
+ * @param contexts - Array of search contexts at different levels
1769
+ * @param runner - Agent runner for LLM grading (optional if skipLLMGrading is true)
1770
+ * @param options - Pipeline options
1771
+ * @returns Map of search level to pipeline results
1772
+ *
1773
+ * @example
1774
+ * ```typescript
1775
+ * import { buildProjectContext, buildBranchContext, buildFileContext } from './context.js';
536
1776
  *
537
- * const provider = new CodexProvider({
538
- * apiKey: process.env.CODEX_API_KEY,
539
- * defaultModel: 'claude-sonnet-4.5'
1777
+ * const contexts = [
1778
+ * buildProjectContext("Testing conventions in this codebase"),
1779
+ * buildBranchContext("feat/auth", ["src/auth.ts", "src/login.ts"]),
1780
+ * buildFileContext("src/auth.ts", { extractSymbols: true })
1781
+ * ];
1782
+ *
1783
+ * const results = await searchMultiLevel(codexHome, contexts, runner, {
1784
+ * limit: 5,
1785
+ * useReranker: true
540
1786
  * });
541
1787
  *
542
- * const agent = new Agent({
543
- * name: 'CodeAssistant',
544
- * instructions: 'You are a helpful coding assistant'
1788
+ * // Access results by level
1789
+ * const projectInsights = results.get('project')?.insights || [];
1790
+ * const branchInsights = results.get('branch')?.insights || [];
1791
+ * const fileInsights = results.get('file')?.insights || [];
1792
+ * ```
1793
+ */
1794
+ declare function searchMultiLevel(codexHome: string, contexts: ReverieContext[], runner: AgentRunner | null, options?: ReveriePipelineOptions): Promise<Map<ReverieSearchLevel, ReveriePipelineResult>>;
1795
+ /**
1796
+ * Search at project level for repository-wide patterns.
1797
+ *
1798
+ * Optimized for broad searches across the entire codebase to find
1799
+ * architectural decisions, common practices, and project conventions.
1800
+ *
1801
+ * @param codexHome - Path to .codex directory
1802
+ * @param context - Project-level search context
1803
+ * @param runner - Agent runner for LLM grading
1804
+ * @param options - Pipeline options
1805
+ * @returns Pipeline result with project-wide insights
1806
+ *
1807
+ * @example
1808
+ * ```typescript
1809
+ * const context = buildProjectContext(
1810
+ * "How we handle database migrations",
1811
+ * { repoPath: "/Users/me/my-project" }
1812
+ * );
1813
+ *
1814
+ * const result = await searchProjectLevel(codexHome, context, runner, {
1815
+ * limit: 8,
1816
+ * useReranker: true
545
1817
  * });
546
1818
  *
547
- * const runner = new Runner({ modelProvider: provider });
548
- * const result = await runner.run(agent, 'Fix the failing tests');
1819
+ * console.log(`Found ${result.insights.length} project-wide insights`);
549
1820
  * ```
550
1821
  */
551
- declare class CodexProvider implements ModelProvider {
552
- private codex;
553
- private options;
554
- constructor(options?: CodexProviderOptions);
555
- /**
556
- * Lazy initialization of Codex instance
557
- */
558
- private getCodex;
559
- getModel(modelName?: string): Model;
560
- }
1822
+ declare function searchProjectLevel(codexHome: string, context: ProjectLevelContext, runner: AgentRunner | null, options?: ReveriePipelineOptions): Promise<ReveriePipelineResult>;
1823
+ /**
1824
+ * Search at branch level for feature-specific context.
1825
+ *
1826
+ * Optimized for understanding work done in a specific branch,
1827
+ * including intent, changed files, and commit history.
1828
+ *
1829
+ * @param codexHome - Path to .codex directory
1830
+ * @param context - Branch-level search context
1831
+ * @param runner - Agent runner for LLM grading
1832
+ * @param options - Pipeline options
1833
+ * @returns Pipeline result with branch-specific insights
1834
+ *
1835
+ * @example
1836
+ * ```typescript
1837
+ * const context = buildBranchContext(
1838
+ * "feat/oauth2",
1839
+ * ["src/auth.ts", "src/login.ts"],
1840
+ * {
1841
+ * baseBranch: "main",
1842
+ * recentCommits: "Add OAuth2 support\nImplement token refresh"
1843
+ * }
1844
+ * );
1845
+ *
1846
+ * const result = await searchBranchLevel(codexHome, context, runner, {
1847
+ * limit: 6
1848
+ * });
1849
+ *
1850
+ * console.log(`Found ${result.insights.length} branch insights`);
1851
+ * ```
1852
+ */
1853
+ declare function searchBranchLevel(codexHome: string, context: BranchLevelContext, runner: AgentRunner | null, options?: ReveriePipelineOptions): Promise<ReveriePipelineResult>;
1854
+ /**
1855
+ * Search at file level for specific file changes.
1856
+ *
1857
+ * Optimized for focused searches on individual file modifications,
1858
+ * using extracted symbols for better targeting.
1859
+ *
1860
+ * @param codexHome - Path to .codex directory
1861
+ * @param context - File-level search context
1862
+ * @param runner - Agent runner for LLM grading
1863
+ * @param options - Pipeline options
1864
+ * @returns Pipeline result with file-specific insights
1865
+ *
1866
+ * @example
1867
+ * ```typescript
1868
+ * const context = buildFileContext(
1869
+ * "src/auth/jwt.ts",
1870
+ * {
1871
+ * diff: "+function validateToken(...)\n+function refreshToken(...)",
1872
+ * extractSymbols: true
1873
+ * }
1874
+ * );
1875
+ *
1876
+ * const result = await searchFileLevel(codexHome, context, runner, {
1877
+ * limit: 3
1878
+ * });
1879
+ *
1880
+ * console.log(`Found ${result.insights.length} file-specific insights`);
1881
+ * ```
1882
+ */
1883
+ declare function searchFileLevel(codexHome: string, context: FileLevelContext, runner: AgentRunner | null, options?: ReveriePipelineOptions): Promise<ReveriePipelineResult>;
1884
+
1885
+ /**
1886
+ * Reverie logging utilities.
1887
+ * Provides transparent logging for reverie search and filtering operations.
1888
+ */
1889
+
1890
+ /**
1891
+ * Logs reverie search operation details.
1892
+ *
1893
+ * @param query - The search query
1894
+ * @param context - Optional context about the search
1895
+ */
1896
+ declare function logReverieSearch(query: string, context?: string): void;
1897
+ /**
1898
+ * Logs reverie filtering pipeline statistics.
1899
+ *
1900
+ * @param stats - Filtering statistics
1901
+ */
1902
+ declare function logReverieFiltering(stats: {
1903
+ total: number;
1904
+ afterQuality: number;
1905
+ afterBoilerplate?: number;
1906
+ afterScore: number;
1907
+ afterDedup: number;
1908
+ minScore?: number;
1909
+ }): void;
1910
+ /**
1911
+ * Logs top reverie insights for debugging.
1912
+ *
1913
+ * @param insights - Filtered reverie insights
1914
+ * @param limit - Maximum number of insights to log (default: 3)
1915
+ */
1916
+ declare function logReverieInsights(insights: ReverieResult[], limit?: number): void;
1917
+ /**
1918
+ * Logs quality filtering statistics for hint collection.
1919
+ *
1920
+ * @param stats - Hint collection statistics
1921
+ */
1922
+ declare function logReverieHintQuality(stats: {
1923
+ totalRaw: number;
1924
+ afterQuality: number;
1925
+ afterDedup: number;
1926
+ }): void;
1927
+ /**
1928
+ * Logs LLM grading statistics showing approved vs rejected counts.
1929
+ *
1930
+ * @param stats - LLM grading statistics
1931
+ */
1932
+ declare function logLLMGrading(stats: {
1933
+ total: number;
1934
+ approved: number;
1935
+ rejected: number;
1936
+ minScore?: number;
1937
+ }): void;
1938
+ /**
1939
+ * Logs approved reverie excerpts with relevance scores (verbose mode).
1940
+ *
1941
+ * @param insights - Approved reverie insights to log
1942
+ * @param maxToShow - Maximum number of insights to display (default: 5)
1943
+ */
1944
+ declare function logApprovedReveries(insights: ReverieResult[], maxToShow?: number): void;
1945
+ /**
1946
+ * Truncates a string to a maximum length, adding ellipsis if needed.
1947
+ */
1948
+ declare function truncate(text: string, maxLength: number): string;
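A sketch of how these logging helpers wire around one search-and-filter pass. The paths and stats values are placeholders, field meanings follow the parameter docs above, and note that truncate is re-exported from this package as truncateText:

```typescript
import {
  logApprovedReveries,
  logLLMGrading,
  logReverieFiltering,
  logReverieSearch,
  searchReveries,
  truncateText,
} from '@codex-native/sdk';

// Placeholder paths and counts; the logged fields follow the docs above.
const insights = await searchReveries(
  '/Users/me/.codex',
  'JWT validation bug',
  '/Users/me/my-project',
  { limit: 6 },
);

logReverieSearch('JWT validation bug', 'branch feat/auth');
logReverieFiltering({
  total: 48,                    // candidates fetched
  afterQuality: 30,             // after boilerplate filtering
  afterScore: 18,               // after the relevance-score split
  afterDedup: insights.length,  // after deduplication
  minScore: 0.7,
});
logLLMGrading({ total: 18, approved: insights.length, rejected: 18 - insights.length, minScore: 0.7 });
logApprovedReveries(insights, 3);
console.log(truncateText(insights[0]?.excerpt ?? '', 80));
```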
1949
+
1950
+ /**
1951
+ * Logs multi-level search initiation.
1952
+ *
1953
+ * @param levels - Array of search levels being executed
1954
+ *
1955
+ * @example
1956
+ * ```typescript
1957
+ * logMultiLevelSearch(['project', 'branch', 'file']);
1958
+ * // Output: "🔍 Multi-level reverie search: project → branch → file"
1959
+ * ```
1960
+ */
1961
+ declare function logMultiLevelSearch(levels: ReverieSearchLevel[]): void;
1962
+ /**
1963
+ * Logs results for a specific search level.
1964
+ *
1965
+ * @param level - The search level
1966
+ * @param result - Pipeline result for this level
1967
+ *
1968
+ * @example
1969
+ * ```typescript
1970
+ * logLevelResults('project', {
1971
+ * insights: [...],
1972
+ * stats: { total: 50, final: 8, ... }
1973
+ * });
1974
+ * // Output: " 🌐 Project level: 8 insights (50 → 8, 84% filtered)"
1975
+ * ```
1976
+ */
1977
+ declare function logLevelResults(level: ReverieSearchLevel, result: ReveriePipelineResult): void;
1978
+ /**
1979
+ * Logs a summary of multi-level search results.
1980
+ *
1981
+ * @param results - Map of level to pipeline results
1982
+ *
1983
+ * @example
1984
+ * ```typescript
1985
+ * const results = new Map([
1986
+ * ['project', { insights: [...], stats: {...} }],
1987
+ * ['branch', { insights: [...], stats: {...} }],
1988
+ * ['file', { insights: [...], stats: {...} }]
1989
+ * ]);
1990
+ *
1991
+ * logMultiLevelSummary(results);
1992
+ * // Output summary of all levels with total counts
1993
+ * ```
1994
+ */
1995
+ declare function logMultiLevelSummary(results: Map<ReverieSearchLevel, ReveriePipelineResult>): void;
1996
+
1997
+ /**
1998
+ * Symbol Extraction for Reverie Search
1999
+ *
2000
+ * Extracts key code symbols from diffs to create more focused search queries.
2001
+ * This improves search precision by targeting specific functions, classes, and variables.
2002
+ */
2003
+ /**
2004
+ * Extracts key symbols and terms from a diff to make search queries more targeted.
2005
+ *
2006
+ * Focuses on:
2007
+ * - Function and class definitions
2008
+ * - Variable declarations (const, let, var)
2009
+ * - Exported symbols
2010
+ * - Interface and type definitions
2011
+ *
2012
+ * Avoids:
2013
+ * - Language keywords (true, false, null, etc.)
2014
+ * - Very short symbols (< 3 chars)
2015
+ * - Boilerplate patterns
2016
+ *
2017
+ * @param diff - Git diff content to extract symbols from
2018
+ * @returns Comma-separated string of top 5 symbols, or "code changes" if none found
2019
+ *
2020
+ * @example
2021
+ * ```typescript
2022
+ * const diff = `
2023
+ * +function processUser(user: User) {
2024
+ * + const userName = user.name;
2025
+ * + return userName;
2026
+ * +}
2027
+ * `;
2028
+ *
2029
+ * extractKeySymbols(diff); // "processUser, userName"
2030
+ * ```
2031
+ */
2032
+ declare function extractKeySymbols(diff: string): string;
2033
+
2034
+ /**
2035
+ * Advanced Reverie Search
2036
+ *
2037
+ * Provides semantic search over past conversation history with sophisticated filtering:
2038
+ * - 3x candidate multiplier for aggressive filtering
2039
+ * - Reranker support for improved precision
2040
+ * - Multi-stage filtering with transparent logging
2041
+ * - Quality and deduplication pipelines
2042
+ */
2043
+
2044
+ /**
2045
+ * Performs advanced semantic search over reverie conversation history.
2046
+ *
2047
+ * Search pipeline:
2048
+ * 1. Fetch 3x candidates (candidateMultiplier × limit)
2049
+ * 2. Apply quality filtering (remove boilerplate, system prompts)
2050
+ * 3. Deduplicate similar excerpts (keep highest relevance)
2051
+ * 4. Apply reranker if enabled (improve precision)
2052
+ * 5. Return top N results
2053
+ *
2054
+ * Key features:
2055
+ * - Aggressive candidate fetching for better filtering headroom
2056
+ * - Optional reranker support for precision improvement
2057
+ * - Quality filtering removes system prompts and boilerplate
2058
+ * - Deduplication preserves highest-relevance duplicates
2059
+ * - Transparent logging at each stage
2060
+ *
2061
+ * @param codexHome - Path to .codex directory containing conversation data
2062
+ * @param text - Search query text
2063
+ * @param repo - Repository root path for filtering conversations
2064
+ * @param options - Search configuration options
2065
+ * @returns Array of relevant reverie insights, sorted by relevance
2066
+ *
2067
+ * @example
2068
+ * ```typescript
2069
+ * const insights = await searchReveries(
2070
+ * "/Users/me/.codex",
2071
+ * "authentication bug with JWT tokens",
2072
+ * "/Users/me/my-project",
2073
+ * {
2074
+ * limit: 6,
2075
+ * useReranker: true,
2076
+ * candidateMultiplier: 3
2077
+ * }
2078
+ * );
2079
+ *
2080
+ * console.log(`Found ${insights.length} relevant insights`);
2081
+ * insights.forEach(insight => {
2082
+ * console.log(`[${insight.relevance.toFixed(2)}] ${insight.excerpt.slice(0, 100)}`);
2083
+ * });
2084
+ * ```
2085
+ */
2086
+ declare function searchReveries(codexHome: string, text: string, repo: string, options?: ReverieSearchOptions): Promise<ReverieInsight$1[]>;
2087
+
2088
+ /**
2089
+ * Reverie Context Builders
2090
+ *
2091
+ * Utilities for building search contexts at different levels:
2092
+ * - Project level: Repository-wide patterns and architecture
2093
+ * - Branch level: Feature/branch-specific work and intent
2094
+ * - File level: Individual file changes and symbols
2095
+ */
2096
+
2097
+ /**
2098
+ * Builds project-level search context for repository-wide patterns.
2099
+ *
2100
+ * Use this for searching architectural decisions, common practices,
2101
+ * and project-wide patterns across the entire codebase.
2102
+ *
2103
+ * @param query - Natural language query describing what to find
2104
+ * @param options - Optional configuration
2105
+ * @returns Project-level context ready for search
2106
+ *
2107
+ * @example
2108
+ * ```typescript
2109
+ * const context = buildProjectContext(
2110
+ * "How we handle database migrations in this repository",
2111
+ * { repoPath: "/Users/me/my-project" }
2112
+ * );
2113
+ *
2114
+ * const results = await searchProjectLevel(codexHome, context, runner);
2115
+ * ```
2116
+ */
2117
+ declare function buildProjectContext(query: string, options?: {
2118
+ repoPath?: string;
2119
+ filePatterns?: string[];
2120
+ }): ProjectLevelContext;
2121
+ /**
2122
+ * Builds branch-level search context for feature/branch-specific work.
2123
+ *
2124
+ * Use this for understanding branch intent, feature context, and changes
2125
+ * made across multiple files in a feature branch.
2126
+ *
2127
+ * @param branch - Current branch name
2128
+ * @param changedFiles - List of files modified in this branch
2129
+ * @param options - Optional configuration
2130
+ * @returns Branch-level context ready for search
2131
+ *
2132
+ * @example
2133
+ * ```typescript
2134
+ * const context = buildBranchContext(
2135
+ * "feat/oauth2",
2136
+ * ["src/auth.ts", "src/login.ts", "test/auth.test.ts"],
2137
+ * {
2138
+ * baseBranch: "main",
2139
+ * recentCommits: "Add OAuth2 support\nImplement token refresh",
2140
+ * repoPath: "/Users/me/my-project"
2141
+ * }
2142
+ * );
2143
+ *
2144
+ * const results = await searchBranchLevel(codexHome, context, runner);
2145
+ * ```
2146
+ */
2147
+ declare function buildBranchContext(branch: string, changedFiles: string[], options?: {
2148
+ baseBranch?: string;
2149
+ recentCommits?: string;
2150
+ repoPath?: string;
2151
+ }): BranchLevelContext;
2152
+ /**
2153
+ * Builds file-level search context for individual file changes.
2154
+ *
2155
+ * Use this for focused searches on specific file modifications,
2156
+ * with optional symbol extraction for better targeting.
2157
+ *
2158
+ * @param filePath - Path to the file being analyzed
2159
+ * @param options - Optional configuration
2160
+ * @returns File-level context ready for search
2161
+ *
2162
+ * @example
2163
+ * ```typescript
2164
+ * // Without symbol extraction
2165
+ * const context = buildFileContext(
2166
+ * "src/auth/jwt.ts",
2167
+ * {
2168
+ * diff: "... git diff content ...",
2169
+ * repoPath: "/Users/me/my-project"
2170
+ * }
2171
+ * );
2172
+ *
2173
+ * // With automatic symbol extraction
2174
+ * const context = buildFileContext(
2175
+ * "src/auth/jwt.ts",
2176
+ * {
2177
+ * diff: "+function validateToken(...)\n+function refreshToken(...)",
2178
+ * extractSymbols: true,
2179
+ * repoPath: "/Users/me/my-project"
2180
+ * }
2181
+ * );
2182
+ * // context.symbols will be: ["validateToken", "refreshToken"]
2183
+ *
2184
+ * const results = await searchFileLevel(codexHome, context, runner);
2185
+ * ```
2186
+ */
2187
+ declare function buildFileContext(filePath: string, options?: {
2188
+ diff?: string;
2189
+ extractSymbols?: boolean;
2190
+ repoPath?: string;
2191
+ }): FileLevelContext;
2192
+ /**
2193
+ * Converts a ReverieContext to a search query string.
2194
+ *
2195
+ * Transforms structured context objects into natural language queries
2196
+ * suitable for semantic search.
2197
+ *
2198
+ * @param context - Any level of reverie context
2199
+ * @returns Formatted search query string
2200
+ *
2201
+ * @example
2202
+ * ```typescript
2203
+ * const projectCtx = buildProjectContext("Authentication patterns");
2204
+ * const query = contextToQuery(projectCtx);
2205
+ * // Returns: "Project-wide: Authentication patterns"
2206
+ *
2207
+ * const branchCtx = buildBranchContext("feat/auth", ["auth.ts", "login.ts"]);
2208
+ * const query = contextToQuery(branchCtx);
2209
+ * // Returns: "Branch: feat/auth\nFiles: auth.ts, login.ts"
2210
+ *
2211
+ * const fileCtx = buildFileContext("auth.ts", {
2212
+ * diff: "+function validateToken(...)\n+function refreshToken(...)", extractSymbols: true
2213
+ * });
2214
+ * const query = contextToQuery(fileCtx);
2215
+ * // Returns: "File: auth.ts\nSymbols: validateToken, refreshToken"
2216
+ * ```
2217
+ */
2218
+ declare function contextToQuery(context: ReverieContext): string;
2219
+ /**
2220
+ * Helper to format file paths for display in contexts.
2221
+ *
2222
+ * @param files - Array of file paths
2223
+ * @param maxFiles - Maximum number of files to show before truncating
2224
+ * @returns Formatted file list string
2225
+ */
2226
+ declare function formatFileList(files: string[], maxFiles?: number): string;
2227
+
2228
+ declare function evCompleted(id: string): string;
2229
+ declare function evResponseCreated(id: string): string;
2230
+ declare function evAssistantMessage(id: string, text: string): string;
2231
+ declare function evFunctionCall(callId: string, name: string, args: string): string;
2232
+ declare function sse(events: string[]): string;
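These fixture helpers are undocumented in this file; judging only from their names and signatures, they appear to build individual server-sent-event strings and join them into a single SSE body for mocked model responses. A hedged usage sketch (the exact wire format each helper emits is an assumption):

```typescript
import { evAssistantMessage, evCompleted, evFunctionCall, evResponseCreated, sse } from '@codex-native/sdk';

// Assumed usage: compose event strings into a single SSE payload for a test server.
const body = sse([
  evResponseCreated('resp_1'),
  evAssistantMessage('resp_1', 'Hello from the mocked model'),
  evFunctionCall('call_1', 'read_file', JSON.stringify({ path: 'README.md' })),
  evCompleted('resp_1'),
]);

// Serve `body` from a test HTTP handler with content-type text/event-stream.
console.log(body);
```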
561
2233
 
562
- export { type AgentMessageItem, type ApprovalMode, Codex, type CodexOptions, CodexProvider, type CodexProviderOptions, type CommandExecutionItem, type ErrorItem, type FileChangeItem, type Input, type ItemCompletedEvent, type ItemStartedEvent, type ItemUpdatedEvent, type McpToolCallItem, type NativeToolDefinition, type NativeToolInvocation, type NativeToolResult, type ReasoningItem, type RunResult, type RunStreamedResult, type SandboxMode, Thread, type ThreadError, type ThreadErrorEvent, type ThreadEvent, type ThreadItem, type ThreadOptions, type ThreadStartedEvent, type TodoListItem, type TurnCompletedEvent, type TurnFailedEvent, type TurnOptions, type TurnStartedEvent, type Usage$1 as Usage, type UserInput, type WebSearchItem };
2234
+ export { type AgentMessageItem, type AgentRunner, type ApprovalMode, type ApprovalRequest, type BranchLevelContext, type BranchReview, type CloudApplyOutcome, type CloudApplyStatus, type DiffSummary as CloudDiffSummary, type CloudTaskStatus, type CloudTaskSummary, CloudTasks, type CloudTasksOptions, Codex, type CodexOptions, CodexProvider, type CodexProviderOptions, type CodexToolOptions, type CommandExecutionItem, type CommandExecutionStatus, type CommitReview, type ConversationListOptions, type ConversationListPage, type ConversationSummary, type CurrentChangesReview, type CustomReview, DEFAULT_RERANKER_BATCH_SIZE, DEFAULT_RERANKER_TOP_K, DEFAULT_REVERIE_LIMIT, DEFAULT_REVERIE_MAX_CANDIDATES, DEFAULT_SERVERS, type DelegationResult, type DiagnosticSeverity, type ErrorItem, type FastEmbedEmbedRequest, type FastEmbedInitOptions, type FastEmbedRerankerModelCode, type FileChangeItem, type FileDiagnostics, type FileLevelContext, type FileUpdateChange, type ForkOptions, type FormatStreamOptions, type FormattedStream, type GradingOptions, type Input, type ItemCompletedEvent, type ItemStartedEvent, type ItemUpdatedEvent, type LogEntry, LogLevel, type LogOutput, type LogScope, Logger, type LoggerConfig, type LspDiagnosticSeverity, LspDiagnosticsBridge, LspManager, type LspManagerOptions, type LspServerConfig, type McpToolCallItem, type McpToolCallStatus, type NativeForkResult, type NativeTokenUsage, type NativeToolDefinition, type NativeToolInterceptorContext, type NativeToolInvocation, type NativeToolResult, type NativeTuiExitInfo, type NativeTuiRequest, type NativeUpdateActionInfo, type NativeUpdateActionKind, type NormalizedDiagnostic, OpenCodeAgent, type OpenCodeAgentOptions, type PatchApplyStatus, type PatchChangeKind, type PermissionDecision, type PermissionRequest, type ProjectLevelContext, type QualityFilterStats, REVERIE_CANDIDATE_MULTIPLIER, REVERIE_EMBED_MODEL, REVERIE_LLM_GRADE_THRESHOLD, REVERIE_RERANKER_MODEL, type ReasoningItem, type RepoDiffFileChange, type RepoDiffSummary, type RepoDiffSummaryOptions, type ReverieContext, type ReverieEpisodeSummary, type ReverieFilterStats, type ReverieInsight$1 as ReverieInsight, type ReveriePipelineOptions, type ReveriePipelineResult, type ReverieResult, type ReverieSearchLevel, type ReverieSearchOptions, type ReverieSemanticIndexStats, type ReverieSemanticSearchOptions, type ReviewInvocationOptions, type ReviewTarget, type RunResult, type RunStreamedResult, type RunTuiOptions, type SandboxMode, ScopedLogger, Thread, type ThreadError, type ThreadErrorEvent, type ThreadEvent, type ThreadItem, type ThreadLoggingSink, type ThreadOptions, type ThreadStartedEvent, type TodoItem, type TodoListItem, type TokenizerEncodeOptions, type TokenizerOptions, type ToolCallEvent, type TuiSession, type TurnCompletedEvent, type TurnFailedEvent, type TurnOptions, type TurnStartedEvent, type Usage, type UserInput, type WebSearchItem, type WorkspaceLocator, applyFileReveriePipeline, applyQualityPipeline, applyReveriePipeline, attachLspDiagnostics, buildBranchContext, buildFileContext, buildProjectContext, codexTool, collectRepoDiffSummary, contextToQuery, createThreadLogger, deduplicateReverieInsights, encodeToToon, evAssistantMessage, evCompleted, evFunctionCall, evResponseCreated, extractKeySymbols, fastEmbedEmbed, fastEmbedInit, filterBySeverity, findServerForFile, formatDiagnosticsForBackgroundEvent, formatDiagnosticsForTool, formatDiagnosticsWithSummary, formatFileList, formatStream, gradeReverieRelevance, gradeReveriesInParallel, isValidReverieExcerpt, 
logApprovedReveries, logLLMGrading, logLevelResults, logMultiLevelSearch, logMultiLevelSummary, logReverieFiltering, logReverieHintQuality, logReverieInsights, logReverieSearch, logger, resolveWorkspaceRoot, reverieGetConversationInsights, reverieIndexSemantic, reverieListConversations, reverieSearchConversations, reverieSearchSemantic, runThreadTurnWithLogs, runTui, searchBranchLevel, searchFileLevel, searchMultiLevel, searchProjectLevel, searchReveries, sse, startTui, summarizeDiagnostics, tokenizerCount, tokenizerDecode, tokenizerEncode, truncate as truncateText };