@codelia/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +2341 -0
- package/dist/index.d.cts +638 -0
- package/dist/index.d.ts +638 -0
- package/dist/index.js +2283 -0
- package/package.json +36 -0
- package/prompts/system.md +147 -0
package/dist/index.d.ts
ADDED
@@ -0,0 +1,638 @@
import { JSONSchema7 } from 'json-schema';
import { ZodSchema } from 'zod';
import { Logger } from 'openai/client';
import { AgentEvent, SessionStateSummary as SessionStateSummary$1 } from '@codelia/shared-types';
export { AgentEvent, FinalResponseEvent, ReasoningEvent, StepCompleteEvent, StepStartEvent, TextEvent, ToolCallEvent, ToolResultEvent } from '@codelia/shared-types';
import Anthropic from '@anthropic-ai/sdk';
import { MessageCreateParamsNonStreaming } from '@anthropic-ai/sdk/resources/messages/messages';
import OpenAI, { ClientOptions } from 'openai';
import { ResponseCreateParamsBase, ResponseTextConfig } from 'openai/resources/responses/responses';
import { ReasoningEffort } from 'openai/resources/shared';

/**
 * Message Content Parts
 */
type TextPart = {
    type: "text";
    text: string;
};
type ImagePart = {
    type: "image_url";
    image_url: {
        url: string;
        detail?: "auto" | "low" | "high";
        media_type?: "image/png" | "image/jpeg" | "image/webp" | "image/gif";
    };
};
type DocumentPart = {
    type: "document";
    source: {
        data: string;
        media_type: "application/pdf";
    };
};
type OtherPart = {
    type: "other";
    provider: string;
    kind: string;
    payload: unknown;
};
type ContentPart = TextPart | ImagePart | DocumentPart | OtherPart;

/**
 * Tool Call
 */
type ToolCall = {
    id: string;
    type: "function";
    function: {
        name: string;
        arguments: string;
    };
    provider_meta?: unknown;
};
/**
 * Tool Definition
 */
type ToolDefinition = {
    name: string;
    description: string;
    parameters: JSONSchema7;
    strict?: boolean;
};
type ToolChoice = "auto" | "required" | "none" | string;
type JsonValue = string | number | boolean | null | {
    [key: string]: JsonValue;
} | JsonValue[];
type ToolResult = {
    type: "text";
    text: string;
} | {
    type: "parts";
    parts: ContentPart[];
} | {
    type: "json";
    value: unknown;
};
type ToolReturn = ToolResult | string | ContentPart[] | JsonValue;

/**
 * Base Message
 */
type UserMessage = {
    role: "user";
    content: string | ContentPart[];
    name?: string;
};
type SystemMessage = {
    role: "system";
    content: string | TextPart[];
    name?: string;
    cache?: boolean;
};
type AssistantMessage = {
    role: "assistant";
    content: string | ContentPart[] | null;
    name?: string;
    tool_calls?: ToolCall[];
    refusal?: string | null;
};
type ToolOutputRef = {
    id: string;
    byte_size?: number;
    line_count?: number;
};
type ReasoningMessage<T = unknown> = {
    role: "reasoning";
    content: string | null;
    raw_item?: T | null;
};
type ToolMessage = {
    role: "tool";
    tool_call_id: string;
    tool_name: string;
    content: string | ContentPart[];
    is_error?: boolean;
    output_ref?: ToolOutputRef;
    trimmed?: boolean;
};
type BaseMessage = UserMessage | SystemMessage | AssistantMessage | ToolMessage | ReasoningMessage;

type ChatInvokeUsage = {
    model: string;
    input_tokens: number;
    input_cached_tokens?: number | null;
    input_cache_creation_tokens?: number | null;
    input_image_tokens?: number | null;
    output_tokens: number;
    total_tokens: number;
};
type ChatInvokeCompletion = {
    messages: BaseMessage[];
    usage?: ChatInvokeUsage | null;
    stop_reason?: string | null;
    provider_meta?: unknown;
};
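
// --- Usage sketch (not part of the published file) -----------------------------
// Consumer-side illustration of how the content-part and message shapes above
// compose into a conversation; the image data URL below is a placeholder.
import type { BaseMessage, ContentPart } from '@codelia/core';

const screenshot: ContentPart = {
    type: "image_url",
    image_url: { url: "data:image/png;base64,...", detail: "low", media_type: "image/png" },
};

const conversation: BaseMessage[] = [
    { role: "system", content: "You are a concise code-review assistant.", cache: true },
    { role: "user", content: [{ type: "text", text: "What does this error dialog say?" }, screenshot] },
];
// --------------------------------------------------------------------------------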

type ToolOutputCacheRecord = {
    tool_call_id: string;
    tool_name: string;
    content: string;
    is_error?: boolean;
};
type ToolOutputCacheReadOptions = {
    offset?: number;
    limit?: number;
};
type ToolOutputCacheSearchOptions = {
    pattern: string;
    regex?: boolean;
    before?: number;
    after?: number;
    max_matches?: number;
};
type ToolOutputCacheStore = {
    save: (record: ToolOutputCacheRecord) => Promise<ToolOutputRef> | ToolOutputRef;
    read?: (refId: string, options?: ToolOutputCacheReadOptions) => Promise<string> | string;
    grep?: (refId: string, options: ToolOutputCacheSearchOptions) => Promise<string> | string;
};
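
// --- Usage sketch (not part of the published file) -----------------------------
// A minimal in-memory ToolOutputCacheStore satisfying the shape above; a real
// implementation would likely persist under StoragePaths.toolOutputCacheDir.
import type { ToolOutputCacheRecord, ToolOutputCacheStore, ToolOutputRef } from '@codelia/core';

const outputs = new Map<string, string>();

const memoryStore: ToolOutputCacheStore = {
    save(record: ToolOutputCacheRecord): ToolOutputRef {
        const id = `tool-output-${outputs.size + 1}`;
        outputs.set(id, record.content);
        return { id, byte_size: Buffer.byteLength(record.content), line_count: record.content.split("\n").length };
    },
    read(refId, options) {
        const lines = (outputs.get(refId) ?? "").split("\n");
        const start = options?.offset ?? 0;
        const end = options?.limit === undefined ? undefined : start + options.limit;
        return lines.slice(start, end).join("\n");
    },
};
// --------------------------------------------------------------------------------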

type ModelCost = {
    input?: number;
    output?: number;
    reasoning?: number;
    cacheRead?: number;
    cacheWrite?: number;
    inputAudio?: number;
    outputAudio?: number;
};
type ModelLimits = {
    contextWindow?: number;
    inputTokens?: number;
    outputTokens?: number;
};
type ModelEntry = {
    provider: string;
    modelId: string;
    cost?: ModelCost;
    limits?: ModelLimits;
};
type ModelMetadataIndex = {
    models: Record<string, Record<string, ModelEntry>>;
};
interface ModelMetadataService {
    getModelEntry(provider: string, modelId: string): Promise<ModelEntry | null>;
    getModelEntries(provider: string): Promise<ModelEntry[] | null>;
    getAllModelEntries(): Promise<Record<string, Record<string, ModelEntry>>>;
}

type AgentServices = {
    modelMetadata?: ModelMetadataService;
    toolOutputCacheStore?: ToolOutputCacheStore | null;
};

type ProviderName = "openai" | "anthropic" | "google";
type ChatInvokeInput = {
    messages: BaseMessage[];
    model?: string;
    tools?: ToolDefinition[] | null;
    toolChoice?: ToolChoice | null;
    signal?: AbortSignal;
};
interface BaseChatModel<P extends ProviderName = ProviderName, O = unknown> {
    readonly provider: P;
    readonly model: string;
    ainvoke(input: ChatInvokeInput & {
        options?: O;
    }): Promise<ChatInvokeCompletion>;
}

type ModelSpec = {
    id: string;
    provider: ProviderName;
    aliases?: string[];
    contextWindow?: number;
    maxInputTokens?: number;
    maxOutputTokens?: number;
    supportsTools?: boolean;
    supportsVision?: boolean;
    supportsReasoning?: boolean;
    supportsJsonSchema?: boolean;
};
type ModelRegistry = {
    modelsById: Record<string, ModelSpec>;
    aliasesByProvider: Record<ProviderName, Record<string, string>>;
};
declare function createModelRegistry(specs: ModelSpec[]): ModelRegistry;
declare function registerModels(registry: ModelRegistry, specs: ModelSpec[]): void;
declare function resolveModel(registry: ModelRegistry, idOrAlias: string, provider?: ProviderName): ModelSpec | undefined;
declare function listModels(registry: ModelRegistry, provider?: ProviderName): ModelSpec[];
declare function applyModelMetadata(registry: ModelRegistry, index: ModelMetadataIndex): ModelRegistry;
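
// --- Usage sketch (not part of the published file) -----------------------------
// Building a registry that contains the built-in specs plus a custom entry, then
// resolving it by alias. The "my-proxy-model" spec and "proxy" alias are made up.
import { DEFAULT_MODEL_REGISTRY, createModelRegistry, listModels, registerModels, resolveModel } from '@codelia/core';
import type { ModelSpec } from '@codelia/core';

const customSpec: ModelSpec = {
    id: "my-proxy-model",
    provider: "openai",
    aliases: ["proxy"],
    contextWindow: 200_000,
    supportsTools: true,
};

const registry = createModelRegistry(listModels(DEFAULT_MODEL_REGISTRY));
registerModels(registry, [customSpec]);

const resolved = resolveModel(registry, "proxy", "openai");
console.log(resolved?.id, listModels(registry, "openai").length);
// --------------------------------------------------------------------------------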

type CompactionConfig = {
    enabled?: boolean;
    auto?: boolean;
    thresholdRatio?: number;
    model?: string | null;
    summaryPrompt?: string;
    summaryDirectives?: string[];
    retainPrompt?: string | null;
    retainDirectives?: string[];
    retainLastTurns?: number;
};

type ToolOutputCacheConfig = {
    enabled?: boolean;
    contextBudgetTokens?: number | null;
    maxMessageBytes?: number;
    maxLineLength?: number;
};

type ToolOutputCacheDependencies = {
    modelRegistry: ModelRegistry;
    store?: ToolOutputCacheStore | null;
};
type TrimResult = {
    messages: BaseMessage[];
    trimmed: boolean;
};
declare class ToolOutputCacheService {
    private readonly config;
    private readonly modelRegistry;
    private readonly store?;
    constructor(config: ToolOutputCacheConfig, deps: ToolOutputCacheDependencies);
    processToolMessage(message: ToolMessage): Promise<ToolMessage>;
    trimMessages(llm: BaseChatModel, messages: BaseMessage[]): Promise<TrimResult>;
    private persistToolOutput;
    private resolveContextBudgetTokens;
}

type UsageSummary = {
    total_calls: number;
    total_tokens: number;
    total_input_tokens: number;
    total_output_tokens: number;
    total_cached_input_tokens: number;
    total_cache_creation_tokens: number;
    total_cost_usd?: number | null;
    by_model: Record<string, {
        calls: number;
        input_tokens: number;
        output_tokens: number;
        cached_input_tokens: number;
        cache_creation_tokens: number;
        total_tokens: number;
        cost_usd?: number | null;
    }>;
};

type DependencyKey<T> = {
    id: string;
    create: () => T | Promise<T>;
};
type ToolContext = {
    signal?: AbortSignal;
    logger?: Logger;
    now?: () => Date;
    deps: Record<string, unknown>;
    resolve: <T>(key: DependencyKey<T>) => Promise<T>;
};

type DefineToolOptions<TInput, TResult extends ToolReturn = ToolReturn> = {
    name: string;
    description: string;
    input: ZodSchema<TInput>;
    execute: (input: TInput, ctx: ToolContext) => Promise<TResult> | TResult;
};
type Tool = {
    name: string;
    description: string;
    definition: ToolDefinition;
    executeRaw: (rawArgsJson: string, ctx: ToolContext) => Promise<ToolResult>;
};
type ToolExecution = {
    message: ToolMessage;
    done?: boolean;
    finalMessage?: string;
};
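
// --- Usage sketch (not part of the published file) -----------------------------
// defineTool (declared near the end of this file) turns a zod schema plus handler
// into a Tool whose executeRaw presumably parses and validates the raw JSON
// arguments (inferred from the signatures). The read_file tool is hypothetical.
import { z } from 'zod';
import { defineTool } from '@codelia/core';

const readFileTool = defineTool({
    name: "read_file",
    description: "Read a UTF-8 text file and return its contents.",
    input: z.object({ path: z.string(), maxBytes: z.number().int().positive().optional() }),
    execute: async ({ path, maxBytes }) => {
        const { readFile } = await import('node:fs/promises');
        const text = await readFile(path, 'utf8');
        return maxBytes ? text.slice(0, maxBytes) : text;
    },
});
// --------------------------------------------------------------------------------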

type ToolPermissionDecision = {
    decision: "allow" | "deny";
    reason?: string;
    stop_turn?: boolean;
};
type ToolPermissionHook = (call: ToolCall, rawArgs: string, ctx: ToolContext) => Promise<ToolPermissionDecision> | ToolPermissionDecision;
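
// --- Usage sketch (not part of the published file) -----------------------------
// A ToolPermissionHook that vetoes an obviously destructive call to a hypothetical
// "run_shell" tool; wire it in through AgentOptions.canExecuteTool (declared below).
import type { ToolPermissionHook } from '@codelia/core';

const canExecuteTool: ToolPermissionHook = (call, rawArgs) => {
    if (call.function.name !== "run_shell") return { decision: "allow" };
    const { command } = JSON.parse(rawArgs) as { command?: string };
    if (command?.includes("rm -rf")) {
        return { decision: "deny", reason: "Destructive command blocked", stop_turn: true };
    }
    return { decision: "allow" };
};
// --------------------------------------------------------------------------------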

type SessionHeader = {
    type: "header";
    schema_version: 1;
    run_id: string;
    session_id?: string;
    started_at: string;
    client?: {
        name: string;
        version: string;
    };
    server?: {
        name: string;
        version: string;
    };
    model?: {
        provider?: string;
        name?: string;
        reasoning?: string;
    };
    prompts?: {
        system?: string;
    };
    tools?: {
        definitions?: ToolDefinition[];
        source?: string;
    };
    runtime?: {
        cwd?: string;
        os?: string;
        arch?: string;
        version?: string;
    };
    meta?: Record<string, unknown>;
};
type RunStartRecord = {
    type: "run.start";
    run_id: string;
    session_id?: string;
    ts: string;
    input: {
        type: "text";
        text: string;
    };
    ui_context?: unknown;
    meta?: Record<string, unknown>;
};
type RunContextRecord = {
    type: "run.context";
    run_id: string;
    ts: string;
    context_left_percent: number;
    meta?: Record<string, unknown>;
};
type AgentEventRecord = {
    type: "agent.event";
    run_id: string;
    ts: string;
    seq: number;
    event: AgentEvent;
    meta?: Record<string, unknown>;
};
type ToolOutputRecord = {
    type: "tool.output";
    run_id: string;
    ts: string;
    tool: string;
    tool_call_id: string;
    result_raw: string;
    is_error?: boolean;
    output_ref?: ToolOutputRef;
    meta?: Record<string, unknown>;
};
type LlmRequestRecord = {
    type: "llm.request";
    run_id: string;
    ts: string;
    seq: number;
    model?: {
        provider?: string;
        name?: string;
        reasoning?: string;
    };
    input: {
        messages: BaseMessage[];
        tools?: ToolDefinition[] | null;
        tool_choice?: ToolChoice | null;
        model?: string;
    };
    meta?: Record<string, unknown>;
};
type LlmResponseRecord = {
    type: "llm.response";
    run_id: string;
    ts: string;
    seq: number;
    output: {
        messages: BaseMessage[];
        usage?: ChatInvokeUsage | null;
        stop_reason?: string | null;
        provider_meta?: unknown;
    };
    meta?: Record<string, unknown>;
};
type RunStatusRecord = {
    type: "run.status";
    run_id: string;
    ts: string;
    status: "running" | "awaiting_ui" | "completed" | "error" | "cancelled";
    message?: string;
    meta?: Record<string, unknown>;
};
type RunErrorRecord = {
    type: "run.error";
    run_id: string;
    ts: string;
    error: {
        name: string;
        message: string;
        stack?: string;
    };
    meta?: Record<string, unknown>;
};
type RunEndRecord = {
    type: "run.end";
    run_id: string;
    ts: string;
    outcome: "completed" | "cancelled" | "error";
    final?: string;
    meta?: Record<string, unknown>;
};
type SessionRecord = SessionHeader | RunStartRecord | RunContextRecord | AgentEventRecord | ToolOutputRecord | LlmRequestRecord | LlmResponseRecord | RunStatusRecord | RunErrorRecord | RunEndRecord;
type SessionStore = {
    append: (record: SessionRecord) => Promise<void> | void;
};
type RunEventStoreInit = {
    runId: string;
    startedAt: string;
};
type RunEventStoreFactory = {
    create: (init: RunEventStoreInit) => SessionStore;
};
type SessionState = {
    schema_version: 1;
    session_id: string;
    updated_at: string;
    run_id?: string;
    invoke_seq?: number;
    messages: BaseMessage[];
    meta?: Record<string, unknown>;
};
type SessionStateSummary = SessionStateSummary$1;
type SessionStateStore = {
    load: (sessionId: string) => Promise<SessionState | null>;
    save: (state: SessionState) => Promise<void>;
    list: () => Promise<SessionStateSummary[]>;
};
type AgentSession = {
    run_id: string;
    session_id?: string;
    invoke_seq?: number;
    on_error?: (error: unknown, record: SessionRecord) => void;
    append: (record: SessionRecord) => void;
};
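
// --- Usage sketch (not part of the published file) -----------------------------
// The smallest possible SessionStore (records collected in memory) and an
// AgentSession that forwards to it; a real store might write JSON lines under
// StoragePaths.sessionsDir. The session_id is a placeholder.
import type { AgentSession, SessionRecord, SessionStore } from '@codelia/core';

const records: SessionRecord[] = [];
const store: SessionStore = { append: (record) => { records.push(record); } };

const session: AgentSession = {
    run_id: crypto.randomUUID(),
    session_id: "demo-session",
    append: (record) => { void store.append(record); },
    on_error: (error) => console.error("failed to persist session record", error),
};
// --------------------------------------------------------------------------------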

type AgentRunOptions = {
    session?: AgentSession;
    signal?: AbortSignal;
    forceCompaction?: boolean;
};
type AgentOptions = {
    llm: BaseChatModel;
    tools: Tool[];
    systemPrompt?: string;
    maxIterations?: number;
    toolChoice?: ToolChoice;
    compaction?: CompactionConfig | null;
    toolOutputCache?: ToolOutputCacheConfig | null;
    services?: AgentServices;
    modelRegistry?: ModelRegistry;
    enableUsageTracking?: boolean;
    requireDoneTool?: boolean;
    llmMaxRetries?: number;
    llmRetryBaseDelayMs?: number;
    llmRetryMaxDelayMs?: number;
    llmRetryableStatusCodes?: number[];
    canExecuteTool?: ToolPermissionHook;
};
declare class Agent {
    private readonly llm;
    private readonly tools;
    private readonly systemPrompt?;
    private readonly maxIterations;
    private readonly toolChoice?;
    private readonly requireDoneTool;
    private readonly compactionService?;
    private readonly toolOutputCacheService?;
    private readonly services;
    private readonly modelRegistry;
    private readonly canExecuteTool?;
    private history;
    private usageService;
    constructor(options: AgentOptions);
    getUsageSummary(): UsageSummary;
    getContextLeftPercent(): number | null;
    getHistoryMessages(): BaseMessage[];
    replaceHistoryMessages(messages: BaseMessage[]): void;
    private checkAndCompact;
    private trimToolOutputs;
    private processToolMessage;
    private buildToolContext;
    private recordLlmRequest;
    private recordLlmResponse;
    run(message: string, options?: AgentRunOptions): Promise<string>;
    runStream(message: string, options?: AgentRunOptions): AsyncGenerator<AgentEvent>;
    private generateFinalResponse;
    private executeToolCall;
}
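
// --- Usage sketch (not part of the published file) -----------------------------
// Putting the pieces together: an Agent over ChatOpenAI using the readFileTool,
// canExecuteTool, and session sketches shown earlier. Assumes OPENAI_API_KEY is
// available to the underlying OpenAI client.
import { Agent, ChatOpenAI } from '@codelia/core';

const agent = new Agent({
    llm: new ChatOpenAI({ model: "gpt-5.2-codex", reasoningEffort: "medium" }),
    tools: [readFileTool],
    systemPrompt: "You are a careful coding agent.",
    maxIterations: 20,
    canExecuteTool,
    toolOutputCache: { enabled: true, maxMessageBytes: 64_000 },
});

const answer = await agent.run("Summarise package.json in one sentence.", { session });
console.log(answer, agent.getUsageSummary().total_tokens);

for await (const event of agent.runStream("Now list the dependencies.")) {
    // AgentEvent variants (TextEvent, ToolCallEvent, ...) are defined in @codelia/shared-types.
    console.log(event);
}
// --------------------------------------------------------------------------------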

type StringifyContentMode = "display" | "log";
type StringifyContentOptions = {
    mode?: StringifyContentMode;
    joiner?: string;
    includeOtherPayload?: boolean;
};
declare const stringifyContentParts: (content: ContentPart[], options?: StringifyContentOptions) => string;
declare const stringifyContent: (content: string | ContentPart[] | null | undefined, options?: StringifyContentOptions) => string;

type StorageLayout = "home" | "xdg";
type StoragePaths = {
    root: string;
    configDir: string;
    configFile: string;
    authFile: string;
    mcpAuthFile: string;
    cacheDir: string;
    toolOutputCacheDir: string;
    sessionsDir: string;
    logsDir: string;
};
type ResolveStorageOptions = {
    layout?: StorageLayout;
    rootOverride?: string;
};
interface StoragePathService {
    resolvePaths(options?: ResolveStorageOptions): StoragePaths;
}

declare const PROVIDER_NAME$1: "anthropic";
type AnthropicMessageCreateParams = MessageCreateParamsNonStreaming;
type AnthropicInvokeOptions = Omit<AnthropicMessageCreateParams, "model" | "messages" | "tools" | "tool_choice" | "system" | "stream">;
type ChatAnthropicOptions = {
    client?: Anthropic;
    clientOptions?: ConstructorParameters<typeof Anthropic>[0];
    model?: string;
    maxTokens?: number;
};
declare class ChatAnthropic implements BaseChatModel<typeof PROVIDER_NAME$1, AnthropicInvokeOptions> {
    readonly provider: typeof PROVIDER_NAME$1;
    readonly model: string;
    private readonly client;
    private readonly defaultMaxTokens;
    constructor(options?: ChatAnthropicOptions);
    ainvoke(input: ChatInvokeInput & {
        options?: AnthropicInvokeOptions;
    }, verbose?: boolean): Promise<ChatInvokeCompletion>;
}

declare const PROVIDER_NAME: "openai";
type OpenAITextVerbosity = Exclude<ResponseTextConfig["verbosity"], null>;
type OpenAIInvokeOptions = Omit<ResponseCreateParamsBase, "model" | "input" | "tools" | "tool_choice" | "stream"> & {
    reasoningEffort?: ReasoningEffort;
    textVerbosity?: OpenAITextVerbosity;
};
type ChatOpenAIOptions = {
    client?: OpenAI;
    clientOptions?: ClientOptions;
    model?: string;
    reasoningEffort?: ReasoningEffort;
    textVerbosity?: OpenAITextVerbosity;
};
declare class ChatOpenAI implements BaseChatModel<typeof PROVIDER_NAME, OpenAIInvokeOptions> {
    readonly provider: typeof PROVIDER_NAME;
    readonly model: string;
    private readonly client;
    private readonly defaultReasoningEffort?;
    private readonly defaultTextVerbosity?;
    constructor(options?: ChatOpenAIOptions);
    ainvoke(input: ChatInvokeInput & {
        options?: OpenAIInvokeOptions;
    }, verbose?: boolean): Promise<ChatInvokeCompletion>;
}
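
// --- Usage sketch (not part of the published file) -----------------------------
// Both chat classes implement BaseChatModel, so provider-agnostic helpers only
// need that interface. The default clients are assumed to pick up OPENAI_API_KEY /
// ANTHROPIC_API_KEY from the environment (standard SDK behaviour, not shown here).
import { ChatAnthropic, ChatOpenAI } from '@codelia/core';
import type { BaseChatModel, BaseMessage } from '@codelia/core';

async function oneShot(llm: BaseChatModel, prompt: string): Promise<string> {
    const messages: BaseMessage[] = [{ role: "user", content: prompt }];
    const completion = await llm.ainvoke({ messages });
    const last = completion.messages.at(-1);
    return last && typeof last.content === "string" ? last.content : "";
}

console.log(await oneShot(new ChatOpenAI(), "Reply with exactly: pong"));
console.log(await oneShot(new ChatAnthropic({ maxTokens: 256 }), "Reply with exactly: pong"));
// --------------------------------------------------------------------------------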

declare const ANTHROPIC_DEFAULT_MODEL = "claude-sonnet-4-5";
declare const ANTHROPIC_MODELS: ModelSpec[];

declare const GOOGLE_MODELS: ModelSpec[];

declare const OPENAI_DEFAULT_MODEL = "gpt-5.2-codex";
declare const OPENAI_DEFAULT_REASONING_EFFORT = "medium";
declare const OPENAI_MODELS: ModelSpec[];

declare const DEFAULT_MODEL_REGISTRY: ModelRegistry;

declare const getDefaultSystemPromptPath: () => string;

declare function defineTool<TInput, TResult extends ToolReturn>(toolOptions: DefineToolOptions<TInput, TResult>): Tool;

declare class TaskComplete extends Error {
    readonly finalMessage?: string;
    constructor(finalMessage?: string);
}
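
// --- Usage sketch (not part of the published file) -----------------------------
// TaskComplete looks like the signal a "done"-style tool throws to end the run
// early (compare AgentOptions.requireDoneTool and ToolExecution.finalMessage);
// that reading is an inference from the names, not documented behaviour.
import { TaskComplete, defineTool } from '@codelia/core';
import { z } from 'zod';

const doneTool = defineTool({
    name: "done",
    description: "Call when the task is finished, passing a short final summary.",
    input: z.object({ summary: z.string() }),
    execute: ({ summary }) => {
        throw new TaskComplete(summary);
    },
});
// --------------------------------------------------------------------------------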

export { ANTHROPIC_DEFAULT_MODEL, ANTHROPIC_MODELS, Agent, type AgentEventRecord, type AgentOptions, type AgentRunOptions, type AgentServices, type AgentSession, type BaseChatModel, type BaseMessage, ChatAnthropic, type ChatInvokeInput, ChatOpenAI, type ContentPart, DEFAULT_MODEL_REGISTRY, type DependencyKey, GOOGLE_MODELS, type LlmRequestRecord, type LlmResponseRecord, type ModelCost, type ModelEntry, type ModelLimits, type ModelMetadataIndex, type ModelMetadataService, type ModelRegistry, type ModelSpec, OPENAI_DEFAULT_MODEL, OPENAI_DEFAULT_REASONING_EFFORT, OPENAI_MODELS, type ResolveStorageOptions, type RunContextRecord, type RunEndRecord, type RunErrorRecord, type RunEventStoreFactory, type RunEventStoreInit, type RunStartRecord, type RunStatusRecord, type SessionHeader, type SessionRecord, type SessionState, type SessionStateStore, type SessionStateSummary, type SessionStore, type StorageLayout, type StoragePathService, type StoragePaths, type StringifyContentMode, type StringifyContentOptions, TaskComplete, type Tool, type ToolContext, type ToolDefinition, type ToolExecution, type ToolOutputCacheConfig, type ToolOutputCacheReadOptions, type ToolOutputCacheRecord, type ToolOutputCacheSearchOptions, ToolOutputCacheService, type ToolOutputCacheStore, type ToolOutputRecord, type ToolOutputRef, type ToolPermissionDecision, type ToolPermissionHook, applyModelMetadata, createModelRegistry, defineTool, getDefaultSystemPromptPath, listModels, registerModels, resolveModel, stringifyContent, stringifyContentParts };