@omarestrella/ai-sdk-agent-sdk 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/LICENSE +21 -0
  2. package/README.md +91 -0
  3. package/dist/src/index.d.ts +4 -0
  4. package/dist/src/index.d.ts.map +1 -0
  5. package/dist/src/index.js +6 -0
  6. package/dist/src/index.js.map +1 -0
  7. package/dist/src/json.d.ts +6 -0
  8. package/dist/src/json.d.ts.map +1 -0
  9. package/dist/src/json.js +29 -0
  10. package/dist/src/json.js.map +1 -0
  11. package/dist/src/language-model.d.ts +23 -0
  12. package/dist/src/language-model.d.ts.map +1 -0
  13. package/dist/src/language-model.js +440 -0
  14. package/dist/src/language-model.js.map +1 -0
  15. package/dist/src/logger.d.ts +15 -0
  16. package/dist/src/logger.d.ts.map +1 -0
  17. package/dist/src/logger.js +142 -0
  18. package/dist/src/logger.js.map +1 -0
  19. package/dist/src/messages.d.ts +14 -0
  20. package/dist/src/messages.d.ts.map +1 -0
  21. package/dist/src/messages.js +92 -0
  22. package/dist/src/messages.js.map +1 -0
  23. package/dist/src/provider.d.ts +15 -0
  24. package/dist/src/provider.d.ts.map +1 -0
  25. package/dist/src/provider.js +19 -0
  26. package/dist/src/provider.js.map +1 -0
  27. package/dist/src/tools.d.ts +21 -0
  28. package/dist/src/tools.d.ts.map +1 -0
  29. package/dist/src/tools.js +82 -0
  30. package/dist/src/tools.js.map +1 -0
  31. package/dist/test/messages.test.d.ts +2 -0
  32. package/dist/test/messages.test.d.ts.map +1 -0
  33. package/dist/test/messages.test.js +173 -0
  34. package/dist/test/messages.test.js.map +1 -0
  35. package/dist/test/tools.test.d.ts +2 -0
  36. package/dist/test/tools.test.d.ts.map +1 -0
  37. package/dist/test/tools.test.js +175 -0
  38. package/dist/test/tools.test.js.map +1 -0
  39. package/package.json +70 -0
  40. package/src/index.ts +11 -0
  41. package/src/json.ts +38 -0
  42. package/src/language-model.ts +526 -0
  43. package/src/logger.ts +171 -0
  44. package/src/messages.ts +102 -0
  45. package/src/provider.ts +45 -0
  46. package/src/tools.ts +112 -0
@@ -0,0 +1,526 @@
1
+ import type {
2
+ LanguageModelV2,
3
+ LanguageModelV2CallOptions,
4
+ LanguageModelV2CallWarning,
5
+ LanguageModelV2Content,
6
+ LanguageModelV2FinishReason,
7
+ LanguageModelV2StreamPart,
8
+ LanguageModelV2Usage,
9
+ } from "@ai-sdk/provider";
10
+ import { query, type Options } from "@anthropic-ai/claude-agent-sdk";
11
+ import { safeJsonStringify } from "./json";
12
+ import { convertMessages } from "./messages";
13
+ import { AI_SDK_MCP_SERVER_NAME, convertTools } from "./tools";
14
+ import { logger } from "./logger";
15
+
16
+ /**
17
+ * Strips the MCP prefix from tool names returned by the Agent SDK.
18
+ * The Agent SDK returns tools in format: mcp__{serverName}__{toolName}
19
+ * The AI SDK expects just the original tool name.
20
+ */
21
+ function stripMcpPrefix(toolName: string): string {
22
+ const prefix = `mcp__${AI_SDK_MCP_SERVER_NAME}__`;
23
+ if (toolName.startsWith(prefix)) {
24
+ return toolName.slice(prefix.length);
25
+ }
26
+ return toolName;
27
+ }
28
+
29
+ function mapFinishReason(
30
+ stopReason: string | null | undefined,
31
+ hasToolCalls: boolean,
32
+ ): LanguageModelV2FinishReason {
33
+ if (hasToolCalls) return "tool-calls";
34
+
35
+ switch (stopReason) {
36
+ case "end_turn":
37
+ return "stop";
38
+ case "max_tokens":
39
+ return "length";
40
+ case "stop_sequence":
41
+ return "stop";
42
+ case "tool_use":
43
+ return "tool-calls";
44
+ default:
45
+ return "unknown";
46
+ }
47
+ }
48
+
49
/**
 * Construction-time configuration for {@link ClaudeAgentLanguageModel}.
 */
export interface ClaudeAgentLanguageModelConfig {
  /** Provider identifier reported through the model's `provider` getter. */
  provider: string;
  /** Optional working directory forwarded to the Agent SDK query options. */
  cwd?: string;
}

// Derive the option/result shapes from LanguageModelV2 itself so they stay
// in sync with whatever @ai-sdk/provider version is installed.
type DoGenerateOptions = Parameters<LanguageModelV2["doGenerate"]>[0];
type DoGenerateResult = Awaited<ReturnType<LanguageModelV2["doGenerate"]>>;
type DoStreamOptions = Parameters<LanguageModelV2["doStream"]>[0];
type DoStreamResult = Awaited<ReturnType<LanguageModelV2["doStream"]>>;
58
+
59
+ let idCounter = 0;
60
+ function generateId(): string {
61
+ return `agent-${Date.now()}-${++idCounter}`;
62
+ }
63
+
64
/**
 * AI SDK `LanguageModelV2` implementation backed by the Claude Agent SDK's
 * `query()` generator. Prompts are converted via `convertMessages`, AI SDK
 * tools are exposed through an in-process MCP server (see `convertTools`),
 * and Agent SDK messages/stream events are translated back into AI SDK
 * content parts and stream parts.
 */
export class ClaudeAgentLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2" as const;
  readonly modelId: string;
  // NOTE(review): not part of the LanguageModelV2 interface — presumably
  // kept for older AI SDK call sites; confirm before removing.
  readonly defaultObjectGenerationMode = undefined;

  private readonly config: ClaudeAgentLanguageModelConfig;

  constructor(modelId: string, config: ClaudeAgentLanguageModelConfig) {
    this.modelId = modelId;
    this.config = config;
  }

  /** Provider identifier, taken verbatim from the configuration. */
  get provider(): string {
    return this.config.provider;
  }

  /** This model advertises no natively supported URL patterns. */
  get supportedUrls(): Record<string, RegExp[]> {
    return {};
  }

  /**
   * Translates AI SDK call options into an Agent SDK prompt plus `Options`.
   *
   * - Converts the AI SDK prompt into a system prompt + user prompt pair.
   * - Wraps AI SDK tools into an MCP server registered under
   *   `AI_SDK_MCP_SERVER_NAME`, and allow-lists only those tools.
   * - Bridges the caller's `abortSignal` onto the Agent SDK's
   *   `AbortController`.
   *
   * Shared by both `doGenerate` and `doStream`.
   */
  private buildQueryOptions(options: LanguageModelV2CallOptions) {
    const { systemPrompt, prompt } = convertMessages(options.prompt);
    const convertedTools = convertTools(options.tools as any);

    // Forward caller-initiated aborts to the Agent SDK query.
    const abortController = new AbortController();
    if (options.abortSignal) {
      options.abortSignal.addEventListener("abort", () => {
        abortController.abort();
      });
    }

    const queryOptions: Options = {
      model: this.modelId,
      // The AI SDK drives its own multi-step loop, so each query is one turn.
      maxTurns: 1,
      permissionMode: "bypassPermissions" as const,
      allowDangerouslySkipPermissions: true,
      abortController,
      // Built-in Agent SDK tools are disabled; only the MCP-bridged AI SDK
      // tools (allow-listed by prefix below) are available.
      tools: [],
      allowedTools: [`mcp__${AI_SDK_MCP_SERVER_NAME}__*`],
      ...(this.config.cwd ? { cwd: this.config.cwd } : {}),
    };

    if (systemPrompt) {
      queryOptions.systemPrompt = systemPrompt;
    }

    if (convertedTools?.mcpServer) {
      queryOptions.mcpServers = {
        [AI_SDK_MCP_SERVER_NAME]: convertedTools.mcpServer,
      };
    }

    return { prompt, queryOptions };
  }

  /**
   * Non-streaming generation. Drains the Agent SDK generator, collecting
   * text / tool-call / reasoning content blocks from assistant messages,
   * tracking token usage (deduplicated by message UUID), and mapping the
   * final stop reason to an AI SDK finish reason.
   */
  async doGenerate(options: DoGenerateOptions): Promise<DoGenerateResult> {
    const warnings: LanguageModelV2CallWarning[] = [];
    const { prompt, queryOptions } = this.buildQueryOptions(options);

    const generator = query({
      prompt,
      options: queryOptions as any,
    });

    const content: LanguageModelV2Content[] = [];
    let usage: LanguageModelV2Usage = {
      inputTokens: undefined,
      outputTokens: undefined,
      totalTokens: undefined,
    };
    let finishReason: LanguageModelV2FinishReason = "unknown";
    let hasToolCalls = false;

    // Track message UUIDs to avoid counting usage multiple times
    // Per SDK docs: all messages with same ID have identical usage
    const seenMessageIds = new Set<string>();

    for await (const message of generator) {
      if (message.type === "assistant") {
        const apiMessage = message.message as any;
        const messageId = (message as any).uuid as string | undefined;

        if (Array.isArray(apiMessage.content)) {
          for (const block of apiMessage.content) {
            if (block.type === "text") {
              content.push({ type: "text", text: block.text });
            } else if (block.type === "tool_use") {
              hasToolCalls = true;
              // Agent SDK names tools mcp__{server}__{name}; restore the
              // original AI SDK tool name for the caller.
              const originalToolName = stripMcpPrefix(block.name);
              content.push({
                type: "tool-call",
                toolCallId: block.id,
                toolName: originalToolName,
                // AI SDK expects tool input as a JSON string.
                input:
                  typeof block.input === "string"
                    ? block.input
                    : safeJsonStringify(block.input),
              });
            } else if (block.type === "thinking") {
              content.push({
                type: "reasoning",
                text: block.thinking ?? "",
              });
            }
          }

          // Only record usage once per unique message ID
          if (apiMessage.usage && messageId && !seenMessageIds.has(messageId)) {
            seenMessageIds.add(messageId);
            usage = {
              inputTokens: apiMessage.usage.input_tokens,
              outputTokens: apiMessage.usage.output_tokens,
              totalTokens:
                (apiMessage.usage.input_tokens ?? 0) +
                (apiMessage.usage.output_tokens ?? 0),
            };
            logger.debug("Usage reported in doGenerate", {
              messageId,
              inputTokens: usage.inputTokens,
              outputTokens: usage.outputTokens,
              totalTokens: usage.totalTokens,
            });
          }

          finishReason = mapFinishReason(apiMessage.stop_reason, hasToolCalls);
        }
      }

      if (message.type === "result") {
        // Result message contains cumulative usage from all steps
        if (message.usage) {
          usage = {
            inputTokens: message.usage.input_tokens ?? usage.inputTokens,
            outputTokens: message.usage.output_tokens ?? usage.outputTokens,
            totalTokens: usage.totalTokens,
          };
          logger.debug("Final usage from result message", {
            inputTokens: usage.inputTokens,
            outputTokens: usage.outputTokens,
          });
        }
      }
    }

    // Calculate total tokens if we have both input and output
    if (usage.inputTokens !== undefined && usage.outputTokens !== undefined) {
      usage.totalTokens = usage.inputTokens + usage.outputTokens;
    }

    return {
      content,
      finishReason,
      usage,
      warnings,
      request: { body: queryOptions },
      response: {
        headers: undefined,
      },
    };
  }

  /**
   * Streaming generation. Enables partial messages so the Agent SDK yields
   * raw Anthropic streaming events, then translates them into AI SDK stream
   * parts:
   *
   * - `message_start`            → response-metadata (+ input token usage)
   * - `content_block_start/delta/stop`
   *                              → text-/reasoning-/tool-input- lifecycles
   * - `message_delta`            → output token usage + finish reason
   * - full `assistant` messages  → finish reason / tool-call detection only
   * - `result`                   → final cumulative usage
   *
   * Errors from the generator are surfaced as an "error" part; the stream
   * always ends with dangling block closes, a "finish" part, and close().
   */
  async doStream(options: DoStreamOptions): Promise<DoStreamResult> {
    const warnings: LanguageModelV2CallWarning[] = [];
    const { prompt, queryOptions } = this.buildQueryOptions(options);

    // Enable partial messages to get raw Anthropic streaming events
    queryOptions.includePartialMessages = true;

    const generator = query({
      prompt,
      options: queryOptions,
    });

    let hasToolCalls = false;

    const stream = new ReadableStream<LanguageModelV2StreamPart>({
      async start(controller) {
        controller.enqueue({ type: "stream-start", warnings });

        let finishReason: LanguageModelV2FinishReason = "unknown";
        let usage: LanguageModelV2Usage = {
          inputTokens: undefined,
          outputTokens: undefined,
          totalTokens: undefined,
        };

        // Track active text block for start/delta/end lifecycle
        let activeTextId: string | null = null;
        // Track active reasoning block
        let activeReasoningId: string | null = null;

        // Track tool calls being streamed (keyed by content block index)
        const toolCalls: Map<
          number,
          { toolCallId: string; toolName: string; argsText: string }
        > = new Map();

        // Track message UUIDs to avoid counting usage multiple times
        // Per SDK docs: all messages with same ID have identical usage
        const seenMessageIds = new Set<string>();

        try {
          for await (const message of generator) {
            if (message.type === "stream_event") {
              const event = message.event;

              if (!event || !event.type) continue;

              switch (event.type) {
                case "message_start": {
                  const msg = event.message;
                  if (msg) {
                    controller.enqueue({
                      type: "response-metadata",
                      id: msg.id,
                      timestamp: new Date(),
                      modelId: msg.model,
                    });
                    // message_start carries the input-token count.
                    if (msg.usage) {
                      usage.inputTokens = msg.usage.input_tokens;
                      logger.debug(
                        "Initial usage reported in doStream (message_start)",
                        {
                          inputTokens: usage.inputTokens,
                        },
                      );
                    }
                  }
                  break;
                }

                case "content_block_start": {
                  const block = event.content_block;
                  const index = event.index as number;

                  if (block?.type === "text") {
                    activeTextId = generateId();
                    controller.enqueue({
                      type: "text-start",
                      id: activeTextId,
                    });
                  } else if (block?.type === "tool_use") {
                    hasToolCalls = true;
                    const id = block.id ?? generateId();
                    toolCalls.set(index, {
                      toolCallId: id,
                      toolName: block.name,
                      argsText: "",
                    });
                    controller.enqueue({
                      type: "tool-input-start",
                      id,
                      toolName: block.name,
                    });
                  } else if (block?.type === "thinking") {
                    activeReasoningId = generateId();
                    controller.enqueue({
                      type: "reasoning-start",
                      id: activeReasoningId,
                    });
                  }
                  break;
                }

                case "content_block_delta": {
                  const delta = event.delta;
                  const index = event.index as number;

                  if (delta?.type === "text_delta") {
                    // Defensive: synthesize a text-start if a delta arrives
                    // before (or without) its content_block_start.
                    if (!activeTextId) {
                      activeTextId = generateId();
                      controller.enqueue({
                        type: "text-start",
                        id: activeTextId,
                      });
                    }
                    controller.enqueue({
                      type: "text-delta",
                      id: activeTextId,
                      delta: delta.text,
                    });
                  } else if (delta?.type === "input_json_delta") {
                    const tc = toolCalls.get(index);
                    if (tc) {
                      // Accumulate the raw JSON so the final tool-call part
                      // can carry the complete argument string.
                      tc.argsText += delta.partial_json;
                      controller.enqueue({
                        type: "tool-input-delta",
                        id: tc.toolCallId,
                        delta: delta.partial_json,
                      });
                    }
                  } else if (delta?.type === "thinking_delta") {
                    // Defensive: synthesize a reasoning-start if needed.
                    if (!activeReasoningId) {
                      activeReasoningId = generateId();
                      controller.enqueue({
                        type: "reasoning-start",
                        id: activeReasoningId,
                      });
                    }
                    controller.enqueue({
                      type: "reasoning-delta",
                      id: activeReasoningId,
                      delta: delta.thinking,
                    });
                  }
                  break;
                }

                case "content_block_stop": {
                  const index = event.index as number;
                  const tc = toolCalls.get(index);

                  if (tc) {
                    const originalToolName = stripMcpPrefix(tc.toolName);
                    // End the tool input stream
                    controller.enqueue({
                      type: "tool-input-end",
                      id: tc.toolCallId,
                    });
                    // Emit the complete tool call
                    controller.enqueue({
                      type: "tool-call",
                      toolCallId: tc.toolCallId,
                      toolName: originalToolName,
                      input: tc.argsText,
                    });
                    toolCalls.delete(index);
                  } else if (activeTextId) {
                    controller.enqueue({
                      type: "text-end",
                      id: activeTextId,
                    });
                    activeTextId = null;
                  } else if (activeReasoningId) {
                    controller.enqueue({
                      type: "reasoning-end",
                      id: activeReasoningId,
                    });
                    activeReasoningId = null;
                  }
                  break;
                }

                case "message_delta": {
                  // message_delta carries the output-token count.
                  if (event.usage) {
                    usage.outputTokens = event.usage.output_tokens;
                    if (usage.inputTokens !== undefined) {
                      usage.totalTokens =
                        usage.inputTokens + (event.usage.output_tokens ?? 0);
                    }
                    logger.debug(
                      "Usage delta reported in doStream (message_delta)",
                      {
                        outputTokens: usage.outputTokens,
                        totalTokens: usage.totalTokens,
                      },
                    );
                  }
                  finishReason = mapFinishReason(
                    event.delta?.stop_reason,
                    hasToolCalls,
                  );
                  break;
                }

                case "message_stop": {
                  // Final streaming event
                  break;
                }
              }
            } else if (message.type === "assistant") {
              // Full assistant message — only update finish reason, not usage
              // Usage is tracked from streaming events (message_start, message_delta)
              // Per SDK docs: assistant messages share usage with streaming events
              const apiMessage = (message as any).message;
              const messageId = (message as any).uuid as string | undefined;

              if (Array.isArray(apiMessage?.content)) {
                for (const block of apiMessage.content) {
                  if (block.type === "tool_use") {
                    hasToolCalls = true;
                  }
                }
              }

              // Don't overwrite usage from streaming events - they are more accurate
              // and already tracked. Only log if this is a new message ID.
              if (
                apiMessage?.usage &&
                messageId &&
                !seenMessageIds.has(messageId)
              ) {
                seenMessageIds.add(messageId);
                logger.debug(
                  "Assistant message usage (already tracked from streaming)",
                  {
                    messageId,
                    inputTokens: apiMessage.usage.input_tokens,
                    outputTokens: apiMessage.usage.output_tokens,
                  },
                );
              }

              if (apiMessage?.stop_reason) {
                finishReason = mapFinishReason(
                  apiMessage.stop_reason,
                  hasToolCalls,
                );
              }
            } else if (message.type === "result") {
              // Final result with cumulative usage from all steps
              const result = message as any;
              if (result.usage) {
                usage.inputTokens =
                  result.usage.input_tokens ?? usage.inputTokens;
                usage.outputTokens =
                  result.usage.output_tokens ?? usage.outputTokens;
                logger.debug("Final usage from result message", {
                  inputTokens: usage.inputTokens,
                  outputTokens: usage.outputTokens,
                });
              }
            }
          }
        } catch (error) {
          // Surface generator failures as a stream part; the finish part
          // below is still emitted so consumers see a terminated stream.
          controller.enqueue({ type: "error", error });
        }

        // Close any dangling blocks
        if (activeTextId) {
          controller.enqueue({ type: "text-end", id: activeTextId });
        }
        if (activeReasoningId) {
          controller.enqueue({ type: "reasoning-end", id: activeReasoningId });
        }

        // Calculate total tokens if not already done
        if (
          usage.inputTokens !== undefined &&
          usage.outputTokens !== undefined
        ) {
          usage.totalTokens = usage.inputTokens + usage.outputTokens;
        }

        controller.enqueue({
          type: "finish",
          finishReason,
          usage,
        });

        controller.close();
      },
    });

    return {
      stream,
      request: { body: queryOptions },
      response: {
        headers: undefined,
      },
    };
  }
}
package/src/logger.ts ADDED
@@ -0,0 +1,171 @@
1
+ import { createWriteStream, existsSync, mkdirSync } from "fs";
2
+ import type { WriteStream } from "fs";
3
+ import { homedir } from "os";
4
+ import { join } from "path";
5
+ import { safeJsonStringify } from "./json";
6
+
7
+ /**
8
+ * This is a weird file. We want to log to a file, but only if the consola package
9
+ * we are using is available.
10
+ */
11
+
12
// Environment configuration
// LOG_LEVEL: "trace" | "debug" | "info" | "warn"; any other value maps to
// errors-only in loadConsola(). Defaults to "debug".
const LOG_LEVEL = (process.env.LOG_LEVEL as string) || "debug";
// LOG_DIR / LOG_FILE: where the JSON-lines log is written; defaults to
// ~/.cache/ai-sdk-claude-agent/ai-sdk-claude-agent.log.
const LOG_DIR =
  process.env.LOG_DIR || join(homedir(), ".cache", "ai-sdk-claude-agent");
const LOG_FILE = process.env.LOG_FILE || "ai-sdk-claude-agent.log";

// Ensure log directory exists
if (!existsSync(LOG_DIR)) {
  try {
    mkdirSync(LOG_DIR, { recursive: true });
  } catch {
    // Silent fail
    // (file logging is best-effort; a missing directory must not break the
    // consuming application)
  }
}

const LOG_FILE_PATH = join(LOG_DIR, LOG_FILE);
28
+
29
+ // Persistent write stream for file logging
30
+ let logStream: WriteStream | null = null;
31
+
32
+ function getLogStream(): WriteStream {
33
+ if (!logStream) {
34
+ logStream = createWriteStream(LOG_FILE_PATH, { flags: "a" });
35
+ }
36
+ return logStream;
37
+ }
38
+
39
// Type definitions for consola
// Minimal structural subset of consola's API used by this module; declared
// locally so consola can remain a type-free optional dependency.
interface ConsolaInstance {
  debug: (message: string, ...args: unknown[]) => void;
  info: (message: string, ...args: unknown[]) => void;
  warn: (message: string, ...args: unknown[]) => void;
  error: (message: string, ...args: unknown[]) => void;
  log: (message: string, ...args: unknown[]) => void;
}
47
+
48
// Memoized consola instance (null when consola is absent or not yet loaded).
let consolaInstance: ConsolaInstance | null = null;
// Guards against repeated dynamic-import attempts after the first one.
let consolaLoadAttempted = false;

/**
 * Dynamically loads consola if available.
 * This is an optional peer dependency - if not installed, logging is a no-op.
 *
 * On success, configures a file reporter that appends one JSON line per log
 * record to LOG_FILE_PATH and sets the consola level from LOG_LEVEL.
 * Returns the (memoized) instance, or null when consola cannot be imported.
 */
async function loadConsola(): Promise<ConsolaInstance | null> {
  if (consolaLoadAttempted) return consolaInstance;
  consolaLoadAttempted = true;

  try {
    // Dynamic import - will fail gracefully if consola is not installed
    const { createConsola } = await import("consola");

    const reporters = [];

    // File reporter configuration
    if (LOG_FILE) {
      reporters.push({
        log: (logObj: { level: number; args: unknown[]; date: Date }) => {
          // Consola levels: 0=fatal/error, 1=warn, 2=log, 3=info, 4=debug, 5=trace
          const levelNames: Record<number, string> = {
            0: "ERROR",
            1: "WARN",
            2: "LOG",
            3: "INFO",
            4: "DEBUG",
            5: "TRACE",
          };
          const levelName = levelNames[logObj.level] || "LOG";
          // Flatten all args into one space-joined string; objects are
          // serialized with the cycle-safe stringifier.
          const message = logObj.args
            .map((arg) =>
              typeof arg === "object" ? safeJsonStringify(arg) : String(arg),
            )
            .join(" ");

          const line =
            safeJsonStringify({
              timestamp: logObj.date.toISOString(),
              level: levelName,
              message,
            }) + "\n";

          // Append to file using persistent stream
          getLogStream().write(line);
        },
      });
    }

    consolaInstance = createConsola({
      // Consola levels: 0=fatal/error, 1=warn, 2=log, 3=info, 4=debug, 5=trace
      level:
        LOG_LEVEL === "trace"
          ? 5
          : LOG_LEVEL === "debug"
            ? 4
            : LOG_LEVEL === "info"
              ? 3
              : LOG_LEVEL === "warn"
                ? 1
                : 0,
      reporters,
    }) as ConsolaInstance;

    consolaInstance.info("Logger initialized with consola", {
      level: LOG_LEVEL,
      file: LOG_FILE_PATH,
    });

    return consolaInstance;
  } catch {
    // consola is not installed - logging will be a no-op
    return null;
  }
}
124
+
125
+ // Initialize consola asynchronously
126
+ const consolaPromise = loadConsola();
127
+
128
+ /**
129
+ * Logger interface that wraps consola if available, otherwise no-op.
130
+ */
131
+ export const logger = {
132
+ debug(message: string, ...args: unknown[]): void {
133
+ if (consolaInstance) {
134
+ consolaInstance.debug(message, ...args);
135
+ } else {
136
+ consolaPromise.then((c) => c?.debug(message, ...args));
137
+ }
138
+ },
139
+
140
+ info(message: string, ...args: unknown[]): void {
141
+ if (consolaInstance) {
142
+ consolaInstance.info(message, ...args);
143
+ } else {
144
+ consolaPromise.then((c) => c?.info(message, ...args));
145
+ }
146
+ },
147
+
148
+ warn(message: string, ...args: unknown[]): void {
149
+ if (consolaInstance) {
150
+ consolaInstance.warn(message, ...args);
151
+ } else {
152
+ consolaPromise.then((c) => c?.warn(message, ...args));
153
+ }
154
+ },
155
+
156
+ error(message: string, ...args: unknown[]): void {
157
+ if (consolaInstance) {
158
+ consolaInstance.error(message, ...args);
159
+ } else {
160
+ consolaPromise.then((c) => c?.error(message, ...args));
161
+ }
162
+ },
163
+
164
+ /**
165
+ * Legacy method - logs at info level
166
+ * @deprecated Use logger.info() or logger.debug() instead
167
+ */
168
+ log(message: string, ...args: unknown[]): void {
169
+ this.info(message, ...args);
170
+ },
171
+ };