observa-sdk 0.0.18 → 0.0.19
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- package/dist/index.cjs +223 -45
- package/dist/index.js +223 -45
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
@@ -194,6 +194,96 @@ function getTraceContext() {
   }
 }
 
+// src/instrumentation/error-utils.ts
+function extractErrorCode(error, provider) {
+  if (!error) return null;
+  if (provider === "openai" || provider === "vercel-ai") {
+    if (error.code) return error.code;
+    if (error.type) return error.type;
+    if (error.status === 401) return "invalid_api_key";
+    if (error.status === 429) return "rate_limit_exceeded";
+    if (error.status === 400) return "invalid_request";
+    if (error.status === 403) return "insufficient_quota";
+    if (error.status === 404) return "model_not_found";
+    if (error.status === 500) return "internal_server_error";
+    if (error.status === 502) return "bad_gateway";
+    if (error.status === 503) return "service_unavailable";
+  }
+  if (provider === "anthropic") {
+    if (error.error?.type) return error.error.type;
+    if (error.status === 401) return "invalid_api_key";
+    if (error.status === 429) return "rate_limit_error";
+    if (error.status === 400) return "invalid_request";
+    if (error.status === 500) return "internal_server_error";
+  }
+  if (error.status) {
+    if (error.status === 401) return "unauthorized";
+    if (error.status === 403) return "forbidden";
+    if (error.status === 404) return "not_found";
+    if (error.status === 429) return "rate_limit_exceeded";
+    if (error.status >= 500) return "server_error";
+  }
+  const message = String(error.message || error);
+  if (message.toLowerCase().includes("timeout")) return "timeout_error";
+  if (message.toLowerCase().includes("network")) return "network_error";
+  if (message.toLowerCase().includes("connection")) return "connection_error";
+  return null;
+}
+function categorizeError(error, provider) {
+  if (!error) return "unknown_error";
+  const statusCode = error.status || error.statusCode || error.status_code;
+  const errorCode = extractErrorCode(error, provider);
+  const message = String(error.message || error.error?.message || error || "").toLowerCase();
+  if (statusCode === 401 || statusCode === 403 || errorCode === "invalid_api_key" || errorCode === "unauthorized" || errorCode === "forbidden" || message.includes("authentication") || message.includes("unauthorized") || message.includes("invalid api key") || message.includes("invalid api_token")) {
+    return "authentication_error";
+  }
+  if (statusCode === 429 || errorCode === "rate_limit_exceeded" || errorCode === "rate_limit_error" || message.includes("rate limit") || message.includes("too many requests")) {
+    return "rate_limit_error";
+  }
+  if (statusCode === 400 || errorCode === "invalid_request" || message.includes("validation") || message.includes("invalid") || message.includes("bad request")) {
+    return "validation_error";
+  }
+  if (errorCode === "timeout_error" || message.includes("timeout") || message.includes("timed out") || message.includes("request timeout")) {
+    return "timeout_error";
+  }
+  if (errorCode === "network_error" || errorCode === "connection_error" || message.includes("network") || message.includes("connection") || message.includes("econn") || message.includes("enotfound") || message.includes("eai_again")) {
+    return "network_error";
+  }
+  if (statusCode >= 500 || errorCode === "internal_server_error" || errorCode === "server_error" || errorCode === "bad_gateway" || errorCode === "service_unavailable" || message.includes("server error") || message.includes("internal error")) {
+    return "server_error";
+  }
+  if (errorCode === "insufficient_quota" || message.includes("quota") || message.includes("billing") || message.includes("credits")) {
+    return "quota_error";
+  }
+  if (statusCode === 404 || errorCode === "model_not_found" || errorCode === "not_found" || message.includes("model not found") || message.includes("model unavailable")) {
+    return "model_error";
+  }
+  return "unknown_error";
+}
+function extractProviderError(error, provider) {
+  const code = extractErrorCode(error, provider) || "unknown_error";
+  const category = categorizeError(error, provider);
+  const statusCode = error.status || error.statusCode || error.status_code;
+  let message = "An unknown error occurred";
+  if (error.message) {
+    message = error.message;
+  } else if (error.error?.message) {
+    message = error.error.message;
+  } else if (error.response?.data?.error?.message) {
+    message = error.response.data.error.message;
+  } else if (typeof error === "string") {
+    message = error;
+  } else if (error.toString && error.toString() !== "[object Object]") {
+    message = error.toString();
+  }
+  return {
+    code,
+    category,
+    message,
+    statusCode
+  };
+}
+
 // src/instrumentation/openai.ts
 var proxyCache = /* @__PURE__ */ new WeakMap();
 function observeOpenAI(client, options) {
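
Note: the new error-utils module maps raw provider errors to a stable { code, category, message, statusCode } shape. A minimal sketch of the exported helper's behavior, assuming a hypothetical OpenAI-style error object (the values below are illustrative, not taken from a real run):

    // Hypothetical error resembling an OpenAI SDK APIError
    const err = { status: 429, code: "rate_limit_exceeded", message: "Rate limit reached" };
    const info = extractProviderError(err, "openai");
    // info => {
    //   code: "rate_limit_exceeded",   // taken from error.code
    //   category: "rate_limit_error",  // categorizeError matches status 429
    //   message: "Rate limit reached",
    //   statusCode: 429
    // }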
@@ -229,6 +319,9 @@ async function traceOpenAICall(originalFn, args, options) {
   const startTime = Date.now();
   const requestParams = args[0] || {};
   const isStreaming = requestParams.stream === true;
+  const inputText = requestParams.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+  const inputMessages = requestParams.messages || null;
+  const model = requestParams.model || "unknown";
   try {
     const result = await originalFn(...args);
     if (isStreaming) {
@@ -244,7 +337,15 @@ async function traceOpenAICall(originalFn, args, options) {
           fullResponse.streamingDuration
         );
       },
-      (err) => recordError(
+      (err) => recordError(
+        requestParams,
+        err,
+        startTime,
+        options,
+        inputText,
+        inputMessages,
+        model
+      ),
       "openai"
     );
   } else {
@@ -252,7 +353,15 @@ async function traceOpenAICall(originalFn, args, options) {
       return result;
     }
   } catch (error) {
-    recordError(
+    recordError(
+      requestParams,
+      error,
+      startTime,
+      options,
+      inputText,
+      inputMessages,
+      model
+    );
    throw error;
  }
}
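
Note: inputText, inputMessages, and model are now captured before await originalFn(...args), so both the catch path and the streaming error callback can attach the request context even though the thrown error object carries none of it. A minimal sketch of the pattern in isolation (simplified; not the package's exact code):

    async function tracedCall(originalFn, args) {
      const params = args[0] || {};
      // Capture request context up front; an error thrown later has no messages on it.
      const inputText = params.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
      try {
        return await originalFn(...args);
      } catch (error) {
        console.error("call failed", { model: params.model, inputText, error });
        throw error;
      }
    }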
@@ -291,20 +400,26 @@ function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration)
     console.error("[Observa] Failed to record trace", e);
   }
 }
-function recordError(req, error, start, opts) {
+function recordError(req, error, start, opts, preExtractedInputText, preExtractedInputMessages, preExtractedModel) {
   const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
-      const model = sanitizedReq.model || "unknown";
-
+      const model = preExtractedModel || sanitizedReq.model || "unknown";
+      let inputText = preExtractedInputText || null;
+      let inputMessages = preExtractedInputMessages || null;
+      if (!inputText) {
+        inputMessages = sanitizedReq.messages || null;
+        inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+      }
+      const extractedError = extractProviderError(error, "openai");
       opts.observa.trackLLMCall({
         model,
         input: inputText,
         output: null,
         // No output on error
-        inputMessages
+        inputMessages,
         outputMessages: null,
         inputTokens: null,
         outputTokens: null,
@@ -321,17 +436,19 @@ function recordError(req, error, start, opts) {
         maxTokens: sanitizedReq.max_tokens || null
       });
       opts.observa.trackError({
-        errorType: "openai_api_error",
-        errorMessage:
+        errorType: error?.name || extractedError.code || "openai_api_error",
+        errorMessage: extractedError.message,
         stackTrace: error?.stack || null,
         context: {
           request: sanitizedReq,
           model,
           input: inputText,
           provider: "openai",
-          duration_ms: duration
+          duration_ms: duration,
+          status_code: extractedError.statusCode || null
         },
-        errorCategory:
+        errorCategory: extractedError.category,
+        errorCode: extractedError.code
       });
     }
   } catch (e) {
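
Note: trackError now emits structured code/category/status fields instead of the fixed "openai_api_error" type. Roughly what a 401 from OpenAI would produce (illustrative payload; field values depend on the actual SDK error object):

    opts.observa.trackError({
      errorType: "AuthenticationError",            // error?.name when present, else extractedError.code
      errorMessage: "Incorrect API key provided",  // extractProviderError prefers error.message
      stackTrace: null,
      context: {
        request: { model: "gpt-4o-mini", stream: false },
        model: "gpt-4o-mini",
        input: "Hello",
        provider: "openai",
        duration_ms: 312,
        status_code: 401
      },
      errorCategory: "authentication_error",
      errorCode: "invalid_api_key"
    });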
@@ -374,6 +491,15 @@ async function traceAnthropicCall(originalFn, args, options) {
   const startTime = Date.now();
   const requestParams = args[0] || {};
   const isStreaming = requestParams.stream === true;
+  const inputText = requestParams.messages?.map((m) => {
+    if (typeof m.content === "string") return m.content;
+    if (Array.isArray(m.content)) {
+      return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+    }
+    return null;
+  }).filter(Boolean).join("\n") || null;
+  const inputMessages = requestParams.messages || null;
+  const model = requestParams.model || "unknown";
   try {
     const result = await originalFn(...args);
     if (isStreaming) {
@@ -389,7 +515,15 @@ async function traceAnthropicCall(originalFn, args, options) {
           fullResponse.streamingDuration
         );
       },
-      (err) => recordError2(
+      (err) => recordError2(
+        requestParams,
+        err,
+        startTime,
+        options,
+        inputText,
+        inputMessages,
+        model
+      ),
       "anthropic"
     );
   } else {
@@ -397,7 +531,15 @@ async function traceAnthropicCall(originalFn, args, options) {
       return result;
     }
   } catch (error) {
-    recordError2(
+    recordError2(
+      requestParams,
+      error,
+      startTime,
+      options,
+      inputText,
+      inputMessages,
+      model
+    );
    throw error;
  }
}
@@ -441,26 +583,32 @@ function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration
     console.error("[Observa] Failed to record trace", e);
   }
 }
-function recordError2(req, error, start, opts) {
+function recordError2(req, error, start, opts, preExtractedInputText, preExtractedInputMessages, preExtractedModel) {
   const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
-      const model = sanitizedReq.model || "unknown";
-
-
-
-
-
-
-
+      const model = preExtractedModel || sanitizedReq.model || "unknown";
+      let inputText = preExtractedInputText || null;
+      let inputMessages = preExtractedInputMessages || null;
+      if (!inputText) {
+        inputMessages = sanitizedReq.messages || null;
+        inputText = sanitizedReq.messages?.map((m) => {
+          if (typeof m.content === "string") return m.content;
+          if (Array.isArray(m.content)) {
+            return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+          }
+          return null;
+        }).filter(Boolean).join("\n") || null;
+      }
+      const extractedError = extractProviderError(error, "anthropic");
       opts.observa.trackLLMCall({
         model,
         input: inputText,
         output: null,
         // No output on error
-        inputMessages
+        inputMessages,
         outputMessages: null,
         inputTokens: null,
         outputTokens: null,
@@ -477,17 +625,19 @@ function recordError2(req, error, start, opts) {
         maxTokens: sanitizedReq.max_tokens || null
       });
       opts.observa.trackError({
-        errorType: "anthropic_api_error",
-        errorMessage:
+        errorType: error?.name || extractedError.code || "anthropic_api_error",
+        errorMessage: extractedError.message,
         stackTrace: error?.stack || null,
         context: {
           request: sanitizedReq,
           model,
           input: inputText,
           provider: "anthropic",
-          duration_ms: duration
+          duration_ms: duration,
+          status_code: extractedError.statusCode || null
         },
-        errorCategory:
+        errorCategory: extractedError.category,
+        errorCode: extractedError.code
       });
     }
   } catch (e) {
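
Note: the Anthropic path flattens content blocks, since message content may be an array of typed blocks rather than a plain string. A hypothetical request showing what the extractor in the hunks above yields:

    const messages = [
      { role: "user", content: "Describe this image" },
      {
        role: "user",
        content: [
          { type: "image", source: { type: "base64", media_type: "image/png", data: "<base64>" } },
          { type: "text", text: "What is shown here?" }
        ]
      }
    ];
    // String content passes through; array blocks yield c.text when present,
    // otherwise the block's type string:
    // => "Describe this image\nimage\nWhat is shown here?"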
@@ -566,6 +716,14 @@ async function traceGenerateText(originalFn, args, options) {
   const model = requestParams.model || "unknown";
   const provider = extractProviderFromModel(model);
   const modelIdentifier = extractModelIdentifier(model);
+  let inputText = null;
+  let inputMessages = null;
+  if (requestParams.prompt) {
+    inputText = typeof requestParams.prompt === "string" ? requestParams.prompt : JSON.stringify(requestParams.prompt);
+  } else if (requestParams.messages) {
+    inputMessages = requestParams.messages;
+    inputText = requestParams.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+  }
   try {
     const result = await originalFn(...args);
     const responseText = result.text || "";
@@ -606,7 +764,9 @@ async function traceGenerateText(originalFn, args, options) {
       error,
       startTime,
       options,
-      provider
+      provider,
+      inputText,
+      inputMessages
     );
     throw error;
   }
@@ -664,6 +824,14 @@ async function traceStreamText(originalFn, args, options) {
   const model = requestParams.model || "unknown";
   const provider = extractProviderFromModel(model);
   const modelIdentifier = extractModelIdentifier(model);
+  let inputText = null;
+  let inputMessages = null;
+  if (requestParams.prompt) {
+    inputText = typeof requestParams.prompt === "string" ? requestParams.prompt : JSON.stringify(requestParams.prompt);
+  } else if (requestParams.messages) {
+    inputMessages = requestParams.messages;
+    inputText = requestParams.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+  }
   try {
     const result = await originalFn(...args);
     if (result.textStream) {
@@ -698,7 +866,9 @@ async function traceStreamText(originalFn, args, options) {
         err,
         startTime,
         options,
-        provider
+        provider,
+        inputText,
+        inputMessages
       )
     );
     const wrappedResult = Object.create(Object.getPrototypeOf(result));
@@ -738,7 +908,9 @@ async function traceStreamText(originalFn, args, options) {
       error,
       startTime,
      options,
-      provider
+      provider,
+      inputText,
+      inputMessages
    );
    throw error;
  }
@@ -785,21 +957,25 @@ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration
     console.error("[Observa] Failed to record trace", e);
   }
 }
-function recordError3(req, error, start, opts, provider) {
+function recordError3(req, error, start, opts, provider, preExtractedInputText, preExtractedInputMessages) {
   const duration = Date.now() - start;
   try {
-    console.error("[Observa] \u26A0\uFE0F Error Captured:", error
+    console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
       const model = sanitizedReq.model || "unknown";
-      let inputText = null;
-      let inputMessages = null;
-      if (
-
-
-
+      let inputText = preExtractedInputText || null;
+      let inputMessages = preExtractedInputMessages || null;
+      if (!inputText) {
+        if (sanitizedReq.prompt) {
+          inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+        } else if (sanitizedReq.messages) {
+          inputMessages = sanitizedReq.messages;
+          inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+        }
       }
+      const providerName = provider || "vercel-ai";
+      const extractedError = extractProviderError(error, providerName);
       opts.observa.trackLLMCall({
         model,
         input: inputText,
@@ -816,23 +992,25 @@ function recordError3(req, error, start, opts, provider) {
         finishReason: null,
         responseId: null,
         operationName: "generate_text",
-        providerName
+        providerName,
         responseModel: model,
         temperature: sanitizedReq.temperature || null,
         maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
       });
       opts.observa.trackError({
-        errorType: error
-        errorMessage:
-        stackTrace: error
+        errorType: error?.name || extractedError.code || "UnknownError",
+        errorMessage: extractedError.message,
+        stackTrace: error?.stack || null,
         context: {
           request: sanitizedReq,
           model,
           input: inputText,
-          provider:
-          duration_ms: duration
+          provider: providerName,
+          duration_ms: duration,
+          status_code: extractedError.statusCode || null
         },
-        errorCategory:
+        errorCategory: extractedError.category,
+        errorCode: extractedError.code
       });
     }
   } catch (e) {
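
Note: in the Vercel AI SDK path, input arrives as either a prompt string or a messages array, and the new extraction handles both. A sketch against the ai package's generateText (the model value is assumed):

    // prompt form: inputText is the prompt itself
    await generateText({ model, prompt: "Summarize this repo" });
    // messages form: inputText joins m.content (or m.text) with newlines
    await generateText({
      model,
      messages: [
        { role: "system", content: "Be terse" },
        { role: "user", content: "Summarize this repo" }
      ]
    });
    // => inputText === "Be terse\nSummarize this repo"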
package/dist/index.js
CHANGED

(The diff body for this file is identical to package/dist/index.cjs above: it is the ESM build of the same source, so the same +223/-45 change set repeats with every hunk shifted 20 lines earlier. Hunks:

@@ -174,6 +174,96 @@ function getTraceContext() {
@@ -209,6 +299,9 @@ async function traceOpenAICall(originalFn, args, options) {
@@ -224,7 +317,15 @@ async function traceOpenAICall(originalFn, args, options) {
@@ -232,7 +333,15 @@ async function traceOpenAICall(originalFn, args, options) {
@@ -271,20 +380,26 @@ function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration)
@@ -301,17 +416,19 @@ function recordError(req, error, start, opts) {
@@ -354,6 +471,15 @@ async function traceAnthropicCall(originalFn, args, options) {
@@ -369,7 +495,15 @@ async function traceAnthropicCall(originalFn, args, options) {
@@ -377,7 +511,15 @@ async function traceAnthropicCall(originalFn, args, options) {
@@ -421,26 +563,32 @@ function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration
@@ -457,17 +605,19 @@ function recordError2(req, error, start, opts) {
@@ -546,6 +696,14 @@ async function traceGenerateText(originalFn, args, options) {
@@ -586,7 +744,9 @@ async function traceGenerateText(originalFn, args, options) {
@@ -644,6 +804,14 @@ async function traceStreamText(originalFn, args, options) {
@@ -678,7 +846,9 @@ async function traceStreamText(originalFn, args, options) {
@@ -718,7 +888,9 @@ async function traceStreamText(originalFn, args, options) {
@@ -765,21 +937,25 @@ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration
@@ -796,23 +972,25 @@ function recordError3(req, error, start, opts, provider) {)
package/package.json
CHANGED
(The single-line change, per the +1 -1 summary above, is the version field: 0.0.18 → 0.0.19.)