@dthink/bloop-sdk 0.4.0

package/package.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "name": "@dthink/bloop-sdk",
+   "version": "0.4.0",
+   "description": "Bloop error reporting and LLM tracing SDK for TypeScript/Node.js",
+   "main": "dist/index.js",
+   "types": "dist/index.d.ts",
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.js"
+     },
+     "./integrations": {
+       "types": "./dist/integrations/index.d.ts",
+       "import": "./dist/integrations/index.js"
+     }
+   },
+   "files": ["dist", "src"],
+   "scripts": {
+     "build": "tsc",
+     "test": "node --test tests/"
+   },
+   "keywords": ["bloop", "error-tracking", "llm", "tracing", "observability"],
+   "license": "MIT",
+   "engines": {
+     "node": ">=18"
+   },
+   "devDependencies": {
+     "typescript": "^5.0.0"
+   },
+   "peerDependencies": {
+     "openai": ">=4.0.0",
+     "@anthropic-ai/sdk": ">=0.20.0"
+   },
+   "peerDependenciesMeta": {
+     "openai": { "optional": true },
+     "@anthropic-ai/sdk": { "optional": true }
+   }
+ }
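
Usage note (not part of the published package): given the exports map above, the root and the "./integrations" subpath are the only two entry points, and the provider SDKs are optional peers that only need to be installed when the matching wrapper is used. A minimal import sketch:

    // root entry -> ./dist/index.js
    import { BloopClient } from "@dthink/bloop-sdk";
    // subpath entry -> ./dist/integrations/index.js
    import { wrapOpenAI, wrapAnthropic } from "@dthink/bloop-sdk/integrations";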
package/src/client.ts ADDED
@@ -0,0 +1,375 @@
+ /**
+  * Bloop error reporting and LLM tracing client for TypeScript/Node.js.
+  * Zero external dependencies.
+  */
+
+ import { createHmac, randomUUID } from "node:crypto";
+
+ export interface BloopClientOptions {
+   endpoint: string;
+   projectKey: string;
+   flushInterval?: number; // ms, default 5000
+   maxBufferSize?: number; // default 100
+   environment?: string;
+   release?: string;
+ }
+
+ export interface SpanData {
+   id: string;
+   span_type: string;
+   name: string;
+   model?: string;
+   provider?: string;
+   parent_span_id?: string;
+   input_tokens: number;
+   output_tokens: number;
+   cost: number;
+   latency_ms: number;
+   time_to_first_token_ms?: number;
+   status: string;
+   error_message?: string;
+   input?: string;
+   output?: string;
+   metadata?: Record<string, unknown>;
+   started_at: number;
+   ended_at?: number;
+ }
+
+ export interface TraceData {
+   id: string;
+   name: string;
+   status: string;
+   session_id?: string;
+   user_id?: string;
+   input?: string;
+   output?: string;
+   metadata?: Record<string, unknown>;
+   prompt_name?: string;
+   prompt_version?: string;
+   started_at: number;
+   ended_at?: number;
+   spans: SpanData[];
+ }
+
+ export class BloopClient {
+   private endpoint: string;
+   private projectKey: string;
+   private flushInterval: number;
+   private maxBufferSize: number;
+   private environment: string;
+   private release: string;
+
+   private errorBuffer: Record<string, unknown>[] = [];
+   private traceBuffer: TraceData[] = [];
+   private timer: ReturnType<typeof setInterval> | null = null;
+   private closed = false;
+
+   constructor(options: BloopClientOptions) {
+     this.endpoint = options.endpoint.replace(/\/+$/, "");
+     this.projectKey = options.projectKey;
+     this.flushInterval = options.flushInterval ?? 5000;
+     this.maxBufferSize = options.maxBufferSize ?? 100;
+     this.environment = options.environment ?? "production";
+     this.release = options.release ?? "";
+
+     this.timer = setInterval(() => this.flush(), this.flushInterval);
+     if (this.timer.unref) this.timer.unref(); // never keep the process alive
+   }
+
+   // ── Error Tracking ──
+
+   capture(params: {
+     errorType: string;
+     message: string;
+     source?: string;
+     stack?: string;
+     routeOrProcedure?: string;
+     screen?: string;
+     metadata?: Record<string, unknown>;
+   }): void {
+     if (this.closed) return; // events after close() are dropped
+     this.errorBuffer.push({
+       timestamp: Math.floor(Date.now() / 1000),
+       source: params.source ?? "javascript",
+       environment: this.environment,
+       release: this.release,
+       error_type: params.errorType,
+       message: params.message,
+       stack: params.stack ?? "",
+       route_or_procedure: params.routeOrProcedure ?? "",
+       screen: params.screen ?? "",
+       metadata: params.metadata ?? {},
+     });
+     if (this.errorBuffer.length >= this.maxBufferSize) {
+       this.flush();
+     }
+   }
+
+   // ── LLM Tracing ──
+
+   trace(options: {
+     name?: string;
+     traceId?: string;
+     sessionId?: string;
+     userId?: string;
+     promptName?: string;
+     promptVersion?: string;
+   } = {}): Trace {
+     return new Trace(this, {
+       name: options.name ?? "",
+       traceId: options.traceId ?? randomUUID().replace(/-/g, ""),
+       sessionId: options.sessionId,
+       userId: options.userId,
+       promptName: options.promptName,
+       promptVersion: options.promptVersion,
+     });
+   }
+
+   /** @internal */
+   _sendTrace(data: TraceData): void {
+     if (this.closed) return; // traces after close() are dropped
+     this.traceBuffer.push(data);
+     if (this.traceBuffer.length >= 10) {
+       this._flushTraces();
+     }
+   }
+
+   // ── Auto-Instrumentation ──
+
+   wrapOpenAI<T>(openaiClient: T): T {
+     const { wrapOpenAI } = require("./integrations/openai"); // lazy-load optional peer integration
+     return wrapOpenAI(openaiClient, this);
+   }
+
+   wrapAnthropic<T>(anthropicClient: T): T {
+     const { wrapAnthropic } = require("./integrations/anthropic"); // lazy-load optional peer integration
+     return wrapAnthropic(anthropicClient, this);
+   }
+
+   // ── Flush & Transport ──
+
+   flush(): void {
+     this._flushErrors();
+     this._flushTraces();
+   }
+
+   close(): void {
+     this.closed = true;
+     if (this.timer) {
+       clearInterval(this.timer);
+       this.timer = null;
+     }
+     this.flush();
+   }
+
+   private _flushErrors(): void {
+     if (this.errorBuffer.length === 0) return;
+     const events = [...this.errorBuffer];
+     this.errorBuffer = [];
+     if (events.length === 1) {
+       void this._post("/v1/ingest", events[0]);
+     } else {
+       void this._post("/v1/ingest/batch", { events });
+     }
+   }
+
+   private _flushTraces(): void {
+     if (this.traceBuffer.length === 0) return;
+     const traces = [...this.traceBuffer];
+     this.traceBuffer = [];
+     void this._post("/v1/traces/batch", { traces });
+   }
+
+   private async _post(path: string, payload: unknown): Promise<void> {
+     const body = JSON.stringify(payload);
+     const sig = createHmac("sha256", this.projectKey) // sign raw body; sent as X-Signature
+       .update(body)
+       .digest("hex");
+     try {
+       await fetch(`${this.endpoint}${path}`, {
+         method: "POST",
+         headers: {
+           "Content-Type": "application/json",
+           "X-Signature": sig,
+         },
+         body,
+       });
+     } catch {
+       // Silently drop on failure (never crash the app)
+     }
+   }
+ }
+
+ export class Trace {
+   readonly id: string;
+   name: string;
+   status: string = "completed";
+   sessionId?: string;
+   userId?: string;
+   input?: string;
+   output?: string;
+   metadata?: Record<string, unknown>;
+   promptName?: string;
+   promptVersion?: string;
+
+   private client: BloopClient;
+   private spans: SpanData[] = [];
+   private startedAt: number;
+   private endedAt?: number;
+
+   constructor(
+     client: BloopClient,
+     options: {
+       name: string;
+       traceId: string;
+       sessionId?: string;
+       userId?: string;
+       promptName?: string;
+       promptVersion?: string;
+     },
+   ) {
+     this.client = client;
+     this.id = options.traceId;
+     this.name = options.name;
+     this.sessionId = options.sessionId;
+     this.userId = options.userId;
+     this.promptName = options.promptName;
+     this.promptVersion = options.promptVersion;
+     this.startedAt = Date.now();
+   }
+
+   span(options: {
+     spanType?: string;
+     name?: string;
+     model?: string;
+     provider?: string;
+     parentSpanId?: string;
+   } = {}): Span {
+     return new Span(this, {
+       spanType: options.spanType ?? "generation",
+       name: options.name ?? "",
+       model: options.model,
+       provider: options.provider,
+       parentSpanId: options.parentSpanId,
+     });
+   }
+
+   /** @internal */
+   _addSpan(data: SpanData): void {
+     this.spans.push(data);
+   }
+
+   end(): void {
+     this.endedAt = Date.now();
+     const data: TraceData = {
+       id: this.id,
+       name: this.name,
+       status: this.status,
+       started_at: this.startedAt,
+       ended_at: this.endedAt,
+       spans: this.spans,
+     };
+     if (this.sessionId) data.session_id = this.sessionId;
+     if (this.userId) data.user_id = this.userId;
+     if (this.input !== undefined) data.input = this.input;
+     if (this.output !== undefined) data.output = this.output;
+     if (this.metadata) data.metadata = this.metadata;
+     if (this.promptName) data.prompt_name = this.promptName;
+     if (this.promptVersion) data.prompt_version = this.promptVersion;
+
+     this.client._sendTrace(data);
+   }
+ }
+
+ export class Span {
+   readonly id: string;
+   spanType: string;
+   name: string;
+   model?: string;
+   provider?: string;
+   parentSpanId?: string;
+   inputTokens: number = 0;
+   outputTokens: number = 0;
+   cost: number = 0;
+   latencyMs: number = 0;
+   timeToFirstTokenMs?: number;
+   status: string = "ok";
+   errorMessage?: string;
+   input?: string;
+   output?: string;
+   metadata?: Record<string, unknown>;
+
+   private trace: Trace;
+   private startedAt: number;
+
+   constructor(
+     trace: Trace,
+     options: {
+       spanType: string;
+       name: string;
+       model?: string;
+       provider?: string;
+       parentSpanId?: string;
+     },
+   ) {
+     this.trace = trace;
+     this.id = randomUUID().replace(/-/g, "");
+     this.spanType = options.spanType;
+     this.name = options.name;
+     this.model = options.model;
+     this.provider = options.provider;
+     this.parentSpanId = options.parentSpanId;
+     this.startedAt = Date.now();
+   }
+
+   setTokens(inputTokens: number, outputTokens: number): void {
+     this.inputTokens = inputTokens;
+     this.outputTokens = outputTokens;
+   }
+
+   setCost(cost: number): void {
+     this.cost = cost;
+   }
+
+   setLatency(latencyMs: number, timeToFirstTokenMs?: number): void {
+     this.latencyMs = latencyMs;
+     if (timeToFirstTokenMs !== undefined) {
+       this.timeToFirstTokenMs = timeToFirstTokenMs; // don't clobber a TTFT set during streaming
+     }
+   }
+
+   setError(message: string): void {
+     this.status = "error";
+     this.errorMessage = message;
+   }
+
+   end(): void {
+     const endedAt = Date.now();
+     if (this.latencyMs === 0) {
+       this.latencyMs = endedAt - this.startedAt;
+     }
+
+     const data: SpanData = {
+       id: this.id,
+       span_type: this.spanType,
+       name: this.name,
+       input_tokens: this.inputTokens,
+       output_tokens: this.outputTokens,
+       cost: this.cost,
+       latency_ms: this.latencyMs,
+       status: this.status,
+       started_at: this.startedAt,
+       ended_at: endedAt,
+     };
+     if (this.model) data.model = this.model;
+     if (this.provider) data.provider = this.provider;
+     if (this.parentSpanId) data.parent_span_id = this.parentSpanId;
+     if (this.timeToFirstTokenMs !== undefined) data.time_to_first_token_ms = this.timeToFirstTokenMs;
+     if (this.errorMessage) data.error_message = this.errorMessage;
+     if (this.input !== undefined) data.input = this.input;
+     if (this.output !== undefined) data.output = this.output;
+     if (this.metadata) data.metadata = this.metadata;
+
+     this.trace._addSpan(data);
+   }
+ }
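
Usage sketch for the client above (not part of the package; endpoint and key values are placeholders). Events are buffered in memory and flushed on a timer or when buffers fill; each POST body is signed with an HMAC-SHA256 of the raw JSON under the project key and sent as X-Signature, so a receiving server can authenticate payloads by recomputing the same digest.

    import { BloopClient } from "@dthink/bloop-sdk";

    const bloop = new BloopClient({
      endpoint: "https://bloop.example.com", // placeholder
      projectKey: process.env.BLOOP_PROJECT_KEY ?? "", // placeholder
      environment: "staging",
    });

    // Error tracking: buffered, then POSTed to /v1/ingest or /v1/ingest/batch.
    bloop.capture({ errorType: "TypeError", message: "x is not a function" });

    // Manual LLM tracing: a trace aggregates spans, then is batched to /v1/traces/batch.
    const trace = bloop.trace({ name: "summarize", userId: "u_123" });
    const span = trace.span({ name: "llm-call", model: "gpt-4o", provider: "openai" });
    span.setTokens(812, 204);
    span.end();   // latency falls back to wall time since the span started
    trace.end();  // hands the finished trace to the client's buffer

    bloop.close(); // stops the timer and flushes anything left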
package/src/index.ts ADDED
@@ -0,0 +1,4 @@
+ export { BloopClient, Trace, Span } from "./client";
+ export type { BloopClientOptions, TraceData, SpanData } from "./client";
+ export { wrapOpenAI } from "./integrations/openai";
+ export { wrapAnthropic } from "./integrations/anthropic";
package/src/integrations/anthropic.ts ADDED
@@ -0,0 +1,139 @@
+ /**
+  * Anthropic auto-instrumentation for bloop LLM tracing.
+  *
+  * Wraps `messages.create()` to automatically capture:
+  * - Model, tokens, latency, TTFT (streaming), errors
+  * - Cost is always 0 -- calculated server-side from pricing table
+  */
+
+ import type { BloopClient } from "../client";
+
+ function detectProvider(client: any): string {
+   try {
+     const baseUrl = String(client.baseURL || client._baseURL || "");
+     if (baseUrl.includes("anthropic.com")) return "anthropic";
+     const url = new URL(baseUrl);
+     return url.hostname.split(".")[0] || "anthropic";
+   } catch {
+     return "anthropic";
+   }
+ }
+
+ export function wrapAnthropic<T>(
+   anthropicClient: T,
+   bloopClient: BloopClient,
+ ): T {
+   const client = anthropicClient as any;
+   const provider = detectProvider(client);
+   const originalCreate = client.messages.create.bind(client.messages);
+
+   client.messages.create = function tracedCreate(...args: any[]): any {
+     const params = args[0] || {};
+     const model: string = params.model || "unknown";
+     const stream: boolean = params.stream || false;
+     const startMs = Date.now();
+
+     const trace = bloopClient.trace({ name: `${provider}/${model}` });
+     const span = trace.span({
+       spanType: "generation",
+       name: "messages.create",
+       model,
+       provider,
+     });
+
+     if (stream) {
+       return handleStreaming(originalCreate, args, trace, span, startMs, model);
+     }
+
+     return originalCreate(...args).then(
+       (response: any) => {
+         const endMs = Date.now();
+
+         const usage = response?.usage;
+         if (usage) {
+           span.setTokens(
+             usage.input_tokens || 0,
+             usage.output_tokens || 0,
+           );
+         }
+
+         span.setLatency(endMs - startMs);
+         span.cost = 0; // Server-side pricing
+         span.model = response?.model || model;
+         span.end();
+         trace.end();
+         return response;
+       },
+       (err: Error) => {
+         const endMs = Date.now();
+         span.setLatency(endMs - startMs);
+         span.setError(err.message);
+         span.end();
+         trace.status = "error";
+         trace.end();
+         throw err;
+       },
+     );
+   };
+
+   return anthropicClient;
+ }
+
+ async function* handleStreaming(
+   originalCreate: (...args: any[]) => Promise<any>,
+   args: any[],
+   trace: ReturnType<BloopClient["trace"]>,
+   span: ReturnType<ReturnType<BloopClient["trace"]>["span"]>,
+   startMs: number,
+   model: string,
+ ): AsyncGenerator<any> {
+   let firstTokenSeen = false;
+   let inputTokens = 0;
+   let outputTokens = 0;
+   let resolvedModel = model;
+
+   try {
+     const stream = await originalCreate(...args);
+
+     for await (const event of stream) {
+       const eventType = event?.type || "";
+
+       if (!firstTokenSeen && eventType === "content_block_delta") {
+         firstTokenSeen = true;
+         span.timeToFirstTokenMs = Date.now() - startMs;
+       }
+
+       // Track usage from message_start event
+       if (eventType === "message_start" && event.message) {
+         resolvedModel = event.message.model || resolvedModel;
+         if (event.message.usage) {
+           inputTokens = event.message.usage.input_tokens || 0;
+         }
+       }
+
+       // Track output tokens from message_delta event
+       if (eventType === "message_delta" && event.usage) {
+         outputTokens = event.usage.output_tokens || 0;
+       }
+
+       yield event;
+     }
+
+     const endMs = Date.now();
+     span.setTokens(inputTokens, outputTokens);
+     span.setLatency(endMs - startMs);
+     span.cost = 0;
+     span.model = resolvedModel;
+     span.end();
+     trace.end();
+   } catch (err: any) {
+     const endMs = Date.now();
+     span.setTokens(inputTokens, outputTokens);
+     span.setLatency(endMs - startMs);
+     span.setError(err.message);
+     span.end();
+     trace.status = "error";
+     trace.end();
+     throw err;
+   }
+ }
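
Wrapper usage sketch (not part of the package; assumes @anthropic-ai/sdk is installed, model ids are placeholders). wrapAnthropic patches messages.create in place and returns the same client. Note that for streaming calls the wrapper returns a plain async generator rather than the Anthropic SDK's Stream object, so traced streams only support for-await iteration:

    import Anthropic from "@anthropic-ai/sdk";
    import { BloopClient } from "@dthink/bloop-sdk";
    import { wrapAnthropic } from "@dthink/bloop-sdk/integrations";

    const bloop = new BloopClient({
      endpoint: "https://bloop.example.com", // placeholder
      projectKey: process.env.BLOOP_PROJECT_KEY ?? "",
    });
    const anthropic = wrapAnthropic(new Anthropic(), bloop);

    // Non-streaming: tokens come from response.usage, latency from wall time.
    const msg = await anthropic.messages.create({
      model: "claude-sonnet-4-5",
      max_tokens: 256,
      messages: [{ role: "user", content: "hello" }],
    });

    // Streaming: TTFT from the first content_block_delta, input tokens from
    // message_start, output tokens from message_delta.
    const events: any = anthropic.messages.create({
      model: "claude-sonnet-4-5",
      max_tokens: 256,
      stream: true,
      messages: [{ role: "user", content: "hi" }],
    });
    for await (const event of events) {
      // events pass through unchanged while the span records usage
    }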
package/src/integrations/index.ts ADDED
@@ -0,0 +1,2 @@
+ export { wrapOpenAI } from "./openai";
+ export { wrapAnthropic } from "./anthropic";
package/src/integrations/openai.ts ADDED
@@ -0,0 +1,142 @@
+ /**
+  * OpenAI auto-instrumentation for bloop LLM tracing.
+  *
+  * Wraps `chat.completions.create()` to automatically capture:
+  * - Model, tokens, latency, TTFT (streaming), errors
+  * - Cost is always 0 -- calculated server-side from pricing table
+  */
+
+ import type { BloopClient } from "../client";
+
+ const PROVIDER_MAP: Record<string, string> = {
+   "api.openai.com": "openai",
+   "api.minimax.io": "minimax",
+   "api.moonshot.ai": "kimi",
+   "generativelanguage.googleapis.com": "google",
+ };
+
+ function detectProvider(client: any): string {
+   try {
+     const baseUrl = String(client.baseURL || "");
+     for (const [domain, provider] of Object.entries(PROVIDER_MAP)) {
+       if (baseUrl.includes(domain)) return provider;
+     }
+     const url = new URL(baseUrl);
+     return url.hostname.split(".")[0] || "openai";
+   } catch {
+     return "openai";
+   }
+ }
+
+ export function wrapOpenAI<T>(openaiClient: T, bloopClient: BloopClient): T {
+   const client = openaiClient as any;
+   const provider = detectProvider(client);
+   const originalCreate = client.chat.completions.create.bind(
+     client.chat.completions,
+   );
+
+   client.chat.completions.create = function tracedCreate(
+     ...args: any[]
+   ): any {
+     const params = args[0] || {};
+     const model: string = params.model || "unknown";
+     const stream: boolean = params.stream || false;
+     const startMs = Date.now();
+
+     const trace = bloopClient.trace({ name: `${provider}/${model}` });
+     const span = trace.span({
+       spanType: "generation",
+       name: "chat.completions.create",
+       model,
+       provider,
+     });
+
+     if (stream) {
+       return handleStreaming(originalCreate, args, trace, span, startMs, model);
+     }
+
+     return originalCreate(...args).then(
+       (response: any) => {
+         const endMs = Date.now();
+
+         const usage = response?.usage;
+         if (usage) {
+           span.setTokens(
+             usage.prompt_tokens || 0,
+             usage.completion_tokens || 0,
+           );
+         }
+
+         span.setLatency(endMs - startMs);
+         span.cost = 0; // Server-side pricing
+         span.model = response?.model || model;
+         span.end();
+         trace.end();
+         return response;
+       },
+       (err: Error) => {
+         const endMs = Date.now();
+         span.setLatency(endMs - startMs);
+         span.setError(err.message);
+         span.end();
+         trace.status = "error";
+         trace.end();
+         throw err;
+       },
+     );
+   };
+
+   return openaiClient;
+ }
+
+ async function* handleStreaming(
+   originalCreate: (...args: any[]) => Promise<any>,
+   args: any[],
+   trace: ReturnType<BloopClient["trace"]>,
+   span: ReturnType<ReturnType<BloopClient["trace"]>["span"]>,
+   startMs: number,
+   model: string,
+ ): AsyncGenerator<any> {
+   let firstTokenSeen = false;
+   let inputTokens = 0;
+   let outputTokens = 0;
+   let resolvedModel = model;
+
+   try {
+     const stream = await originalCreate(...args);
+
+     for await (const chunk of stream) {
+       if (!firstTokenSeen) { // TTFT: first streamed chunk (may be a role-only delta)
+         firstTokenSeen = true;
+         span.timeToFirstTokenMs = Date.now() - startMs;
+       }
+
+       if (chunk.model) resolvedModel = chunk.model;
+
+       // Track usage from final chunk (OpenAI includes it with stream_options)
+       if (chunk.usage) {
+         inputTokens = chunk.usage.prompt_tokens || 0;
+         outputTokens = chunk.usage.completion_tokens || 0;
+       }
+
+       yield chunk;
+     }
+
+     const endMs = Date.now();
+     span.setTokens(inputTokens, outputTokens);
+     span.setLatency(endMs - startMs);
+     span.cost = 0;
+     span.model = resolvedModel;
+     span.end();
+     trace.end();
+   } catch (err: any) {
+     const endMs = Date.now();
+     span.setTokens(inputTokens, outputTokens);
+     span.setLatency(endMs - startMs);
+     span.setError(err.message);
+     span.end();
+     trace.status = "error";
+     trace.end();
+     throw err;
+   }
+ }
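
Matching usage sketch for the OpenAI wrapper (not part of the package; assumes the openai package is installed, model ids are placeholders). For streamed completions the usage-tracking branch above only fires when OpenAI appends a final usage-bearing chunk, which requires stream_options.include_usage:

    import OpenAI from "openai";
    import { BloopClient } from "@dthink/bloop-sdk";
    import { wrapOpenAI } from "@dthink/bloop-sdk/integrations";

    const bloop = new BloopClient({
      endpoint: "https://bloop.example.com", // placeholder
      projectKey: process.env.BLOOP_PROJECT_KEY ?? "",
    });
    const openai = wrapOpenAI(new OpenAI(), bloop);

    const chunks: any = openai.chat.completions.create({
      model: "gpt-4o-mini",
      stream: true,
      stream_options: { include_usage: true }, // final chunk then carries usage
      messages: [{ role: "user", content: "hello" }],
    });
    for await (const chunk of chunks) {
      // chunks pass through unchanged while the span records TTFT and usage
    }
    bloop.close();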