@dthink/bloop-sdk 0.4.0 → 0.5.0
This diff shows the changes between publicly released versions of the package as they appear in the supported public registries, and is provided for informational purposes only.
- package/dist/client.d.ts +2 -0
- package/dist/client.js +8 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +4 -1
- package/dist/integrations/index.d.ts +1 -0
- package/dist/integrations/index.js +4 -1
- package/dist/integrations/pi-ai.d.ts +34 -0
- package/dist/integrations/pi-ai.js +126 -0
- package/package.json +1 -1
- package/src/client.ts +10 -0
- package/src/index.ts +1 -0
- package/src/integrations/index.ts +1 -0
- package/src/integrations/pi-ai.ts +144 -0
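
The headline change is a pi-ai integration: a new `integrations/pi-ai` module plus two `BloopClient` methods, `wrapPiAiComplete` and `wrapPiAiStream`. Pieced together from the docstrings in the diffs below, here is an end-to-end sketch of the new surface; the constructor options and the `context` shape are illustrative assumptions, not taken from this diff:

    import { BloopClient } from "@dthink/bloop-sdk";
    import { complete, stream, getModel } from "@mariozechner/pi-ai";

    const bloop = new BloopClient({ apiKey: process.env.BLOOP_API_KEY }); // options assumed
    const model = getModel("openai", "gpt-4o");
    const context = { messages: [{ role: "user", content: "Hello" }] }; // shape assumed

    // Non-streaming: model, provider, tokens, latency, and errors are traced.
    const tracedComplete = bloop.wrapPiAiComplete(complete);
    const response = await tracedComplete(model, context);

    // Streaming: TTFT is recorded on the first "text"/"thinking" event.
    const tracedStream = bloop.wrapPiAiStream(stream);
    const events = tracedStream(model, context);
    for await (const event of events) {
      // handle incremental events
    }
    const message = await events.result(); // finalizes tokens/latency on the span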
package/dist/client.d.ts
CHANGED
@@ -78,6 +78,8 @@ export declare class BloopClient {
     _sendTrace(data: TraceData): void;
     wrapOpenAI<T>(openaiClient: T): T;
     wrapAnthropic<T>(anthropicClient: T): T;
+    wrapPiAiComplete(completeFn: Function): Function;
+    wrapPiAiStream(streamFn: Function): Function;
     flush(): void;
     close(): void;
     private _flushErrors;
package/dist/client.js
CHANGED
@@ -73,6 +73,14 @@ class BloopClient {
         const { wrapAnthropic } = require("./integrations/anthropic");
         return wrapAnthropic(anthropicClient, this);
     }
+    wrapPiAiComplete(completeFn) {
+        const { wrapPiAiComplete } = require("./integrations/pi-ai");
+        return wrapPiAiComplete(completeFn, this);
+    }
+    wrapPiAiStream(streamFn) {
+        const { wrapPiAiStream } = require("./integrations/pi-ai");
+        return wrapPiAiStream(streamFn, this);
+    }
     // ── Flush & Transport ──
     flush() {
         this._flushErrors();
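
A design note on the two new methods: like the existing OpenAI and Anthropic wrappers, they load `./integrations/pi-ai` with a lazy `require()` inside the method body, and (as the new module below shows) the integration never imports `@mariozechner/pi-ai` itself; it receives `complete`/`stream` as arguments. pi-ai therefore stays entirely out of the SDK's dependency graph unless a caller opts in.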
package/dist/index.d.ts
CHANGED
@@ -2,3 +2,4 @@ export { BloopClient, Trace, Span } from "./client";
 export type { BloopClientOptions, TraceData, SpanData } from "./client";
 export { wrapOpenAI } from "./integrations/openai";
 export { wrapAnthropic } from "./integrations/anthropic";
+export { wrapPiAiComplete, wrapPiAiStream } from "./integrations/pi-ai";
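
Both wrappers are re-exported from the package root, so they can also be used standalone with an explicit client argument instead of via the `BloopClient` methods. A minimal sketch (constructor options assumed as before):

    import { BloopClient, wrapPiAiComplete } from "@dthink/bloop-sdk";
    import { complete } from "@mariozechner/pi-ai";

    // apiKey is an assumed option; see BloopClientOptions for the real shape.
    const bloop = new BloopClient({ apiKey: process.env.BLOOP_API_KEY });
    const tracedComplete = wrapPiAiComplete(complete, bloop);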
package/dist/index.js
CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.wrapAnthropic = exports.wrapOpenAI = exports.Span = exports.Trace = exports.BloopClient = void 0;
+exports.wrapPiAiStream = exports.wrapPiAiComplete = exports.wrapAnthropic = exports.wrapOpenAI = exports.Span = exports.Trace = exports.BloopClient = void 0;
 var client_1 = require("./client");
 Object.defineProperty(exports, "BloopClient", { enumerable: true, get: function () { return client_1.BloopClient; } });
 Object.defineProperty(exports, "Trace", { enumerable: true, get: function () { return client_1.Trace; } });
@@ -9,3 +9,6 @@ var openai_1 = require("./integrations/openai");
 Object.defineProperty(exports, "wrapOpenAI", { enumerable: true, get: function () { return openai_1.wrapOpenAI; } });
 var anthropic_1 = require("./integrations/anthropic");
 Object.defineProperty(exports, "wrapAnthropic", { enumerable: true, get: function () { return anthropic_1.wrapAnthropic; } });
+var pi_ai_1 = require("./integrations/pi-ai");
+Object.defineProperty(exports, "wrapPiAiComplete", { enumerable: true, get: function () { return pi_ai_1.wrapPiAiComplete; } });
+Object.defineProperty(exports, "wrapPiAiStream", { enumerable: true, get: function () { return pi_ai_1.wrapPiAiStream; } });
package/dist/integrations/index.js
CHANGED
@@ -1,7 +1,10 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.wrapAnthropic = exports.wrapOpenAI = void 0;
+exports.wrapPiAiStream = exports.wrapPiAiComplete = exports.wrapAnthropic = exports.wrapOpenAI = void 0;
 var openai_1 = require("./openai");
 Object.defineProperty(exports, "wrapOpenAI", { enumerable: true, get: function () { return openai_1.wrapOpenAI; } });
 var anthropic_1 = require("./anthropic");
 Object.defineProperty(exports, "wrapAnthropic", { enumerable: true, get: function () { return anthropic_1.wrapAnthropic; } });
+var pi_ai_1 = require("./pi-ai");
+Object.defineProperty(exports, "wrapPiAiComplete", { enumerable: true, get: function () { return pi_ai_1.wrapPiAiComplete; } });
+Object.defineProperty(exports, "wrapPiAiStream", { enumerable: true, get: function () { return pi_ai_1.wrapPiAiStream; } });
package/dist/integrations/pi-ai.d.ts
ADDED
@@ -0,0 +1,34 @@
+/**
+ * pi-ai (@mariozechner/pi-ai) auto-instrumentation for bloop LLM tracing.
+ *
+ * Wraps `complete()` and `stream()` to automatically capture:
+ * - Model, provider, tokens, latency, TTFT (streaming), errors
+ * - Cost is always 0 -- calculated server-side from pricing table
+ *
+ * Works with all pi-ai providers: OpenAI, Anthropic, Google, Mistral,
+ * OpenRouter, Groq, xAI, Cerebras, Azure, Bedrock, and any
+ * OpenAI-compatible endpoint.
+ */
+import type { BloopClient } from "../client";
+/**
+ * Wrap pi-ai's `complete()` function for automatic tracing.
+ *
+ * Usage:
+ *   import { complete, getModel } from "@mariozechner/pi-ai";
+ *   const tracedComplete = bloop.wrapPiAiComplete(complete);
+ *   const model = getModel("openai", "gpt-4o");
+ *   const response = await tracedComplete(model, context);
+ */
+export declare function wrapPiAiComplete(completeFn: Function, bloopClient: BloopClient): Function;
+/**
+ * Wrap pi-ai's `stream()` function for automatic tracing.
+ *
+ * Usage:
+ *   import { stream, getModel } from "@mariozechner/pi-ai";
+ *   const tracedStream = bloop.wrapPiAiStream(stream);
+ *   const model = getModel("openai", "gpt-4o");
+ *   const eventStream = tracedStream(model, context);
+ *   for await (const event of eventStream) { ... }
+ *   const result = await eventStream.result();
+ */
+export declare function wrapPiAiStream(streamFn: Function, bloopClient: BloopClient): Function;
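
Note that both declarations type their inputs and outputs as bare `Function`, so TypeScript callers lose pi-ai's parameter and return types through the wrapper. A cast at the call site restores them; a sketch:

    import { complete } from "@mariozechner/pi-ai";

    // Cast back to the original signature to keep type checking at call sites.
    const tracedComplete = bloop.wrapPiAiComplete(complete) as typeof complete;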
package/dist/integrations/pi-ai.js
ADDED
@@ -0,0 +1,126 @@
+"use strict";
+/**
+ * pi-ai (@mariozechner/pi-ai) auto-instrumentation for bloop LLM tracing.
+ *
+ * Wraps `complete()` and `stream()` to automatically capture:
+ * - Model, provider, tokens, latency, TTFT (streaming), errors
+ * - Cost is always 0 -- calculated server-side from pricing table
+ *
+ * Works with all pi-ai providers: OpenAI, Anthropic, Google, Mistral,
+ * OpenRouter, Groq, xAI, Cerebras, Azure, Bedrock, and any
+ * OpenAI-compatible endpoint.
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.wrapPiAiComplete = wrapPiAiComplete;
+exports.wrapPiAiStream = wrapPiAiStream;
+/**
+ * Wrap pi-ai's `complete()` function for automatic tracing.
+ *
+ * Usage:
+ *   import { complete, getModel } from "@mariozechner/pi-ai";
+ *   const tracedComplete = bloop.wrapPiAiComplete(complete);
+ *   const model = getModel("openai", "gpt-4o");
+ *   const response = await tracedComplete(model, context);
+ */
+function wrapPiAiComplete(completeFn, bloopClient) {
+    return async function tracedComplete(model, context, options) {
+        const modelName = model?.name || model?.id || "unknown";
+        const provider = model?.provider || "unknown";
+        const startMs = Date.now();
+        const trace = bloopClient.trace({ name: `${provider}/${modelName}` });
+        const span = trace.span({
+            spanType: "generation",
+            name: "complete",
+            model: modelName,
+            provider,
+        });
+        try {
+            const response = await completeFn(model, context, options);
+            const endMs = Date.now();
+            // pi-ai AssistantMessage has usage.input, usage.output, usage.totalTokens
+            const usage = response?.usage;
+            if (usage) {
+                span.setTokens(usage.input || 0, usage.output || 0);
+            }
+            span.setLatency(endMs - startMs);
+            span.cost = 0; // Server-side pricing
+            span.end();
+            trace.end();
+            return response;
+        }
+        catch (err) {
+            const endMs = Date.now();
+            span.setLatency(endMs - startMs);
+            span.setError(err.message);
+            span.end();
+            trace.status = "error";
+            trace.end();
+            throw err;
+        }
+    };
+}
+/**
+ * Wrap pi-ai's `stream()` function for automatic tracing.
+ *
+ * Usage:
+ *   import { stream, getModel } from "@mariozechner/pi-ai";
+ *   const tracedStream = bloop.wrapPiAiStream(stream);
+ *   const model = getModel("openai", "gpt-4o");
+ *   const eventStream = tracedStream(model, context);
+ *   for await (const event of eventStream) { ... }
+ *   const result = await eventStream.result();
+ */
+function wrapPiAiStream(streamFn, bloopClient) {
+    return function tracedStream(model, context, options) {
+        const modelName = model?.name || model?.id || "unknown";
+        const provider = model?.provider || "unknown";
+        const startMs = Date.now();
+        const trace = bloopClient.trace({ name: `${provider}/${modelName}` });
+        const span = trace.span({
+            spanType: "generation",
+            name: "stream",
+            model: modelName,
+            provider,
+        });
+        const originalStream = streamFn(model, context, options);
+        let firstTokenSeen = false;
+        // Wrap the async iterable to intercept events
+        const wrappedStream = {
+            [Symbol.asyncIterator]: async function* () {
+                try {
+                    for await (const event of originalStream) {
+                        if (!firstTokenSeen && (event?.type === "text" || event?.type === "thinking")) {
+                            firstTokenSeen = true;
+                            span.timeToFirstTokenMs = Date.now() - startMs;
+                        }
+                        yield event;
+                    }
+                }
+                catch (err) {
+                    const endMs = Date.now();
+                    span.setLatency(endMs - startMs);
+                    span.setError(err.message);
+                    span.end();
+                    trace.status = "error";
+                    trace.end();
+                    throw err;
+                }
+            },
+            // Preserve the result() method that returns the final AssistantMessage
+            result: async () => {
+                const message = await originalStream.result();
+                const endMs = Date.now();
+                const usage = message?.usage;
+                if (usage) {
+                    span.setTokens(usage.input || 0, usage.output || 0);
+                }
+                span.setLatency(endMs - startMs);
+                span.cost = 0;
+                span.end();
+                trace.end();
+                return message;
+            },
+        };
+        return wrappedStream;
+    };
+}
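
One lifecycle detail worth flagging in the stream wrapper above: the generation span is finalized only by `result()` (or by an error thrown during iteration); merely exhausting the event iterator never calls `span.end()`. Callers should therefore always await `result()` after the loop, as the docstring's usage shows. Continuing the earlier sketch (the event field read in the loop is an assumption about pi-ai's event shape):

    const events = tracedStream(model, context);
    for await (const event of events) {
      if (event.type === "text") process.stdout.write(event.text ?? ""); // field assumed
    }
    // Required: records tokens and latency, then ends the span and trace.
    const message = await events.result();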
package/package.json
CHANGED
package/src/client.ts
CHANGED
@@ -144,6 +144,16 @@ export class BloopClient {
     return wrapAnthropic(anthropicClient, this);
   }
 
+  wrapPiAiComplete(completeFn: Function): Function {
+    const { wrapPiAiComplete } = require("./integrations/pi-ai");
+    return wrapPiAiComplete(completeFn, this);
+  }
+
+  wrapPiAiStream(streamFn: Function): Function {
+    const { wrapPiAiStream } = require("./integrations/pi-ai");
+    return wrapPiAiStream(streamFn, this);
+  }
+
   // ── Flush & Transport ──
 
   flush(): void {
package/src/index.ts
CHANGED
@@ -2,3 +2,4 @@ export { BloopClient, Trace, Span } from "./client";
 export type { BloopClientOptions, TraceData, SpanData } from "./client";
 export { wrapOpenAI } from "./integrations/openai";
 export { wrapAnthropic } from "./integrations/anthropic";
+export { wrapPiAiComplete, wrapPiAiStream } from "./integrations/pi-ai";
package/src/integrations/pi-ai.ts
ADDED
@@ -0,0 +1,144 @@
+/**
+ * pi-ai (@mariozechner/pi-ai) auto-instrumentation for bloop LLM tracing.
+ *
+ * Wraps `complete()` and `stream()` to automatically capture:
+ * - Model, provider, tokens, latency, TTFT (streaming), errors
+ * - Cost is always 0 -- calculated server-side from pricing table
+ *
+ * Works with all pi-ai providers: OpenAI, Anthropic, Google, Mistral,
+ * OpenRouter, Groq, xAI, Cerebras, Azure, Bedrock, and any
+ * OpenAI-compatible endpoint.
+ */
+
+import type { BloopClient } from "../client";
+
+/**
+ * Wrap pi-ai's `complete()` function for automatic tracing.
+ *
+ * Usage:
+ *   import { complete, getModel } from "@mariozechner/pi-ai";
+ *   const tracedComplete = bloop.wrapPiAiComplete(complete);
+ *   const model = getModel("openai", "gpt-4o");
+ *   const response = await tracedComplete(model, context);
+ */
+export function wrapPiAiComplete(
+  completeFn: Function,
+  bloopClient: BloopClient,
+): Function {
+  return async function tracedComplete(model: any, context: any, options?: any) {
+    const modelName: string = model?.name || model?.id || "unknown";
+    const provider: string = model?.provider || "unknown";
+    const startMs = Date.now();
+
+    const trace = bloopClient.trace({ name: `${provider}/${modelName}` });
+    const span = trace.span({
+      spanType: "generation",
+      name: "complete",
+      model: modelName,
+      provider,
+    });
+
+    try {
+      const response = await completeFn(model, context, options);
+      const endMs = Date.now();
+
+      // pi-ai AssistantMessage has usage.input, usage.output, usage.totalTokens
+      const usage = response?.usage;
+      if (usage) {
+        span.setTokens(
+          usage.input || 0,
+          usage.output || 0,
+        );
+      }
+
+      span.setLatency(endMs - startMs);
+      span.cost = 0; // Server-side pricing
+      span.end();
+      trace.end();
+      return response;
+    } catch (err: any) {
+      const endMs = Date.now();
+      span.setLatency(endMs - startMs);
+      span.setError(err.message);
+      span.end();
+      trace.status = "error";
+      trace.end();
+      throw err;
+    }
+  };
+}
+
+/**
+ * Wrap pi-ai's `stream()` function for automatic tracing.
+ *
+ * Usage:
+ *   import { stream, getModel } from "@mariozechner/pi-ai";
+ *   const tracedStream = bloop.wrapPiAiStream(stream);
+ *   const model = getModel("openai", "gpt-4o");
+ *   const eventStream = tracedStream(model, context);
+ *   for await (const event of eventStream) { ... }
+ *   const result = await eventStream.result();
+ */
+export function wrapPiAiStream(
+  streamFn: Function,
+  bloopClient: BloopClient,
+): Function {
+  return function tracedStream(model: any, context: any, options?: any) {
+    const modelName: string = model?.name || model?.id || "unknown";
+    const provider: string = model?.provider || "unknown";
+    const startMs = Date.now();
+
+    const trace = bloopClient.trace({ name: `${provider}/${modelName}` });
+    const span = trace.span({
+      spanType: "generation",
+      name: "stream",
+      model: modelName,
+      provider,
+    });
+
+    const originalStream = streamFn(model, context, options);
+    let firstTokenSeen = false;
+
+    // Wrap the async iterable to intercept events
+    const wrappedStream = {
+      [Symbol.asyncIterator]: async function* () {
+        try {
+          for await (const event of originalStream) {
+            if (!firstTokenSeen && (event?.type === "text" || event?.type === "thinking")) {
+              firstTokenSeen = true;
+              span.timeToFirstTokenMs = Date.now() - startMs;
+            }
+            yield event;
+          }
+        } catch (err: any) {
+          const endMs = Date.now();
+          span.setLatency(endMs - startMs);
+          span.setError(err.message);
+          span.end();
+          trace.status = "error";
+          trace.end();
+          throw err;
+        }
+      },
+
+      // Preserve the result() method that returns the final AssistantMessage
+      result: async () => {
+        const message = await originalStream.result();
+        const endMs = Date.now();
+
+        const usage = message?.usage;
+        if (usage) {
+          span.setTokens(usage.input || 0, usage.output || 0);
+        }
+
+        span.setLatency(endMs - startMs);
+        span.cost = 0;
+        span.end();
+        trace.end();
+        return message;
+      },
+    };
+
+    return wrappedStream;
+  };
+}
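
Because the wrappers call `completeFn` with exactly the arguments they receive and only read `response.usage`, they are easy to exercise against a stub. A minimal sketch (constructor options assumed; the stubbed shapes mirror the fields the wrapper reads):

    import { BloopClient } from "@dthink/bloop-sdk";

    const bloop = new BloopClient({ apiKey: "test" }); // options assumed

    // Stub mimicking a pi-ai AssistantMessage with the usage fields read above.
    const fakeComplete = async () => ({
      role: "assistant",
      usage: { input: 12, output: 34, totalTokens: 46 },
    });

    const traced = bloop.wrapPiAiComplete(fakeComplete);
    await traced({ provider: "openai", id: "gpt-4o" }, { messages: [] });
    // Result: one trace "openai/gpt-4o" containing a "generation" span named
    // "complete" with tokens 12/34, cost 0, and measured latency.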