agentracer 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +527 -0
- package/dist/anthropic.d.mts +10 -0
- package/dist/anthropic.d.ts +10 -0
- package/dist/anthropic.js +219 -0
- package/dist/anthropic.mjs +109 -0
- package/dist/chunk-72GBZ4TW.mjs +160 -0
- package/dist/chunk-HCE5ELLK.mjs +66 -0
- package/dist/gemini.d.mts +5 -0
- package/dist/gemini.d.ts +5 -0
- package/dist/gemini.js +243 -0
- package/dist/gemini.mjs +134 -0
- package/dist/index.d.mts +51 -0
- package/dist/index.d.ts +51 -0
- package/dist/index.js +183 -0
- package/dist/index.mjs +20 -0
- package/dist/openai.d.mts +10 -0
- package/dist/openai.d.ts +10 -0
- package/dist/openai.js +223 -0
- package/dist/openai.mjs +113 -0
- package/package.json +85 -0
package/dist/openai.js
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
"use strict";
// Bundler-generated CommonJS interop helpers (esbuild/tsup runtime shims).
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines enumerable lazy getters on `target` for each entry of `all`.
var __export = (target, all) => {
  for (var name in all) {
    __defProp(target, name, { get: all[name], enumerable: true });
  }
};
// Mirrors own properties of `from` onto `to` as forwarding getters,
// skipping the `except` key and keys already present on `to`.
var __copyProps = (to, from, except, desc) => {
  const copyable = from && (typeof from === "object" || typeof from === "function");
  if (copyable) {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) continue;
      desc = __getOwnPropDesc(from, key);
      __defProp(to, key, {
        get: () => from[key],
        enumerable: !desc || desc.enumerable
      });
    }
  }
  return to;
};
// Produces a CJS module object flagged as an ES module for interop consumers.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/openai.ts
// Public export surface of the CJS bundle; __export installs lazy getters,
// so the referenced bindings may be defined later in the file.
var openai_exports = {};
__export(openai_exports, {
  TrackedOpenAI: () => TrackedOpenAI,
  _setClientForTesting: () => _setClientForTesting,
  openai: () => openai
});
module.exports = __toCommonJS(openai_exports);
|
|
28
|
+
|
|
29
|
+
// src/index.ts
var import_async_hooks = require("async_hooks");
// Global SDK configuration; presumably mutated by an init() in the main
// entry point (not visible in this file) — TODO confirm.
var config = {
  trackerApiKey: "",
  projectId: "",
  environment: "production",
  host: "https://api.agentracer.dev",
  debug: false,
  enabled: true
};
// Async-context storages: the current feature tag and the active run (if any).
var featureTagStorage = new import_async_hooks.AsyncLocalStorage();
var runStorage = new import_async_hooks.AsyncLocalStorage();
|
|
41
|
+
// Delivers one telemetry payload to the ingest API. Best-effort by design:
// network failures never propagate to the caller, but in debug mode they are
// logged instead of being silently discarded (previously even debug runs gave
// no indication that delivery failed, and the response was bound but unused).
async function sendTelemetry(payload) {
  if (!config.enabled) return;
  try {
    if (config.debug) console.log("[agentracer]", payload);
    await fetch(`${config.host}/api/ingest`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-api-key": config.trackerApiKey
      },
      body: JSON.stringify(payload),
      // Hard 2s cap so telemetry can never stall the host application.
      signal: AbortSignal.timeout(2e3)
    });
  } catch (err) {
    // Telemetry must never break the host app; surface failures only in debug.
    if (config.debug) console.error("[agentracer] telemetry failed:", err);
  }
}
|
|
57
|
+
// Records one LLM call. Resolves the feature tag and run context from
// AsyncLocalStorage when not supplied explicitly, optionally reports a run
// step, then sends the main telemetry payload.
async function track(options) {
  const currentTag = featureTagStorage.getStore();
  let runId = options.runId;
  let stepIndex = options.stepIndex;
  const activeRun = runStorage.getStore();
  if (activeRun && runId == null) {
    // Inside an ambient run context: adopt its id and claim the next step
    // index. _nextStep() mutates the run's counter, so it must run exactly once.
    runId = activeRun.runId;
    stepIndex = activeRun._nextStep();
    // Fire-and-forget step report. cost_usd is always 0 here — presumably
    // computed server-side; TODO confirm against the ingest API.
    sendRunApi("/api/runs/step", {
      project_id: config.projectId,
      run_id: activeRun.runId,
      step_index: stepIndex,
      step_type: "llm_call",
      model: options.model,
      provider: options.provider ?? "custom",
      input_tokens: options.inputTokens,
      output_tokens: options.outputTokens,
      cached_tokens: options.cachedTokens ?? 0,
      cost_usd: 0,
      latency_ms: options.latencyMs,
      success: options.success ?? true,
      error_type: options.errorType ?? null
    }).catch(() => {
    });
  }
  const payload = {
    project_id: config.projectId,
    provider: options.provider ?? "custom",
    model: options.model,
    feature_tag: options.featureTag ?? currentTag ?? "unknown",
    input_tokens: options.inputTokens,
    output_tokens: options.outputTokens,
    cached_tokens: options.cachedTokens ?? 0,
    latency_ms: options.latencyMs,
    success: options.success ?? true,
    environment: options.environment ?? config.environment
  };
  // Optional fields are attached only when present so the JSON stays minimal.
  if (options.errorType != null) payload.error_type = options.errorType;
  if (options.endUserId != null) payload.end_user_id = options.endUserId;
  if (runId != null) payload.run_id = runId;
  if (stepIndex != null) payload.step_index = stepIndex;
  await sendTelemetry(payload);
}
|
|
100
|
+
// POSTs a run-lifecycle payload to the given API path. Best-effort like
// sendTelemetry: failures never propagate, but are now logged in debug mode
// so run-tracking problems are diagnosable (previously swallowed entirely).
async function sendRunApi(path, payload) {
  if (!config.enabled) return;
  try {
    await fetch(`${config.host}${path}`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-api-key": config.trackerApiKey
      },
      body: JSON.stringify(payload),
      // Hard 2s cap so run reporting can never stall the host application.
      signal: AbortSignal.timeout(2e3)
    });
  } catch (err) {
    if (config.debug) console.error("[agentracer] run api failed:", err);
  }
}
|
|
115
|
+
|
|
116
|
+
// src/openai.ts
// Lazily-created default OpenAI client, shared by the `openai` proxy.
var _clientInstance = null;
// Resolves the OpenAI constructor from the optional peer dependency,
// handling both ESM-default and plain CJS export shapes. Extracted so the
// `require("openai").default || require("openai")` expression is written once.
function _loadOpenAI() {
  const mod = require("openai");
  return mod.default || mod;
}
// Returns an OpenAI client: a fresh instance when `opts` are given,
// otherwise the cached default instance (created on first use).
function getClient(opts) {
  if (opts) {
    const OpenAI = _loadOpenAI();
    return new OpenAI(opts);
  }
  if (!_clientInstance) {
    const OpenAI = _loadOpenAI();
    _clientInstance = new OpenAI();
  }
  return _clientInstance;
}
|
|
129
|
+
// Test seam: overrides the cached default client so unit tests can inject
// a stub without touching the real OpenAI SDK.
function _setClientForTesting(client) {
  _clientInstance = client;
}
|
|
132
|
+
// Wraps an OpenAI streaming response, forwarding every chunk while capturing
// usage from the final usage-bearing chunk (requires
// stream_options.include_usage, set by the caller). Telemetry is emitted in
// `finally` so the call is recorded even when iteration throws or the
// consumer stops early (a `break` triggers the generator's return(), which
// runs this finally). Previously, any error or early exit silently dropped
// the telemetry, and cached tokens were never recorded for streams.
async function* wrapOpenAIStream(stream, model, featureTag, start) {
  let inputTokens = 0;
  let outputTokens = 0;
  let cachedTokens = 0;
  let errorName = null;
  try {
    for await (const chunk of stream) {
      if (chunk.usage) {
        inputTokens = chunk.usage.prompt_tokens ?? 0;
        outputTokens = chunk.usage.completion_tokens ?? 0;
        cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens ?? 0;
      }
      yield chunk;
    }
  } catch (err) {
    errorName = err?.constructor?.name ?? "Error";
    throw err;
  } finally {
    // On early exit the usage counters may still be 0; the call is recorded
    // regardless since tokens were already consumed upstream.
    track({
      model,
      inputTokens,
      outputTokens,
      cachedTokens,
      latencyMs: Date.now() - start,
      featureTag,
      provider: "openai",
      success: errorName == null,
      errorType: errorName ?? void 0
    }).catch(() => {
    });
  }
}
|
|
152
|
+
// Builds a Proxy exposing the OpenAI client surface. `chat.completions.create`
// is intercepted to record telemetry (tokens, latency, errors); every other
// property is forwarded untouched to the underlying client.
function createOpenAIProxy(clientGetter) {
  return new Proxy({}, {
    get(_, prop) {
      if (prop === "chat") {
        return {
          completions: {
            create: async (params) => {
              // `feature_tag` is an agentracer extension; strip it before
              // handing the params to the real SDK.
              const featureTag = params.feature_tag ?? featureTagStorage.getStore() ?? "unknown";
              const { feature_tag, ...cleanParams } = params;
              const start = Date.now();
              const client = clientGetter();
              // Records a failed call; shared by the stream and non-stream paths.
              const trackFailure = (err) => {
                track({
                  model: params.model,
                  inputTokens: 0,
                  outputTokens: 0,
                  latencyMs: Date.now() - start,
                  featureTag,
                  provider: "openai",
                  success: false,
                  errorType: err?.constructor?.name ?? "Error"
                }).catch(() => {
                });
              };
              if (cleanParams.stream) {
                // Usage chunks are only emitted when explicitly requested.
                cleanParams.stream_options = {
                  ...cleanParams.stream_options,
                  include_usage: true
                };
                let stream;
                try {
                  stream = await client.chat.completions.create(cleanParams);
                } catch (err) {
                  // Previously untracked: failures while opening the stream.
                  trackFailure(err);
                  throw err;
                }
                return wrapOpenAIStream(stream, params.model, featureTag, start);
              }
              let response;
              try {
                response = await client.chat.completions.create(cleanParams);
              } catch (err) {
                trackFailure(err);
                throw err;
              }
              track({
                model: params.model,
                inputTokens: response.usage?.prompt_tokens ?? 0,
                outputTokens: response.usage?.completion_tokens ?? 0,
                cachedTokens: response.usage?.prompt_tokens_details?.cached_tokens ?? 0,
                latencyMs: Date.now() - start,
                featureTag,
                provider: "openai"
              }).catch(() => {
              });
              return response;
            }
          }
        };
      }
      // Anything other than `chat` passes straight through to the SDK client.
      return clientGetter()[prop];
    }
  });
}
|
|
207
|
+
// Drop-in OpenAI wrapper bound to a dedicated client instance (unlike the
// shared `openai` singleton). Only the `chat` surface is exposed; calls
// route through the tracking proxy.
var TrackedOpenAI = class {
  constructor(options) {
    const openaiModule = require("openai");
    const OpenAICtor = openaiModule.default || openaiModule;
    const boundClient = new OpenAICtor(options);
    this._proxy = createOpenAIProxy(() => boundClient);
  }
  get chat() {
    return this._proxy.chat;
  }
};
|
|
217
|
+
// Shared tracking proxy over the lazily-created default client.
var openai = createOpenAIProxy(() => getClient());
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  TrackedOpenAI,
  _setClientForTesting,
  openai
});
|
package/dist/openai.mjs
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
// Shared runtime helpers and core telemetry from the bundled common chunk.
import {
  __require,
  featureTagStorage,
  track
} from "./chunk-72GBZ4TW.mjs";
|
|
6
|
+
|
|
7
|
+
// src/openai.ts
// Lazily-created default OpenAI client, shared by the `openai` proxy.
var _clientInstance = null;
// Resolves the OpenAI constructor from the optional peer dependency,
// handling both ESM-default and plain CJS export shapes. Extracted so the
// `__require("openai").default || __require("openai")` expression is written once.
function _loadOpenAI() {
  const mod = __require("openai");
  return mod.default || mod;
}
// Returns an OpenAI client: a fresh instance when `opts` are given,
// otherwise the cached default instance (created on first use).
function getClient(opts) {
  if (opts) {
    const OpenAI = _loadOpenAI();
    return new OpenAI(opts);
  }
  if (!_clientInstance) {
    const OpenAI = _loadOpenAI();
    _clientInstance = new OpenAI();
  }
  return _clientInstance;
}
|
|
20
|
+
// Test seam: overrides the cached default client so unit tests can inject
// a stub without touching the real OpenAI SDK.
function _setClientForTesting(client) {
  _clientInstance = client;
}
|
|
23
|
+
// Wraps an OpenAI streaming response, forwarding every chunk while capturing
// usage from the final usage-bearing chunk (requires
// stream_options.include_usage, set by the caller). Telemetry is emitted in
// `finally` so the call is recorded even when iteration throws or the
// consumer stops early (a `break` triggers the generator's return(), which
// runs this finally). Previously, any error or early exit silently dropped
// the telemetry, and cached tokens were never recorded for streams.
async function* wrapOpenAIStream(stream, model, featureTag, start) {
  let inputTokens = 0;
  let outputTokens = 0;
  let cachedTokens = 0;
  let errorName = null;
  try {
    for await (const chunk of stream) {
      if (chunk.usage) {
        inputTokens = chunk.usage.prompt_tokens ?? 0;
        outputTokens = chunk.usage.completion_tokens ?? 0;
        cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens ?? 0;
      }
      yield chunk;
    }
  } catch (err) {
    errorName = err?.constructor?.name ?? "Error";
    throw err;
  } finally {
    // On early exit the usage counters may still be 0; the call is recorded
    // regardless since tokens were already consumed upstream.
    track({
      model,
      inputTokens,
      outputTokens,
      cachedTokens,
      latencyMs: Date.now() - start,
      featureTag,
      provider: "openai",
      success: errorName == null,
      errorType: errorName ?? void 0
    }).catch(() => {
    });
  }
}
|
|
43
|
+
// Builds a Proxy exposing the OpenAI client surface. `chat.completions.create`
// is intercepted to record telemetry (tokens, latency, errors); every other
// property is forwarded untouched to the underlying client.
function createOpenAIProxy(clientGetter) {
  return new Proxy({}, {
    get(_, prop) {
      if (prop === "chat") {
        return {
          completions: {
            create: async (params) => {
              // `feature_tag` is an agentracer extension; strip it before
              // handing the params to the real SDK.
              const featureTag = params.feature_tag ?? featureTagStorage.getStore() ?? "unknown";
              const { feature_tag, ...cleanParams } = params;
              const start = Date.now();
              const client = clientGetter();
              // Records a failed call; shared by the stream and non-stream paths.
              const trackFailure = (err) => {
                track({
                  model: params.model,
                  inputTokens: 0,
                  outputTokens: 0,
                  latencyMs: Date.now() - start,
                  featureTag,
                  provider: "openai",
                  success: false,
                  errorType: err?.constructor?.name ?? "Error"
                }).catch(() => {
                });
              };
              if (cleanParams.stream) {
                // Usage chunks are only emitted when explicitly requested.
                cleanParams.stream_options = {
                  ...cleanParams.stream_options,
                  include_usage: true
                };
                let stream;
                try {
                  stream = await client.chat.completions.create(cleanParams);
                } catch (err) {
                  // Previously untracked: failures while opening the stream.
                  trackFailure(err);
                  throw err;
                }
                return wrapOpenAIStream(stream, params.model, featureTag, start);
              }
              let response;
              try {
                response = await client.chat.completions.create(cleanParams);
              } catch (err) {
                trackFailure(err);
                throw err;
              }
              track({
                model: params.model,
                inputTokens: response.usage?.prompt_tokens ?? 0,
                outputTokens: response.usage?.completion_tokens ?? 0,
                cachedTokens: response.usage?.prompt_tokens_details?.cached_tokens ?? 0,
                latencyMs: Date.now() - start,
                featureTag,
                provider: "openai"
              }).catch(() => {
              });
              return response;
            }
          }
        };
      }
      // Anything other than `chat` passes straight through to the SDK client.
      return clientGetter()[prop];
    }
  });
}
|
|
98
|
+
// Drop-in OpenAI wrapper bound to a dedicated client instance (unlike the
// shared `openai` singleton). Only the `chat` surface is exposed; calls
// route through the tracking proxy.
var TrackedOpenAI = class {
  constructor(options) {
    const openaiModule = __require("openai");
    const OpenAICtor = openaiModule.default || openaiModule;
    const boundClient = new OpenAICtor(options);
    this._proxy = createOpenAIProxy(() => boundClient);
  }
  get chat() {
    return this._proxy.chat;
  }
};
|
|
108
|
+
// Shared tracking proxy over the lazily-created default client.
var openai = createOpenAIProxy(() => getClient());
export {
  TrackedOpenAI,
  _setClientForTesting,
  openai
};
|
package/package.json
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "agentracer",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Lightweight AI incident detection. Catch cost spikes, latency anomalies, and prompt bloat before they hit your users.",
|
|
5
|
+
"main": "dist/index.js",
|
|
6
|
+
"types": "dist/index.d.ts",
|
|
7
|
+
"files": [
|
|
8
|
+
"dist",
|
|
9
|
+
"README.md",
|
|
10
|
+
"LICENSE"
|
|
11
|
+
],
|
|
12
|
+
"sideEffects": false,
|
|
13
|
+
"exports": {
|
|
14
|
+
".": {
|
|
15
|
+
"types": "./dist/index.d.ts",
|
|
16
|
+
"import": "./dist/index.mjs",
|
|
17
|
+
"require": "./dist/index.js"
|
|
18
|
+
},
|
|
19
|
+
"./openai": {
|
|
20
|
+
"types": "./dist/openai.d.ts",
|
|
21
|
+
"import": "./dist/openai.mjs",
|
|
22
|
+
"require": "./dist/openai.js"
|
|
23
|
+
},
|
|
24
|
+
"./anthropic": {
|
|
25
|
+
"types": "./dist/anthropic.d.ts",
|
|
26
|
+
"import": "./dist/anthropic.mjs",
|
|
27
|
+
"require": "./dist/anthropic.js"
|
|
28
|
+
},
|
|
29
|
+
"./gemini": {
|
|
30
|
+
"types": "./dist/gemini.d.ts",
|
|
31
|
+
"import": "./dist/gemini.mjs",
|
|
32
|
+
"require": "./dist/gemini.js"
|
|
33
|
+
}
|
|
34
|
+
},
|
|
35
|
+
"scripts": {
|
|
36
|
+
"build": "tsup src/index.ts src/openai.ts src/anthropic.ts src/gemini.ts --format cjs,esm --dts",
|
|
37
|
+
"dev": "tsup src/index.ts src/openai.ts src/anthropic.ts src/gemini.ts --format cjs,esm --dts --watch",
|
|
38
|
+
"test": "vitest run",
|
|
39
|
+
"prepublishOnly": "npm run build && npm test"
|
|
40
|
+
},
|
|
41
|
+
"keywords": [
|
|
42
|
+
"ai",
|
|
43
|
+
"observability",
|
|
44
|
+
"llm",
|
|
45
|
+
"cost-tracking",
|
|
46
|
+
"openai",
|
|
47
|
+
"anthropic",
|
|
48
|
+
"gemini"
|
|
49
|
+
],
|
|
50
|
+
"author": "Agentracer <hello@agentracer.dev>",
|
|
51
|
+
"license": "MIT",
|
|
52
|
+
"repository": {
|
|
53
|
+
"type": "git",
|
|
54
|
+
"url": "https://github.com/aqib-ilyas/agentracer-node"
|
|
55
|
+
},
|
|
56
|
+
"homepage": "https://agentracer.dev",
|
|
57
|
+
"bugs": {
|
|
58
|
+
"url": "https://github.com/aqib-ilyas/agentracer-node/issues"
|
|
59
|
+
},
|
|
60
|
+
"engines": {
|
|
61
|
+
"node": ">=18.0.0"
|
|
62
|
+
},
|
|
63
|
+
"devDependencies": {
|
|
64
|
+
"@types/node": "^25.3.2",
|
|
65
|
+
"tsup": "^8.0.0",
|
|
66
|
+
"typescript": "^5.3.0",
|
|
67
|
+
"vitest": "^4.0.18"
|
|
68
|
+
},
|
|
69
|
+
"peerDependencies": {
|
|
70
|
+
"@anthropic-ai/sdk": ">=0.18.0",
|
|
71
|
+
"@google/generative-ai": ">=0.1.0",
|
|
72
|
+
"openai": ">=4.0.0"
|
|
73
|
+
},
|
|
74
|
+
"peerDependenciesMeta": {
|
|
75
|
+
"openai": {
|
|
76
|
+
"optional": true
|
|
77
|
+
},
|
|
78
|
+
"@anthropic-ai/sdk": {
|
|
79
|
+
"optional": true
|
|
80
|
+
},
|
|
81
|
+
"@google/generative-ai": {
|
|
82
|
+
"optional": true
|
|
83
|
+
}
|
|
84
|
+
}
|
|
85
|
+
}
|