@ato-sdk/js 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,116 @@
1
+ # @agentic-tool-optimization/sdk
2
+
3
+ Auto-capture LLM traces for [ATO](https://agentictool.ai). Works with Anthropic, OpenAI, and any LLM provider.
4
+
5
+ ## Install
6
+
7
+ ```bash
8
+ npm install @agentic-tool-optimization/sdk
9
+ ```
10
+
11
+ ## Quick Start
12
+
13
+ ```typescript
14
+ import { init } from '@agentic-tool-optimization/sdk';
15
+
16
+ // Initialize with your ATO API key
17
+ init({ apiKey: 'your-ato-api-key' });
18
+ ```
19
+
20
+ ### Anthropic
21
+
22
+ ```typescript
23
+ import Anthropic from '@anthropic-ai/sdk';
24
+ import { wrapAnthropic } from '@agentic-tool-optimization/sdk/anthropic';
25
+
26
+ const client = wrapAnthropic(new Anthropic());
27
+
28
+ // All calls are now auto-traced — no other changes needed
29
+ const msg = await client.messages.create({
30
+ model: 'claude-sonnet-4-6',
31
+ max_tokens: 1024,
32
+ messages: [{ role: 'user', content: 'Hello' }],
33
+ });
34
+ ```
35
+
36
+ ### OpenAI
37
+
38
+ ```typescript
39
+ import OpenAI from 'openai';
40
+ import { wrapOpenAI } from '@agentic-tool-optimization/sdk/openai';
41
+
42
+ const client = wrapOpenAI(new OpenAI());
43
+
44
+ // All calls are now auto-traced
45
+ const res = await client.chat.completions.create({
46
+ model: 'gpt-4o',
47
+ messages: [{ role: 'user', content: 'Hello' }],
48
+ });
49
+ ```
50
+
51
+ ## What Gets Captured
52
+
53
+ Every LLM call automatically records:
54
+
55
+ - **Model** — which model was used
56
+ - **Tokens** — input, output, cached
57
+ - **Cost** — calculated from built-in pricing table (30+ models)
58
+ - **Duration** — response time in ms
59
+ - **Status** — success or error (with error message)
60
+ - **Metadata** — temperature, max_tokens, tool usage, stop reason
61
+
62
+ ## Configuration
63
+
64
+ ```typescript
65
+ init({
66
+ apiKey: 'your-key', // ATO Cloud API key
67
+ endpoint: 'https://api.agentictool.ai', // Custom endpoint
68
+ debug: true, // Log traces to console
69
+ batching: true, // Batch traces (default)
70
+ flushInterval: 5000, // Flush every 5s (default)
71
+ maxBatchSize: 50, // Flush at 50 traces (default)
72
+ sessionId: 'my-session', // Group traces by session
73
+ userId: 'user-123', // Attribute traces to user
74
+ defaultTags: ['production'], // Tags for all traces
75
+ localOnly: true, // Don't send to cloud
76
+ });
77
+ ```
78
+
79
+ ## Manual Traces
80
+
81
+ For custom LLM providers:
82
+
83
+ ```typescript
84
+ import { capture, generateTraceId } from '@agentic-tool-optimization/sdk';
85
+ import { calculateCost } from '@agentic-tool-optimization/sdk';
86
+
87
+ capture({
88
+ id: generateTraceId(),
89
+ provider: 'custom',
90
+ model: 'my-model',
91
+ inputTokens: 100,
92
+ outputTokens: 50,
93
+ cachedTokens: 0,
94
+ totalTokens: 150,
95
+ costUsd: calculateCost('gpt-4o', 100, 50),
96
+ durationMs: 234,
97
+ status: 'success',
98
+ metadata: {},
99
+ timestamp: new Date().toISOString(),
100
+ });
101
+ ```
102
+
103
+ ## Cost Calculation
104
+
105
+ Built-in pricing for 30+ models:
106
+
107
+ ```typescript
108
+ import { calculateCost } from '@agentic-tool-optimization/sdk';
109
+
110
+ calculateCost('claude-sonnet-4-6', 1000, 500); // $0.0105
111
+ calculateCost('gpt-4o', 1000, 500); // $0.0075
112
+ ```
113
+
114
+ ## License
115
+
116
+ MIT
@@ -0,0 +1,26 @@
1
// Re-export the shared core API from the bundled pricing/client chunk,
// mapping the chunk's minified export names back to their public names.
export { A as AtoConfig, a as AtoTrace, M as MODEL_PRICING, c as calculateCost, g as getClient, i as init } from './pricing-DnUk84wO.mjs';

/**
 * ATO wrapper for the Anthropic SDK
 *
 * Usage:
 *   import Anthropic from '@anthropic-ai/sdk';
 *   import { wrapAnthropic } from '@ato/sdk/anthropic';
 *   // NOTE(review): the README installs this package as
 *   // "@agentic-tool-optimization/sdk" — confirm which import specifier
 *   // is correct for published consumers.
 *
 *   const client = wrapAnthropic(new Anthropic());
 *   // All calls are now auto-traced
 *   const msg = await client.messages.create({ model: 'claude-sonnet-4-6', ... });
 */
// Minimal structural shape a client must expose to be wrappable; the index
// signature keeps the rest of the real Anthropic client type assignable.
type AnthropicClient = {
    messages: {
        create: (...args: any[]) => Promise<any>;
        stream: (...args: any[]) => any;
    };
    [key: string]: any;
};
/**
 * Wrap an Anthropic client to auto-capture traces.
 * Returns the same client instance with messages.create/stream patched.
 */
declare function wrapAnthropic<T extends AnthropicClient>(client: T): T;

export { wrapAnthropic };
@@ -0,0 +1,26 @@
1
// Re-export the shared core API from the bundled pricing/client chunk,
// mapping the chunk's minified export names back to their public names.
export { A as AtoConfig, a as AtoTrace, M as MODEL_PRICING, c as calculateCost, g as getClient, i as init } from './pricing-DnUk84wO.js';

/**
 * ATO wrapper for the Anthropic SDK
 *
 * Usage:
 *   import Anthropic from '@anthropic-ai/sdk';
 *   import { wrapAnthropic } from '@ato/sdk/anthropic';
 *   // NOTE(review): the README installs this package as
 *   // "@agentic-tool-optimization/sdk" — confirm which import specifier
 *   // is correct for published consumers.
 *
 *   const client = wrapAnthropic(new Anthropic());
 *   // All calls are now auto-traced
 *   const msg = await client.messages.create({ model: 'claude-sonnet-4-6', ... });
 */
// Minimal structural shape a client must expose to be wrappable; the index
// signature keeps the rest of the real Anthropic client type assignable.
type AnthropicClient = {
    messages: {
        create: (...args: any[]) => Promise<any>;
        stream: (...args: any[]) => any;
    };
    [key: string]: any;
};
/**
 * Wrap an Anthropic client to auto-capture traces.
 * Returns the same client instance with messages.create/stream patched.
 */
declare function wrapAnthropic<T extends AnthropicClient>(client: T): T;

export { wrapAnthropic };
@@ -0,0 +1,310 @@
1
"use strict";
// ---- esbuild-generated CommonJS interop helpers ----
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define every entry of `all` as an enumerable getter on `target`
// (lazy exports: the getter is only evaluated when the export is read).
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties from `from` onto `to` as getters, skipping keys that
// already exist on `to` and the `except` key; preserves enumerability.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Tag the namespace object with __esModule and expose it via module.exports.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/anthropic.ts
// Register the public named exports of this entry point.
var anthropic_exports = {};
__export(anthropic_exports, {
  MODEL_PRICING: () => MODEL_PRICING,
  calculateCost: () => calculateCost,
  getClient: () => getClient,
  init: () => init,
  wrapAnthropic: () => wrapAnthropic
});
module.exports = __toCommonJS(anthropic_exports);
30
+
31
+ // src/client.ts
32
+ var SDK_VERSION = "0.1.0";
33
+ var DEFAULT_ENDPOINT = "https://api.agentictool.ai";
34
+ var DEFAULT_FLUSH_INTERVAL = 5e3;
35
+ var DEFAULT_MAX_BATCH_SIZE = 50;
36
+ var globalClient = null;
37
+ var AtoClient = class {
38
+ constructor(config = {}) {
39
+ this.queue = [];
40
+ this.timer = null;
41
+ this.config = {
42
+ endpoint: DEFAULT_ENDPOINT,
43
+ batching: true,
44
+ flushInterval: DEFAULT_FLUSH_INTERVAL,
45
+ maxBatchSize: DEFAULT_MAX_BATCH_SIZE,
46
+ debug: false,
47
+ localOnly: false,
48
+ ...config
49
+ };
50
+ if (this.config.batching && !this.config.localOnly) {
51
+ this.timer = setInterval(() => this.flush(), this.config.flushInterval);
52
+ if (this.timer && typeof this.timer === "object" && "unref" in this.timer) {
53
+ this.timer.unref();
54
+ }
55
+ }
56
+ }
57
+ /**
58
+ * Record a trace
59
+ */
60
+ capture(trace) {
61
+ if (this.config.defaultTags) {
62
+ trace.tags = [...trace.tags || [], ...this.config.defaultTags];
63
+ }
64
+ if (this.config.defaultMetadata) {
65
+ trace.metadata = { ...this.config.defaultMetadata, ...trace.metadata };
66
+ }
67
+ if (this.config.sessionId && !trace.sessionId) {
68
+ trace.sessionId = this.config.sessionId;
69
+ }
70
+ if (this.config.userId && !trace.userId) {
71
+ trace.userId = this.config.userId;
72
+ }
73
+ if (this.config.debug) {
74
+ console.log("[ato]", JSON.stringify(trace, null, 2));
75
+ }
76
+ if (this.config.localOnly) return;
77
+ this.queue.push(trace);
78
+ if (!this.config.batching || this.queue.length >= this.config.maxBatchSize) {
79
+ this.flush();
80
+ }
81
+ }
82
+ /**
83
+ * Flush pending traces to ATO Cloud
84
+ */
85
+ async flush() {
86
+ if (this.queue.length === 0) return;
87
+ const traces = this.queue.splice(0);
88
+ const payload = {
89
+ traces,
90
+ sdk: "@ato/sdk",
91
+ sdkVersion: SDK_VERSION,
92
+ sentAt: (/* @__PURE__ */ new Date()).toISOString()
93
+ };
94
+ try {
95
+ const headers = {
96
+ "Content-Type": "application/json"
97
+ };
98
+ if (this.config.apiKey) {
99
+ headers["Authorization"] = `Bearer ${this.config.apiKey}`;
100
+ }
101
+ const res = await fetch(`${this.config.endpoint}/api/analytics/ingest`, {
102
+ method: "POST",
103
+ headers,
104
+ body: JSON.stringify(payload)
105
+ });
106
+ if (!res.ok && this.config.debug) {
107
+ console.error("[ato] Failed to send traces:", res.status, await res.text());
108
+ }
109
+ } catch (err) {
110
+ if (this.config.debug) {
111
+ console.error("[ato] Failed to send traces:", err);
112
+ }
113
+ this.queue.unshift(...traces);
114
+ if (this.queue.length > 1e3) {
115
+ this.queue.splice(0, this.queue.length - 500);
116
+ }
117
+ }
118
+ }
119
+ /**
120
+ * Shutdown — flush remaining traces
121
+ */
122
+ async shutdown() {
123
+ if (this.timer) {
124
+ clearInterval(this.timer);
125
+ this.timer = null;
126
+ }
127
+ await this.flush();
128
+ }
129
+ getConfig() {
130
+ return { ...this.config };
131
+ }
132
+ };
133
/**
 * Initialize the global ATO client with the given configuration.
 * Replaces any previously initialized client and returns the new one.
 */
function init(config = {}) {
  const client = new AtoClient(config);
  globalClient = client;
  return client;
}
137
/**
 * Return the global ATO client, lazily creating one with default
 * configuration if init() was never called.
 */
function getClient() {
  if (globalClient === null) {
    globalClient = new AtoClient();
  }
  return globalClient;
}
143
/**
 * Generate a trace id of the form "ato_<epoch-ms>_<base36 suffix>".
 * Uses Math.random — unique enough for correlation, not cryptographic.
 */
function generateTraceId() {
  const epochMs = Date.now();
  const suffix = Math.random().toString(36).slice(2, 10);
  return `ato_${epochMs}_${suffix}`;
}
146
+
147
+ // src/pricing.ts
148
+ var MODEL_PRICING = {
149
+ // Anthropic
150
+ "claude-opus-4-6": { input: 15, output: 75, cached: 1.5 },
151
+ "claude-sonnet-4-6": { input: 3, output: 15, cached: 0.3 },
152
+ "claude-haiku-4-5": { input: 0.8, output: 4, cached: 0.08 },
153
+ "claude-sonnet-4-5": { input: 3, output: 15, cached: 0.3 },
154
+ "claude-3-5-sonnet": { input: 3, output: 15, cached: 0.3 },
155
+ "claude-3-5-haiku": { input: 0.8, output: 4, cached: 0.08 },
156
+ "claude-3-opus": { input: 15, output: 75, cached: 1.5 },
157
+ "claude-3-sonnet": { input: 3, output: 15 },
158
+ "claude-3-haiku": { input: 0.25, output: 1.25 },
159
+ // OpenAI
160
+ "gpt-4o": { input: 2.5, output: 10, cached: 1.25 },
161
+ "gpt-4o-mini": { input: 0.15, output: 0.6, cached: 0.075 },
162
+ "gpt-4-turbo": { input: 10, output: 30 },
163
+ "gpt-4": { input: 30, output: 60 },
164
+ "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
165
+ "o1": { input: 15, output: 60, cached: 7.5 },
166
+ "o1-mini": { input: 3, output: 12, cached: 1.5 },
167
+ "o1-pro": { input: 150, output: 600 },
168
+ "o3": { input: 10, output: 40, cached: 2.5 },
169
+ "o3-mini": { input: 1.1, output: 4.4, cached: 0.55 },
170
+ "o4-mini": { input: 1.1, output: 4.4, cached: 0.275 },
171
+ "gpt-4.1": { input: 2, output: 8, cached: 0.5 },
172
+ "gpt-4.1-mini": { input: 0.4, output: 1.6, cached: 0.1 },
173
+ "gpt-4.1-nano": { input: 0.1, output: 0.4, cached: 0.025 },
174
+ // Google
175
+ "gemini-2.5-pro": { input: 1.25, output: 10 },
176
+ "gemini-2.5-flash": { input: 0.15, output: 0.6 },
177
+ "gemini-2.0-flash": { input: 0.1, output: 0.4 },
178
+ "gemini-1.5-pro": { input: 1.25, output: 5 },
179
+ "gemini-1.5-flash": { input: 0.075, output: 0.3 },
180
+ // Mistral
181
+ "mistral-large": { input: 2, output: 6 },
182
+ "mistral-small": { input: 0.2, output: 0.6 },
183
+ "codestral": { input: 0.3, output: 0.9 },
184
+ // Groq (inference pricing)
185
+ "llama-3.3-70b": { input: 0.59, output: 0.79 },
186
+ "llama-3.1-8b": { input: 0.05, output: 0.08 },
187
+ "mixtral-8x7b": { input: 0.24, output: 0.24 },
188
+ // Cohere
189
+ "command-r-plus": { input: 2.5, output: 10 },
190
+ "command-r": { input: 0.15, output: 0.6 }
191
+ };
192
+ function calculateCost(model, inputTokens, outputTokens, cachedTokens = 0) {
193
+ const pricing = MODEL_PRICING[model] || Object.entries(MODEL_PRICING).find(
194
+ ([key]) => model.startsWith(key)
195
+ )?.[1];
196
+ if (!pricing) return 0;
197
+ const inputCost = (inputTokens - cachedTokens) / 1e6 * pricing.input;
198
+ const outputCost = outputTokens / 1e6 * pricing.output;
199
+ const cachedCost = pricing.cached ? cachedTokens / 1e6 * pricing.cached : 0;
200
+ return inputCost + outputCost + cachedCost;
201
+ }
202
+
203
// src/anthropic.ts
/**
 * Wrap an Anthropic SDK client so every messages.create()/messages.stream()
 * call is captured as an ATO trace. The client is patched in place and
 * returned; call signatures, results, and thrown errors are unchanged.
 *
 * Fix: the model name is resolved (with the "unknown" fallback) BEFORE it is
 * passed to calculateCost. Previously calculateCost received
 * `result.model || params.model` without the fallback, so a response with no
 * model name made cost calculation throw inside the success path, converting
 * a successful API call into a captured error + rethrown TypeError.
 */
function wrapAnthropic(client) {
  const originalCreate = client.messages.create.bind(client.messages);
  const originalStream = client.messages.stream?.bind(client.messages);
  // Build a success trace from a final (or streamed-final) message.
  const successTrace = (message, params, durationMs, metadata) => {
    const inputTokens = message.usage?.input_tokens || 0;
    const outputTokens = message.usage?.output_tokens || 0;
    const cachedTokens = message.usage?.cache_read_input_tokens || 0;
    const model = message.model || params.model || "unknown";
    return {
      id: generateTraceId(),
      provider: "anthropic",
      model,
      inputTokens,
      outputTokens,
      cachedTokens,
      totalTokens: inputTokens + outputTokens,
      costUsd: calculateCost(model, inputTokens, outputTokens, cachedTokens),
      durationMs,
      status: "success",
      metadata,
      timestamp: new Date().toISOString()
    };
  };
  client.messages.create = async function(...args) {
    const params = args[0] || {};
    const start = Date.now();
    try {
      const result = await originalCreate(...args);
      getClient().capture(successTrace(result, params, Date.now() - start, {
        stopReason: result.stop_reason,
        maxTokens: params.max_tokens,
        temperature: params.temperature,
        // Never record prompt content — only whether a system prompt existed.
        system: params.system ? "[present]" : void 0,
        toolUse: params.tools?.length ? params.tools.length : void 0
      }));
      return result;
    } catch (err) {
      // Record the failure (no usage data available), then rethrow unchanged.
      getClient().capture({
        id: generateTraceId(),
        provider: "anthropic",
        model: params.model || "unknown",
        inputTokens: 0,
        outputTokens: 0,
        cachedTokens: 0,
        totalTokens: 0,
        costUsd: 0,
        durationMs: Date.now() - start,
        status: "error",
        error: err.message || String(err),
        metadata: {
          errorType: err.constructor?.name,
          statusCode: err.status
        },
        timestamp: new Date().toISOString()
      });
      throw err;
    }
  };
  if (originalStream) {
    client.messages.stream = function(...args) {
      const params = args[0] || {};
      const start = Date.now();
      const stream = originalStream(...args);
      const originalOn = stream.on?.bind(stream);
      if (originalOn) {
        // Intercept final-message listeners so streamed calls are traced too.
        // NOTE(review): a trace is captured once per registered listener — a
        // caller listening to both "message" and "finalMessage" records twice.
        stream.on = function(event, handler) {
          if (event === "finalMessage" || event === "message") {
            const wrappedHandler = (message) => {
              getClient().capture(successTrace(message, params, Date.now() - start, {
                streaming: true,
                stopReason: message.stop_reason
              }));
              handler(message);
            };
            return originalOn(event, wrappedHandler);
          }
          return originalOn(event, handler);
        };
      }
      return stream;
    };
  }
  return client;
}
303
// Annotate the CommonJS export names for ESM import in node:
// The `0 && (...)` expression is dead code (never executed); esbuild emits it
// so static analyzers — presumably Node's named-export detection for CJS
// modules — can discover the export names. Do not remove.
0 && (module.exports = {
  MODEL_PRICING,
  calculateCost,
  getClient,
  init,
  wrapAnthropic
});
@@ -0,0 +1,115 @@
1
+ import {
2
+ MODEL_PRICING,
3
+ calculateCost,
4
+ generateTraceId,
5
+ getClient,
6
+ init
7
+ } from "./chunk-Q2LJUUHK.mjs";
8
+
9
// src/anthropic.ts
/**
 * Wrap an Anthropic SDK client so every messages.create()/messages.stream()
 * call is captured as an ATO trace. The client is patched in place and
 * returned; call signatures, results, and thrown errors are unchanged.
 *
 * Fix: the model name is resolved (with the "unknown" fallback) BEFORE it is
 * passed to calculateCost. Previously calculateCost received
 * `result.model || params.model` without the fallback, so a response with no
 * model name made cost calculation throw inside the success path, converting
 * a successful API call into a captured error + rethrown TypeError.
 */
function wrapAnthropic(client) {
  const originalCreate = client.messages.create.bind(client.messages);
  const originalStream = client.messages.stream?.bind(client.messages);
  // Build a success trace from a final (or streamed-final) message.
  const successTrace = (message, params, durationMs, metadata) => {
    const inputTokens = message.usage?.input_tokens || 0;
    const outputTokens = message.usage?.output_tokens || 0;
    const cachedTokens = message.usage?.cache_read_input_tokens || 0;
    const model = message.model || params.model || "unknown";
    return {
      id: generateTraceId(),
      provider: "anthropic",
      model,
      inputTokens,
      outputTokens,
      cachedTokens,
      totalTokens: inputTokens + outputTokens,
      costUsd: calculateCost(model, inputTokens, outputTokens, cachedTokens),
      durationMs,
      status: "success",
      metadata,
      timestamp: new Date().toISOString()
    };
  };
  client.messages.create = async function(...args) {
    const params = args[0] || {};
    const start = Date.now();
    try {
      const result = await originalCreate(...args);
      getClient().capture(successTrace(result, params, Date.now() - start, {
        stopReason: result.stop_reason,
        maxTokens: params.max_tokens,
        temperature: params.temperature,
        // Never record prompt content — only whether a system prompt existed.
        system: params.system ? "[present]" : void 0,
        toolUse: params.tools?.length ? params.tools.length : void 0
      }));
      return result;
    } catch (err) {
      // Record the failure (no usage data available), then rethrow unchanged.
      getClient().capture({
        id: generateTraceId(),
        provider: "anthropic",
        model: params.model || "unknown",
        inputTokens: 0,
        outputTokens: 0,
        cachedTokens: 0,
        totalTokens: 0,
        costUsd: 0,
        durationMs: Date.now() - start,
        status: "error",
        error: err.message || String(err),
        metadata: {
          errorType: err.constructor?.name,
          statusCode: err.status
        },
        timestamp: new Date().toISOString()
      });
      throw err;
    }
  };
  if (originalStream) {
    client.messages.stream = function(...args) {
      const params = args[0] || {};
      const start = Date.now();
      const stream = originalStream(...args);
      const originalOn = stream.on?.bind(stream);
      if (originalOn) {
        // Intercept final-message listeners so streamed calls are traced too.
        // NOTE(review): a trace is captured once per registered listener — a
        // caller listening to both "message" and "finalMessage" records twice.
        stream.on = function(event, handler) {
          if (event === "finalMessage" || event === "message") {
            const wrappedHandler = (message) => {
              getClient().capture(successTrace(message, params, Date.now() - start, {
                streaming: true,
                stopReason: message.stop_reason
              }));
              handler(message);
            };
            return originalOn(event, wrappedHandler);
          }
          return originalOn(event, handler);
        };
      }
      return stream;
    };
  }
  return client;
}
109
+ export {
110
+ MODEL_PRICING,
111
+ calculateCost,
112
+ getClient,
113
+ init,
114
+ wrapAnthropic
115
+ };