@ato-sdk/js 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/openai.js ADDED
@@ -0,0 +1,360 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/openai.ts
// Public export surface of this entry point, wired through esbuild's
// lazy-getter export helper and re-exported as CommonJS.
var openai_exports = {};
__export(openai_exports, {
  MODEL_PRICING: () => MODEL_PRICING,
  calculateCost: () => calculateCost,
  getClient: () => getClient,
  init: () => init,
  wrapOpenAI: () => wrapOpenAI
});
module.exports = __toCommonJS(openai_exports);
30
+
31
// src/client.ts
var SDK_VERSION = "0.1.0";
var DEFAULT_ENDPOINT = "https://api.agentictool.ai";
var DEFAULT_FLUSH_INTERVAL = 5e3;
var DEFAULT_MAX_BATCH_SIZE = 50;
// Retry-queue bounds: after a failed send the queue is capped at
// MAX_QUEUE_SIZE and trimmed down to the newest QUEUE_TRIM_TARGET traces.
var MAX_QUEUE_SIZE = 1e3;
var QUEUE_TRIM_TARGET = 500;
// Process-wide singleton managed by init()/getClient().
var globalClient = null;
/**
 * ATO Client — collects traces and syncs them to ATO Cloud, either
 * immediately or in timed batches.
 */
var AtoClient = class {
  queue = [];
  timer = null;
  config;
  constructor(config = {}) {
    this.config = {
      endpoint: DEFAULT_ENDPOINT,
      batching: true,
      flushInterval: DEFAULT_FLUSH_INTERVAL,
      maxBatchSize: DEFAULT_MAX_BATCH_SIZE,
      debug: false,
      localOnly: false,
      ...config
    };
    if (this.config.batching && !this.config.localOnly) {
      this.timer = setInterval(() => this.flush(), this.config.flushInterval);
      // In Node, unref the timer so a pending flush never keeps the process alive.
      if (this.timer && typeof this.timer === "object" && "unref" in this.timer) {
        this.timer.unref();
      }
    }
  }
  /**
   * Record a trace. Config defaults (tags, metadata, sessionId, userId) are
   * merged onto a shallow copy — the caller's trace object is never mutated
   * (the previous implementation wrote the merged values back onto the
   * argument, surprising callers that reuse trace objects).
   */
  capture(trace) {
    const enriched = { ...trace };
    if (this.config.defaultTags) {
      enriched.tags = [...enriched.tags || [], ...this.config.defaultTags];
    }
    if (this.config.defaultMetadata) {
      // Per-trace metadata wins over config defaults.
      enriched.metadata = { ...this.config.defaultMetadata, ...enriched.metadata };
    }
    if (this.config.sessionId && !enriched.sessionId) {
      enriched.sessionId = this.config.sessionId;
    }
    if (this.config.userId && !enriched.userId) {
      enriched.userId = this.config.userId;
    }
    if (this.config.debug) {
      console.log("[ato]", JSON.stringify(enriched, null, 2));
    }
    if (this.config.localOnly) return;
    this.queue.push(enriched);
    if (!this.config.batching || this.queue.length >= this.config.maxBatchSize) {
      void this.flush();
    }
  }
  /**
   * Flush pending traces to ATO Cloud. On failure the batch is re-queued
   * (bounded) so a later flush can retry; errors never propagate.
   */
  async flush() {
    if (this.queue.length === 0) return;
    const traces = this.queue.splice(0);
    const payload = {
      traces,
      sdk: "@ato/sdk",
      sdkVersion: SDK_VERSION,
      sentAt: (/* @__PURE__ */ new Date()).toISOString()
    };
    try {
      const headers = this.config.apiKey
        ? { "Content-Type": "application/json", Authorization: `Bearer ${this.config.apiKey}` }
        : { "Content-Type": "application/json" };
      const res = await fetch(`${this.config.endpoint}/api/analytics/ingest`, {
        method: "POST",
        headers,
        body: JSON.stringify(payload)
      });
      if (!res.ok && this.config.debug) {
        console.error("[ato] Failed to send traces:", res.status, await res.text());
      }
    } catch (err) {
      if (this.config.debug) {
        console.error("[ato] Failed to send traces:", err);
      }
      // Put the batch back for retry, but never let the queue grow unbounded.
      this.queue.unshift(...traces);
      if (this.queue.length > MAX_QUEUE_SIZE) {
        this.queue.splice(0, this.queue.length - QUEUE_TRIM_TARGET);
      }
    }
  }
  /**
   * Shutdown — stop the batch timer and flush remaining traces.
   */
  async shutdown() {
    if (this.timer) {
      clearInterval(this.timer);
      this.timer = null;
    }
    await this.flush();
  }
  /** Snapshot of the resolved configuration (a copy; safe to mutate). */
  getConfig() {
    return { ...this.config };
  }
};
/**
 * Initialize (or replace) the global ATO client with the given config.
 */
function init(config = {}) {
  globalClient = new AtoClient(config);
  return globalClient;
}
/**
 * Get the global client, lazily creating one with default config.
 */
function getClient() {
  if (!globalClient) {
    globalClient = new AtoClient();
  }
  return globalClient;
}
/**
 * Generate a unique trace ID: timestamp plus an 8-char random suffix.
 */
function generateTraceId() {
  return `ato_${Date.now()}_${Math.random().toString(36).slice(2, 10)}`;
}
146
+
147
// src/pricing.ts
/**
 * LLM model pricing in USD per 1M tokens. `cached` is the discounted rate
 * for cache-hit input tokens where the provider offers one.
 */
var MODEL_PRICING = {
  // Anthropic
  "claude-opus-4-6": { input: 15, output: 75, cached: 1.5 },
  "claude-sonnet-4-6": { input: 3, output: 15, cached: 0.3 },
  "claude-haiku-4-5": { input: 0.8, output: 4, cached: 0.08 },
  "claude-sonnet-4-5": { input: 3, output: 15, cached: 0.3 },
  "claude-3-5-sonnet": { input: 3, output: 15, cached: 0.3 },
  "claude-3-5-haiku": { input: 0.8, output: 4, cached: 0.08 },
  "claude-3-opus": { input: 15, output: 75, cached: 1.5 },
  "claude-3-sonnet": { input: 3, output: 15 },
  "claude-3-haiku": { input: 0.25, output: 1.25 },
  // OpenAI
  "gpt-4o": { input: 2.5, output: 10, cached: 1.25 },
  "gpt-4o-mini": { input: 0.15, output: 0.6, cached: 0.075 },
  "gpt-4-turbo": { input: 10, output: 30 },
  "gpt-4": { input: 30, output: 60 },
  "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
  "o1": { input: 15, output: 60, cached: 7.5 },
  "o1-mini": { input: 3, output: 12, cached: 1.5 },
  "o1-pro": { input: 150, output: 600 },
  "o3": { input: 10, output: 40, cached: 2.5 },
  "o3-mini": { input: 1.1, output: 4.4, cached: 0.55 },
  "o4-mini": { input: 1.1, output: 4.4, cached: 0.275 },
  "gpt-4.1": { input: 2, output: 8, cached: 0.5 },
  "gpt-4.1-mini": { input: 0.4, output: 1.6, cached: 0.1 },
  "gpt-4.1-nano": { input: 0.1, output: 0.4, cached: 0.025 },
  // Google
  "gemini-2.5-pro": { input: 1.25, output: 10 },
  "gemini-2.5-flash": { input: 0.15, output: 0.6 },
  "gemini-2.0-flash": { input: 0.1, output: 0.4 },
  "gemini-1.5-pro": { input: 1.25, output: 5 },
  "gemini-1.5-flash": { input: 0.075, output: 0.3 },
  // Mistral
  "mistral-large": { input: 2, output: 6 },
  "mistral-small": { input: 0.2, output: 0.6 },
  "codestral": { input: 0.3, output: 0.9 },
  // Groq (inference pricing)
  "llama-3.3-70b": { input: 0.59, output: 0.79 },
  "llama-3.1-8b": { input: 0.05, output: 0.08 },
  "mixtral-8x7b": { input: 0.24, output: 0.24 },
  // Cohere
  "command-r-plus": { input: 2.5, output: 10 },
  "command-r": { input: 0.15, output: 0.6 }
};
/**
 * Calculate the USD cost of a call.
 *
 * Lookup is exact first, then falls back to the LONGEST matching key prefix
 * so versioned names resolve correctly (e.g. "o1-mini-2025-01-01" must match
 * "o1-mini", not "o1" — the previous first-match scan picked "o1").
 * Unknown models cost 0.
 *
 * Cached input tokens are billed at the cached rate when the model defines
 * one, otherwise at the regular input rate (previously they were billed at 0,
 * silently undercounting cost for models without a cached price).
 *
 * @param model        model identifier (may include a version suffix)
 * @param inputTokens  total prompt tokens, INCLUDING cached ones
 * @param outputTokens completion tokens
 * @param cachedTokens cache-hit subset of inputTokens (default 0)
 * @returns cost in USD (0 for unknown models)
 */
function calculateCost(model, inputTokens, outputTokens, cachedTokens = 0) {
  let pricing = MODEL_PRICING[model];
  if (!pricing) {
    const match = Object.keys(MODEL_PRICING)
      .filter((key) => model.startsWith(key))
      .sort((a, b) => b.length - a.length)[0];
    pricing = match ? MODEL_PRICING[match] : void 0;
  }
  if (!pricing) return 0;
  const cachedRate = pricing.cached ?? pricing.input;
  const inputCost = (inputTokens - cachedTokens) / 1e6 * pricing.input;
  const outputCost = outputTokens / 1e6 * pricing.output;
  const cachedCost = cachedTokens / 1e6 * cachedRate;
  return inputCost + outputCost + cachedCost;
}
202
+
203
// src/openai.ts
/**
 * Instrument an OpenAI client in place. Monkey-patches
 * `chat.completions.create` (and `responses.create` when the SDK exposes it)
 * so every call — success or failure — is captured as a trace on the global
 * ATO client. Returns the same (now wrapped) client instance.
 */
function wrapOpenAI(client) {
  const originalCreate = client.chat.completions.create.bind(client.chat.completions);
  client.chat.completions.create = async function(...args) {
    const params = args[0] || {};
    const start = Date.now();
    // Streaming calls are traced by wrapping the returned async iterator.
    if (params.stream) {
      return handleStream(originalCreate, params, args, start);
    }
    try {
      const result = await originalCreate(...args);
      const duration = Date.now() - start;
      const inputTokens = result.usage?.prompt_tokens || 0;
      const outputTokens = result.usage?.completion_tokens || 0;
      const cachedTokens = result.usage?.prompt_tokens_details?.cached_tokens || 0;
      const trace = {
        id: generateTraceId(),
        provider: "openai",
        // Prefer the resolved model the API reports over the request param.
        model: result.model || params.model || "unknown",
        inputTokens,
        outputTokens,
        cachedTokens,
        totalTokens: inputTokens + outputTokens,
        costUsd: calculateCost(result.model || params.model, inputTokens, outputTokens, cachedTokens),
        durationMs: duration,
        status: "success",
        metadata: {
          finishReason: result.choices?.[0]?.finish_reason,
          maxTokens: params.max_tokens || params.max_completion_tokens,
          temperature: params.temperature,
          toolCalls: result.choices?.[0]?.message?.tool_calls?.length,
          systemFingerprint: result.system_fingerprint
        },
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      };
      getClient().capture(trace);
      return result;
    } catch (err) {
      // Failed calls still produce a zero-token error trace, then rethrow.
      const duration = Date.now() - start;
      const trace = {
        id: generateTraceId(),
        provider: "openai",
        model: params.model || "unknown",
        inputTokens: 0,
        outputTokens: 0,
        cachedTokens: 0,
        totalTokens: 0,
        costUsd: 0,
        durationMs: duration,
        status: "error",
        error: err.message || String(err),
        metadata: {
          errorType: err.constructor?.name,
          statusCode: err.status,
          errorCode: err.code
        },
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      };
      getClient().capture(trace);
      throw err;
    }
  };
  // Newer Responses API: instrumented the same way when available.
  if (client.responses?.create) {
    const originalResponses = client.responses.create.bind(client.responses);
    client.responses.create = async function(...args) {
      const params = args[0] || {};
      const start = Date.now();
      try {
        const result = await originalResponses(...args);
        const duration = Date.now() - start;
        // Responses API reports usage as input_tokens/output_tokens.
        const inputTokens = result.usage?.input_tokens || 0;
        const outputTokens = result.usage?.output_tokens || 0;
        const trace = {
          id: generateTraceId(),
          provider: "openai",
          model: result.model || params.model || "unknown",
          inputTokens,
          outputTokens,
          cachedTokens: 0,
          totalTokens: inputTokens + outputTokens,
          costUsd: calculateCost(result.model || params.model, inputTokens, outputTokens),
          durationMs: duration,
          status: "success",
          metadata: { api: "responses" },
          timestamp: (/* @__PURE__ */ new Date()).toISOString()
        };
        getClient().capture(trace);
        return result;
      } catch (err) {
        const duration = Date.now() - start;
        getClient().capture({
          id: generateTraceId(),
          provider: "openai",
          model: params.model || "unknown",
          inputTokens: 0,
          outputTokens: 0,
          cachedTokens: 0,
          totalTokens: 0,
          costUsd: 0,
          durationMs: duration,
          status: "error",
          error: err.message || String(err),
          metadata: { api: "responses", errorType: err.constructor?.name },
          timestamp: (/* @__PURE__ */ new Date()).toISOString()
        });
        throw err;
      }
    };
  }
  return client;
}
314
/**
 * Trace a streaming chat-completion call. Wraps the stream's async iterator
 * so chunks pass through untouched while token usage, model and finish
 * reason are accumulated; exactly one trace is captured when iteration ends.
 *
 * Fix: the previous version only captured after a clean loop exit, so a
 * stream error or an early consumer break produced NO trace at all. The
 * iteration is now wrapped in try/catch/finally: errors emit an "error"
 * trace (and rethrow), and early exits still emit a "success" trace with
 * whatever was observed so far.
 *
 * Note: usage totals are only present when the caller requests them via
 * `stream_options: { include_usage: true }` — otherwise they remain 0.
 */
async function handleStream(originalCreate, params, args, start) {
  const result = await originalCreate(...args);
  if (result && Symbol.asyncIterator in result) {
    const originalIterator = result[Symbol.asyncIterator].bind(result);
    let totalInputTokens = 0;
    let totalOutputTokens = 0;
    let model = params.model || "unknown";
    let finishReason = "";
    result[Symbol.asyncIterator] = async function* () {
      let captured = false;
      // Emit exactly one trace per consumed stream.
      const captureTrace = (status, error) => {
        if (captured) return;
        captured = true;
        const trace = {
          id: generateTraceId(),
          provider: "openai",
          model,
          inputTokens: totalInputTokens,
          outputTokens: totalOutputTokens,
          cachedTokens: 0,
          totalTokens: totalInputTokens + totalOutputTokens,
          costUsd: calculateCost(model, totalInputTokens, totalOutputTokens),
          durationMs: Date.now() - start,
          status,
          metadata: { streaming: true, finishReason },
          timestamp: (/* @__PURE__ */ new Date()).toISOString()
        };
        if (error) trace.error = error;
        getClient().capture(trace);
      };
      try {
        for await (const chunk of originalIterator()) {
          // The final usage chunk (when requested) carries cumulative totals.
          if (chunk.usage) {
            totalInputTokens = chunk.usage.prompt_tokens || 0;
            totalOutputTokens = chunk.usage.completion_tokens || 0;
          }
          if (chunk.model) model = chunk.model;
          if (chunk.choices?.[0]?.finish_reason) {
            finishReason = chunk.choices[0].finish_reason;
          }
          yield chunk;
        }
      } catch (err) {
        captureTrace("error", err.message || String(err));
        throw err;
      } finally {
        // Covers normal completion AND an early consumer break.
        captureTrace("success");
      }
    };
  }
  return result;
}
353
+ // Annotate the CommonJS export names for ESM import in node:
354
+ 0 && (module.exports = {
355
+ MODEL_PRICING,
356
+ calculateCost,
357
+ getClient,
358
+ init,
359
+ wrapOpenAI
360
+ });
@@ -0,0 +1,165 @@
1
+ import {
2
+ MODEL_PRICING,
3
+ calculateCost,
4
+ generateTraceId,
5
+ getClient,
6
+ init
7
+ } from "./chunk-Q2LJUUHK.mjs";
8
+
9
// src/openai.ts
/**
 * Instrument an OpenAI client in place. Monkey-patches
 * `chat.completions.create` (and `responses.create` when the SDK exposes it)
 * so every call — success or failure — is captured as a trace on the global
 * ATO client. Returns the same (now wrapped) client instance.
 */
function wrapOpenAI(client) {
  const originalCreate = client.chat.completions.create.bind(client.chat.completions);
  client.chat.completions.create = async function(...args) {
    const params = args[0] || {};
    const start = Date.now();
    // Streaming calls are traced by wrapping the returned async iterator.
    if (params.stream) {
      return handleStream(originalCreate, params, args, start);
    }
    try {
      const result = await originalCreate(...args);
      const duration = Date.now() - start;
      const inputTokens = result.usage?.prompt_tokens || 0;
      const outputTokens = result.usage?.completion_tokens || 0;
      const cachedTokens = result.usage?.prompt_tokens_details?.cached_tokens || 0;
      const trace = {
        id: generateTraceId(),
        provider: "openai",
        // Prefer the resolved model the API reports over the request param.
        model: result.model || params.model || "unknown",
        inputTokens,
        outputTokens,
        cachedTokens,
        totalTokens: inputTokens + outputTokens,
        costUsd: calculateCost(result.model || params.model, inputTokens, outputTokens, cachedTokens),
        durationMs: duration,
        status: "success",
        metadata: {
          finishReason: result.choices?.[0]?.finish_reason,
          maxTokens: params.max_tokens || params.max_completion_tokens,
          temperature: params.temperature,
          toolCalls: result.choices?.[0]?.message?.tool_calls?.length,
          systemFingerprint: result.system_fingerprint
        },
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      };
      getClient().capture(trace);
      return result;
    } catch (err) {
      // Failed calls still produce a zero-token error trace, then rethrow.
      const duration = Date.now() - start;
      const trace = {
        id: generateTraceId(),
        provider: "openai",
        model: params.model || "unknown",
        inputTokens: 0,
        outputTokens: 0,
        cachedTokens: 0,
        totalTokens: 0,
        costUsd: 0,
        durationMs: duration,
        status: "error",
        error: err.message || String(err),
        metadata: {
          errorType: err.constructor?.name,
          statusCode: err.status,
          errorCode: err.code
        },
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      };
      getClient().capture(trace);
      throw err;
    }
  };
  // Newer Responses API: instrumented the same way when available.
  if (client.responses?.create) {
    const originalResponses = client.responses.create.bind(client.responses);
    client.responses.create = async function(...args) {
      const params = args[0] || {};
      const start = Date.now();
      try {
        const result = await originalResponses(...args);
        const duration = Date.now() - start;
        // Responses API reports usage as input_tokens/output_tokens.
        const inputTokens = result.usage?.input_tokens || 0;
        const outputTokens = result.usage?.output_tokens || 0;
        const trace = {
          id: generateTraceId(),
          provider: "openai",
          model: result.model || params.model || "unknown",
          inputTokens,
          outputTokens,
          cachedTokens: 0,
          totalTokens: inputTokens + outputTokens,
          costUsd: calculateCost(result.model || params.model, inputTokens, outputTokens),
          durationMs: duration,
          status: "success",
          metadata: { api: "responses" },
          timestamp: (/* @__PURE__ */ new Date()).toISOString()
        };
        getClient().capture(trace);
        return result;
      } catch (err) {
        const duration = Date.now() - start;
        getClient().capture({
          id: generateTraceId(),
          provider: "openai",
          model: params.model || "unknown",
          inputTokens: 0,
          outputTokens: 0,
          cachedTokens: 0,
          totalTokens: 0,
          costUsd: 0,
          durationMs: duration,
          status: "error",
          error: err.message || String(err),
          metadata: { api: "responses", errorType: err.constructor?.name },
          timestamp: (/* @__PURE__ */ new Date()).toISOString()
        });
        throw err;
      }
    };
  }
  return client;
}
120
/**
 * Trace a streaming chat-completion call. Wraps the stream's async iterator
 * so chunks pass through untouched while token usage, model and finish
 * reason are accumulated; exactly one trace is captured when iteration ends.
 *
 * Fix: the previous version only captured after a clean loop exit, so a
 * stream error or an early consumer break produced NO trace at all. The
 * iteration is now wrapped in try/catch/finally: errors emit an "error"
 * trace (and rethrow), and early exits still emit a "success" trace with
 * whatever was observed so far.
 *
 * Note: usage totals are only present when the caller requests them via
 * `stream_options: { include_usage: true }` — otherwise they remain 0.
 */
async function handleStream(originalCreate, params, args, start) {
  const result = await originalCreate(...args);
  if (result && Symbol.asyncIterator in result) {
    const originalIterator = result[Symbol.asyncIterator].bind(result);
    let totalInputTokens = 0;
    let totalOutputTokens = 0;
    let model = params.model || "unknown";
    let finishReason = "";
    result[Symbol.asyncIterator] = async function* () {
      let captured = false;
      // Emit exactly one trace per consumed stream.
      const captureTrace = (status, error) => {
        if (captured) return;
        captured = true;
        const trace = {
          id: generateTraceId(),
          provider: "openai",
          model,
          inputTokens: totalInputTokens,
          outputTokens: totalOutputTokens,
          cachedTokens: 0,
          totalTokens: totalInputTokens + totalOutputTokens,
          costUsd: calculateCost(model, totalInputTokens, totalOutputTokens),
          durationMs: Date.now() - start,
          status,
          metadata: { streaming: true, finishReason },
          timestamp: (/* @__PURE__ */ new Date()).toISOString()
        };
        if (error) trace.error = error;
        getClient().capture(trace);
      };
      try {
        for await (const chunk of originalIterator()) {
          // The final usage chunk (when requested) carries cumulative totals.
          if (chunk.usage) {
            totalInputTokens = chunk.usage.prompt_tokens || 0;
            totalOutputTokens = chunk.usage.completion_tokens || 0;
          }
          if (chunk.model) model = chunk.model;
          if (chunk.choices?.[0]?.finish_reason) {
            finishReason = chunk.choices[0].finish_reason;
          }
          yield chunk;
        }
      } catch (err) {
        captureTrace("error", err.message || String(err));
        throw err;
      } finally {
        // Covers normal completion AND an early consumer break.
        captureTrace("success");
      }
    };
  }
  return result;
}
159
+ export {
160
+ MODEL_PRICING,
161
+ calculateCost,
162
+ getClient,
163
+ init,
164
+ wrapOpenAI
165
+ };
@@ -0,0 +1,104 @@
1
/**
 * Core types for ATO SDK traces
 */
interface AtoTrace {
    /** Unique trace ID ("ato_<timestamp>_<random>", see generateTraceId) */
    id: string;
    /** Provider name, e.g. "openai" */
    provider: string;
    /** Model identifier as reported by the provider (or the request param) */
    model: string;
    /** Total prompt tokens, including cached ones */
    inputTokens: number;
    /** Completion tokens */
    outputTokens: number;
    /** Cache-hit input tokens (0 when the provider reports none) */
    cachedTokens: number;
    /** inputTokens + outputTokens */
    totalTokens: number;
    /** Estimated cost in USD, derived from MODEL_PRICING */
    costUsd: number;
    /** Wall-clock duration of the call in milliseconds */
    durationMs: number;
    status: 'success' | 'error';
    /** Error message; present when status is 'error' */
    error?: string;
    /** Call details (finish reason, temperature, …); merged with AtoConfig.defaultMetadata on capture */
    metadata: Record<string, unknown>;
    /** ISO-8601 timestamp of when the trace was created */
    timestamp: string;
    /** Session grouping; defaults from AtoConfig.sessionId when unset */
    sessionId?: string;
    /** User attribution; defaults from AtoConfig.userId when unset */
    userId?: string;
    /** Free-form tags; AtoConfig.defaultTags are appended on capture */
    tags?: string[];
}
interface AtoConfig {
    /** ATO Cloud API key (from app.agentictool.ai/settings) */
    apiKey?: string;
    /** ATO Cloud endpoint (default: https://api.agentictool.ai) */
    endpoint?: string;
    /** Send traces in batches for performance (default: true) */
    batching?: boolean;
    /** Batch flush interval in ms (default: 5000) */
    flushInterval?: number;
    /** Max batch size before auto-flush (default: 50) */
    maxBatchSize?: number;
    /** Log traces to console for debugging (default: false) */
    debug?: boolean;
    /** Default tags applied to all traces */
    defaultTags?: string[];
    /** Default metadata applied to all traces */
    defaultMetadata?: Record<string, unknown>;
    /** Session ID for grouping traces */
    sessionId?: string;
    /** User ID for attribution */
    userId?: string;
    /** Disable sending to cloud (local logging only) */
    localOnly?: boolean;
}
/** Wire format for a batched trace upload to the ingest endpoint */
interface BatchPayload {
    traces: AtoTrace[];
    /** SDK package name, e.g. "@ato/sdk" */
    sdk: string;
    sdkVersion: string;
    /** ISO-8601 time the batch was sent */
    sentAt: string;
}
52
+
53
/**
 * ATO Client — manages trace collection and cloud sync
 */

declare class AtoClient {
    private config;
    private queue;
    private timer;
    constructor(config?: AtoConfig);
    /**
     * Record a trace
     */
    capture(trace: AtoTrace): void;
    /**
     * Flush pending traces to ATO Cloud
     */
    flush(): Promise<void>;
    /**
     * Shutdown — flush remaining traces
     */
    shutdown(): Promise<void>;
    /** Returns a copy of the resolved configuration */
    getConfig(): AtoConfig;
}
/**
 * Initialize the global ATO client
 */
declare function init(config?: AtoConfig): AtoClient;
/**
 * Get or create the global client
 */
declare function getClient(): AtoClient;
/**
 * Generate a unique trace ID
 */
declare function generateTraceId(): string;
88
+
89
/**
 * LLM model pricing (per 1M tokens)
 * Updated: April 2026
 */
interface ModelPricing {
    /** USD per 1M input (prompt) tokens */
    input: number;
    /** USD per 1M output (completion) tokens */
    output: number;
    /** USD per 1M cache-hit input tokens, where the provider discounts them */
    cached?: number;
}
declare const MODEL_PRICING: Record<string, ModelPricing>;
/**
 * Calculate cost for a given model and token usage
 */
declare function calculateCost(model: string, inputTokens: number, outputTokens: number, cachedTokens?: number): number;
103
+
104
+ export { type AtoConfig as A, type BatchPayload as B, MODEL_PRICING as M, type AtoTrace as a, AtoClient as b, calculateCost as c, type ModelPricing as d, generateTraceId as e, getClient as g, init as i };