@ato-sdk/js 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,180 @@
1
// src/client.ts
var SDK_VERSION = "0.1.0";
var DEFAULT_ENDPOINT = "https://api.agentictool.ai";
var DEFAULT_FLUSH_INTERVAL = 5e3;
var DEFAULT_MAX_BATCH_SIZE = 50;
var globalClient = null;
/**
 * Buffers LLM traces and ships them in batches to the ATO ingest endpoint.
 *
 * Config keys (all optional): endpoint, apiKey, batching, flushInterval,
 * maxBatchSize, debug, localOnly, sessionId, userId, defaultTags,
 * defaultMetadata. Unspecified keys fall back to the defaults above.
 */
var AtoClient = class {
  constructor(config = {}) {
    this.queue = [];
    this.timer = null;
    this.config = {
      endpoint: DEFAULT_ENDPOINT,
      batching: true,
      flushInterval: DEFAULT_FLUSH_INTERVAL,
      maxBatchSize: DEFAULT_MAX_BATCH_SIZE,
      debug: false,
      localOnly: false,
      ...config
    };
    if (this.config.batching && !this.config.localOnly) {
      this.timer = setInterval(() => this.flush(), this.config.flushInterval);
      // In Node the handle is an object with unref(); detach it so the
      // periodic flush never keeps the process alive. In browsers setInterval
      // returns a number and this branch is skipped.
      if (this.timer && typeof this.timer === "object" && "unref" in this.timer) {
        this.timer.unref();
      }
    }
  }
  /**
   * Record a trace. Configured defaults (tags, metadata, session/user ids)
   * are applied to a shallow copy — the caller's object is never mutated —
   * and the copy is queued. Flushes immediately when batching is disabled
   * or the queue has reached maxBatchSize.
   */
  capture(trace) {
    // Fix: operate on a shallow copy so defaults never leak back into the
    // caller's trace object.
    const entry = { ...trace };
    if (this.config.defaultTags) {
      entry.tags = [...entry.tags || [], ...this.config.defaultTags];
    }
    if (this.config.defaultMetadata) {
      // Per-trace metadata wins over configured defaults.
      entry.metadata = { ...this.config.defaultMetadata, ...entry.metadata };
    }
    if (this.config.sessionId && !entry.sessionId) {
      entry.sessionId = this.config.sessionId;
    }
    if (this.config.userId && !entry.userId) {
      entry.userId = this.config.userId;
    }
    if (this.config.debug) {
      console.log("[ato]", JSON.stringify(entry, null, 2));
    }
    if (this.config.localOnly) return;
    this.queue.push(entry);
    if (!this.config.batching || this.queue.length >= this.config.maxBatchSize) {
      this.flush();
    }
  }
  /**
   * Flush pending traces to ATO Cloud. Never rejects: errors are caught,
   * logged only in debug mode, and the failed batch is re-queued (the queue
   * is trimmed to its newest 500 entries once it exceeds 1000).
   */
  async flush() {
    if (this.queue.length === 0) return;
    const traces = this.queue.splice(0);
    const payload = {
      traces,
      sdk: "@ato/sdk",
      sdkVersion: SDK_VERSION,
      sentAt: (/* @__PURE__ */ new Date()).toISOString()
    };
    try {
      const headers = {
        "Content-Type": "application/json"
      };
      if (this.config.apiKey) {
        headers["Authorization"] = `Bearer ${this.config.apiKey}`;
      }
      const res = await fetch(`${this.config.endpoint}/api/analytics/ingest`, {
        method: "POST",
        headers,
        body: JSON.stringify(payload)
      });
      // NOTE(review): a non-OK HTTP response drops the batch (only thrown
      // errors re-queue) — presumably to avoid retrying permanent 4xx
      // failures; confirm this is intended for transient 5xx as well.
      if (!res.ok && this.config.debug) {
        console.error("[ato] Failed to send traces:", res.status, await res.text());
      }
    } catch (err) {
      if (this.config.debug) {
        console.error("[ato] Failed to send traces:", err);
      }
      // Put the batch back for the next attempt, bounding memory by
      // dropping the oldest entries once the backlog passes 1000.
      this.queue.unshift(...traces);
      if (this.queue.length > 1e3) {
        this.queue.splice(0, this.queue.length - 500);
      }
    }
  }
  /**
   * Shutdown — stop the periodic timer, then flush remaining traces.
   */
  async shutdown() {
    if (this.timer) {
      clearInterval(this.timer);
      this.timer = null;
    }
    await this.flush();
  }
  /** Snapshot of the effective configuration (defensive shallow copy). */
  getConfig() {
    return { ...this.config };
  }
};
103
/**
 * Initialize the SDK: build a fresh client from `config` and install it as
 * the shared global client used by capture()/flush()/shutdown().
 */
function init(config = {}) {
  const client = new AtoClient(config);
  globalClient = client;
  return client;
}
107
/**
 * Return the shared global client, lazily creating a default-configured
 * one on first use.
 */
function getClient() {
  if (globalClient === null) {
    globalClient = new AtoClient();
  }
  return globalClient;
}
113
/** Build a unique-ish trace id of the form "ato_<epoch ms>_<base36 suffix>". */
function generateTraceId() {
  const stamp = Date.now();
  const suffix = Math.random().toString(36).slice(2, 10);
  return "ato_" + stamp + "_" + suffix;
}
116
+
117
// src/pricing.ts
// USD per 1M tokens. `cached` is the discounted rate for cache-read input
// tokens; models without a `cached` entry have no cache rate listed here.
var MODEL_PRICING = {
  // Anthropic
  "claude-opus-4-6": { input: 15, output: 75, cached: 1.5 },
  "claude-sonnet-4-6": { input: 3, output: 15, cached: 0.3 },
  "claude-haiku-4-5": { input: 0.8, output: 4, cached: 0.08 },
  "claude-sonnet-4-5": { input: 3, output: 15, cached: 0.3 },
  "claude-3-5-sonnet": { input: 3, output: 15, cached: 0.3 },
  "claude-3-5-haiku": { input: 0.8, output: 4, cached: 0.08 },
  "claude-3-opus": { input: 15, output: 75, cached: 1.5 },
  "claude-3-sonnet": { input: 3, output: 15 },
  "claude-3-haiku": { input: 0.25, output: 1.25 },
  // OpenAI
  "gpt-4o": { input: 2.5, output: 10, cached: 1.25 },
  "gpt-4o-mini": { input: 0.15, output: 0.6, cached: 0.075 },
  "gpt-4-turbo": { input: 10, output: 30 },
  "gpt-4": { input: 30, output: 60 },
  "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
  "o1": { input: 15, output: 60, cached: 7.5 },
  "o1-mini": { input: 3, output: 12, cached: 1.5 },
  "o1-pro": { input: 150, output: 600 },
  "o3": { input: 10, output: 40, cached: 2.5 },
  "o3-mini": { input: 1.1, output: 4.4, cached: 0.55 },
  "o4-mini": { input: 1.1, output: 4.4, cached: 0.275 },
  "gpt-4.1": { input: 2, output: 8, cached: 0.5 },
  "gpt-4.1-mini": { input: 0.4, output: 1.6, cached: 0.1 },
  "gpt-4.1-nano": { input: 0.1, output: 0.4, cached: 0.025 },
  // Google
  "gemini-2.5-pro": { input: 1.25, output: 10 },
  "gemini-2.5-flash": { input: 0.15, output: 0.6 },
  "gemini-2.0-flash": { input: 0.1, output: 0.4 },
  "gemini-1.5-pro": { input: 1.25, output: 5 },
  "gemini-1.5-flash": { input: 0.075, output: 0.3 },
  // Mistral
  "mistral-large": { input: 2, output: 6 },
  "mistral-small": { input: 0.2, output: 0.6 },
  "codestral": { input: 0.3, output: 0.9 },
  // Groq (inference pricing)
  "llama-3.3-70b": { input: 0.59, output: 0.79 },
  "llama-3.1-8b": { input: 0.05, output: 0.08 },
  "mixtral-8x7b": { input: 0.24, output: 0.24 },
  // Cohere
  "command-r-plus": { input: 2.5, output: 10 },
  "command-r": { input: 0.15, output: 0.6 }
};
/**
 * Estimate the USD cost of one LLM call.
 * Looks up the model by exact name first, then falls back to the LONGEST
 * pricing key that prefixes the model id. (Fix: first-match iteration was
 * wrong — e.g. "gpt-4.1-2025-04-14" matched "gpt-4" at $30/M instead of
 * "gpt-4.1" at $2/M, and "o1-mini-…" matched "o1".)
 * Returns 0 for unknown models.
 */
function calculateCost(model, inputTokens, outputTokens, cachedTokens = 0) {
  let pricing = MODEL_PRICING[model];
  if (!pricing) {
    let bestKeyLength = 0;
    for (const [key, value] of Object.entries(MODEL_PRICING)) {
      if (key.length > bestKeyLength && model.startsWith(key)) {
        bestKeyLength = key.length;
        pricing = value;
      }
    }
  }
  if (!pricing) return 0;
  // Cached tokens are billed separately; clamp so a cachedTokens value
  // larger than inputTokens can never produce a negative cost.
  const billedInput = Math.max(inputTokens - cachedTokens, 0);
  const inputCost = billedInput / 1e6 * pricing.input;
  const outputCost = outputTokens / 1e6 * pricing.output;
  // NOTE(review): models without a `cached` rate bill cached tokens at 0
  // (they are still excluded from input) — confirm this is intended.
  const cachedCost = pricing.cached ? cachedTokens / 1e6 * pricing.cached : 0;
  return inputCost + outputCost + cachedCost;
}
172
+
173
+ export {
174
+ AtoClient,
175
+ init,
176
+ getClient,
177
+ generateTraceId,
178
+ MODEL_PRICING,
179
+ calculateCost
180
+ };
@@ -0,0 +1,33 @@
1
+ import { a as AtoTrace } from './pricing-DnUk84wO.mjs';
2
+ export { b as AtoClient, A as AtoConfig, B as BatchPayload, M as MODEL_PRICING, d as ModelPricing, c as calculateCost, e as generateTraceId, g as getClient, i as init } from './pricing-DnUk84wO.mjs';
3
+
4
/**
 * @ato/sdk — Auto-capture LLM traces for ATO
 *
 * Quick start:
 *   import { init } from '@ato/sdk';
 *   init({ apiKey: 'your-ato-api-key' });
 *
 * Then wrap your LLM client:
 *   import { wrapAnthropic } from '@ato/sdk/anthropic';
 *   import { wrapOpenAI } from '@ato/sdk/openai';
 *
 * Or capture traces manually:
 *   import { capture } from '@ato/sdk';
 *   capture({ provider: 'custom', model: 'my-model', ... });
 */

/**
 * Capture a trace manually (for custom providers).
 * Delegates to the shared global client (created on first use); with
 * batching enabled the trace is queued and sent on the flush interval.
 */
declare function capture(trace: AtoTrace): void;
/**
 * Flush all pending traces to the configured endpoint.
 * Resolves once the current queue has been sent (or re-queued on failure).
 */
declare function flush(): Promise<void>;
/**
 * Shutdown the SDK: stops the periodic flush timer, then flushes once more.
 */
declare function shutdown(): Promise<void>;

export { AtoTrace, capture, flush, shutdown };
@@ -0,0 +1,33 @@
1
+ import { a as AtoTrace } from './pricing-DnUk84wO.js';
2
+ export { b as AtoClient, A as AtoConfig, B as BatchPayload, M as MODEL_PRICING, d as ModelPricing, c as calculateCost, e as generateTraceId, g as getClient, i as init } from './pricing-DnUk84wO.js';
3
+
4
/**
 * @ato/sdk — Auto-capture LLM traces for ATO
 *
 * Quick start:
 *   import { init } from '@ato/sdk';
 *   init({ apiKey: 'your-ato-api-key' });
 *
 * Then wrap your LLM client:
 *   import { wrapAnthropic } from '@ato/sdk/anthropic';
 *   import { wrapOpenAI } from '@ato/sdk/openai';
 *
 * Or capture traces manually:
 *   import { capture } from '@ato/sdk';
 *   capture({ provider: 'custom', model: 'my-model', ... });
 */

/**
 * Capture a trace manually (for custom providers).
 * Delegates to the shared global client (created on first use); with
 * batching enabled the trace is queued and sent on the flush interval.
 */
declare function capture(trace: AtoTrace): void;
/**
 * Flush all pending traces to the configured endpoint.
 * Resolves once the current queue has been sent (or re-queued on failure).
 */
declare function flush(): Promise<void>;
/**
 * Shutdown the SDK: stops the periodic flush timer, then flushes once more.
 */
declare function shutdown(): Promise<void>;

export { AtoTrace, capture, flush, shutdown };
package/dist/index.js ADDED
@@ -0,0 +1,228 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Public export surface of the CommonJS bundle, wired up as live getters.
var index_exports = {};
__export(index_exports, {
  AtoClient: () => AtoClient,
  MODEL_PRICING: () => MODEL_PRICING,
  calculateCost: () => calculateCost,
  capture: () => capture,
  flush: () => flush,
  generateTraceId: () => generateTraceId,
  getClient: () => getClient,
  init: () => init,
  shutdown: () => shutdown
});
module.exports = __toCommonJS(index_exports);
34
+
35
// src/client.ts
var SDK_VERSION = "0.1.0";
var DEFAULT_ENDPOINT = "https://api.agentictool.ai";
var DEFAULT_FLUSH_INTERVAL = 5e3;
var DEFAULT_MAX_BATCH_SIZE = 50;
var globalClient = null;
/**
 * Buffers LLM traces and ships them in batches to the ATO ingest endpoint.
 *
 * Config keys (all optional): endpoint, apiKey, batching, flushInterval,
 * maxBatchSize, debug, localOnly, sessionId, userId, defaultTags,
 * defaultMetadata. Unspecified keys fall back to the defaults above.
 */
var AtoClient = class {
  constructor(config = {}) {
    this.queue = [];
    this.timer = null;
    this.config = {
      endpoint: DEFAULT_ENDPOINT,
      batching: true,
      flushInterval: DEFAULT_FLUSH_INTERVAL,
      maxBatchSize: DEFAULT_MAX_BATCH_SIZE,
      debug: false,
      localOnly: false,
      ...config
    };
    if (this.config.batching && !this.config.localOnly) {
      this.timer = setInterval(() => this.flush(), this.config.flushInterval);
      // In Node the handle is an object with unref(); detach it so the
      // periodic flush never keeps the process alive. In browsers setInterval
      // returns a number and this branch is skipped.
      if (this.timer && typeof this.timer === "object" && "unref" in this.timer) {
        this.timer.unref();
      }
    }
  }
  /**
   * Record a trace. Configured defaults (tags, metadata, session/user ids)
   * are applied to a shallow copy — the caller's object is never mutated —
   * and the copy is queued. Flushes immediately when batching is disabled
   * or the queue has reached maxBatchSize.
   */
  capture(trace) {
    // Fix: operate on a shallow copy so defaults never leak back into the
    // caller's trace object.
    const entry = { ...trace };
    if (this.config.defaultTags) {
      entry.tags = [...entry.tags || [], ...this.config.defaultTags];
    }
    if (this.config.defaultMetadata) {
      // Per-trace metadata wins over configured defaults.
      entry.metadata = { ...this.config.defaultMetadata, ...entry.metadata };
    }
    if (this.config.sessionId && !entry.sessionId) {
      entry.sessionId = this.config.sessionId;
    }
    if (this.config.userId && !entry.userId) {
      entry.userId = this.config.userId;
    }
    if (this.config.debug) {
      console.log("[ato]", JSON.stringify(entry, null, 2));
    }
    if (this.config.localOnly) return;
    this.queue.push(entry);
    if (!this.config.batching || this.queue.length >= this.config.maxBatchSize) {
      this.flush();
    }
  }
  /**
   * Flush pending traces to ATO Cloud. Never rejects: errors are caught,
   * logged only in debug mode, and the failed batch is re-queued (the queue
   * is trimmed to its newest 500 entries once it exceeds 1000).
   */
  async flush() {
    if (this.queue.length === 0) return;
    const traces = this.queue.splice(0);
    const payload = {
      traces,
      sdk: "@ato/sdk",
      sdkVersion: SDK_VERSION,
      sentAt: (/* @__PURE__ */ new Date()).toISOString()
    };
    try {
      const headers = {
        "Content-Type": "application/json"
      };
      if (this.config.apiKey) {
        headers["Authorization"] = `Bearer ${this.config.apiKey}`;
      }
      const res = await fetch(`${this.config.endpoint}/api/analytics/ingest`, {
        method: "POST",
        headers,
        body: JSON.stringify(payload)
      });
      // NOTE(review): a non-OK HTTP response drops the batch (only thrown
      // errors re-queue) — presumably to avoid retrying permanent 4xx
      // failures; confirm this is intended for transient 5xx as well.
      if (!res.ok && this.config.debug) {
        console.error("[ato] Failed to send traces:", res.status, await res.text());
      }
    } catch (err) {
      if (this.config.debug) {
        console.error("[ato] Failed to send traces:", err);
      }
      // Put the batch back for the next attempt, bounding memory by
      // dropping the oldest entries once the backlog passes 1000.
      this.queue.unshift(...traces);
      if (this.queue.length > 1e3) {
        this.queue.splice(0, this.queue.length - 500);
      }
    }
  }
  /**
   * Shutdown — stop the periodic timer, then flush remaining traces.
   */
  async shutdown() {
    if (this.timer) {
      clearInterval(this.timer);
      this.timer = null;
    }
    await this.flush();
  }
  /** Snapshot of the effective configuration (defensive shallow copy). */
  getConfig() {
    return { ...this.config };
  }
};
137
/**
 * Initialize the SDK: build a fresh client from `config` and install it as
 * the shared global client used by capture()/flush()/shutdown().
 */
function init(config = {}) {
  const client = new AtoClient(config);
  globalClient = client;
  return client;
}
141
/**
 * Return the shared global client, lazily creating a default-configured
 * one on first use.
 */
function getClient() {
  if (globalClient === null) {
    globalClient = new AtoClient();
  }
  return globalClient;
}
147
/** Build a unique-ish trace id of the form "ato_<epoch ms>_<base36 suffix>". */
function generateTraceId() {
  const stamp = Date.now();
  const suffix = Math.random().toString(36).slice(2, 10);
  return "ato_" + stamp + "_" + suffix;
}
150
+
151
// src/pricing.ts
// USD per 1M tokens. `cached` is the discounted rate for cache-read input
// tokens; models without a `cached` entry have no cache rate listed here.
var MODEL_PRICING = {
  // Anthropic
  "claude-opus-4-6": { input: 15, output: 75, cached: 1.5 },
  "claude-sonnet-4-6": { input: 3, output: 15, cached: 0.3 },
  "claude-haiku-4-5": { input: 0.8, output: 4, cached: 0.08 },
  "claude-sonnet-4-5": { input: 3, output: 15, cached: 0.3 },
  "claude-3-5-sonnet": { input: 3, output: 15, cached: 0.3 },
  "claude-3-5-haiku": { input: 0.8, output: 4, cached: 0.08 },
  "claude-3-opus": { input: 15, output: 75, cached: 1.5 },
  "claude-3-sonnet": { input: 3, output: 15 },
  "claude-3-haiku": { input: 0.25, output: 1.25 },
  // OpenAI
  "gpt-4o": { input: 2.5, output: 10, cached: 1.25 },
  "gpt-4o-mini": { input: 0.15, output: 0.6, cached: 0.075 },
  "gpt-4-turbo": { input: 10, output: 30 },
  "gpt-4": { input: 30, output: 60 },
  "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
  "o1": { input: 15, output: 60, cached: 7.5 },
  "o1-mini": { input: 3, output: 12, cached: 1.5 },
  "o1-pro": { input: 150, output: 600 },
  "o3": { input: 10, output: 40, cached: 2.5 },
  "o3-mini": { input: 1.1, output: 4.4, cached: 0.55 },
  "o4-mini": { input: 1.1, output: 4.4, cached: 0.275 },
  "gpt-4.1": { input: 2, output: 8, cached: 0.5 },
  "gpt-4.1-mini": { input: 0.4, output: 1.6, cached: 0.1 },
  "gpt-4.1-nano": { input: 0.1, output: 0.4, cached: 0.025 },
  // Google
  "gemini-2.5-pro": { input: 1.25, output: 10 },
  "gemini-2.5-flash": { input: 0.15, output: 0.6 },
  "gemini-2.0-flash": { input: 0.1, output: 0.4 },
  "gemini-1.5-pro": { input: 1.25, output: 5 },
  "gemini-1.5-flash": { input: 0.075, output: 0.3 },
  // Mistral
  "mistral-large": { input: 2, output: 6 },
  "mistral-small": { input: 0.2, output: 0.6 },
  "codestral": { input: 0.3, output: 0.9 },
  // Groq (inference pricing)
  "llama-3.3-70b": { input: 0.59, output: 0.79 },
  "llama-3.1-8b": { input: 0.05, output: 0.08 },
  "mixtral-8x7b": { input: 0.24, output: 0.24 },
  // Cohere
  "command-r-plus": { input: 2.5, output: 10 },
  "command-r": { input: 0.15, output: 0.6 }
};
/**
 * Estimate the USD cost of one LLM call.
 * Looks up the model by exact name first, then falls back to the LONGEST
 * pricing key that prefixes the model id. (Fix: first-match iteration was
 * wrong — e.g. "gpt-4.1-2025-04-14" matched "gpt-4" at $30/M instead of
 * "gpt-4.1" at $2/M, and "o1-mini-…" matched "o1".)
 * Returns 0 for unknown models.
 */
function calculateCost(model, inputTokens, outputTokens, cachedTokens = 0) {
  let pricing = MODEL_PRICING[model];
  if (!pricing) {
    let bestKeyLength = 0;
    for (const [key, value] of Object.entries(MODEL_PRICING)) {
      if (key.length > bestKeyLength && model.startsWith(key)) {
        bestKeyLength = key.length;
        pricing = value;
      }
    }
  }
  if (!pricing) return 0;
  // Cached tokens are billed separately; clamp so a cachedTokens value
  // larger than inputTokens can never produce a negative cost.
  const billedInput = Math.max(inputTokens - cachedTokens, 0);
  const inputCost = billedInput / 1e6 * pricing.input;
  const outputCost = outputTokens / 1e6 * pricing.output;
  // NOTE(review): models without a `cached` rate bill cached tokens at 0
  // (they are still excluded from input) — confirm this is intended.
  const cachedCost = pricing.cached ? cachedTokens / 1e6 * pricing.cached : 0;
  return inputCost + outputCost + cachedCost;
}
206
+
207
// src/index.ts
/** Capture a trace through the shared global client. */
function capture(trace) {
  const client = getClient();
  client.capture(trace);
}
/** Flush all pending traces on the shared global client. */
function flush() {
  const client = getClient();
  return client.flush();
}
/** Shutdown the SDK: stop the flush timer, then drain the queue. */
function shutdown() {
  const client = getClient();
  return client.shutdown();
}
217
+ // Annotate the CommonJS export names for ESM import in node:
218
+ 0 && (module.exports = {
219
+ AtoClient,
220
+ MODEL_PRICING,
221
+ calculateCost,
222
+ capture,
223
+ flush,
224
+ generateTraceId,
225
+ getClient,
226
+ init,
227
+ shutdown
228
+ });
package/dist/index.mjs ADDED
@@ -0,0 +1,30 @@
1
+ import {
2
+ AtoClient,
3
+ MODEL_PRICING,
4
+ calculateCost,
5
+ generateTraceId,
6
+ getClient,
7
+ init
8
+ } from "./chunk-Q2LJUUHK.mjs";
9
+
10
// src/index.ts
/** Capture a trace through the shared global client. */
function capture(trace) {
  const client = getClient();
  client.capture(trace);
}
/** Flush all pending traces on the shared global client. */
function flush() {
  const client = getClient();
  return client.flush();
}
/** Shutdown the SDK: stop the flush timer, then drain the queue. */
function shutdown() {
  const client = getClient();
  return client.shutdown();
}
20
+ export {
21
+ AtoClient,
22
+ MODEL_PRICING,
23
+ calculateCost,
24
+ capture,
25
+ flush,
26
+ generateTraceId,
27
+ getClient,
28
+ init,
29
+ shutdown
30
+ };
@@ -0,0 +1,30 @@
1
+ export { A as AtoConfig, a as AtoTrace, M as MODEL_PRICING, c as calculateCost, g as getClient, i as init } from './pricing-DnUk84wO.mjs';
2
+
3
+ /**
4
+ * ATO wrapper for the OpenAI SDK
5
+ *
6
+ * Usage:
7
+ * import OpenAI from 'openai';
8
+ * import { wrapOpenAI } from '@ato/sdk/openai';
9
+ *
10
+ * const client = wrapOpenAI(new OpenAI());
11
+ * // All calls are now auto-traced
12
+ * const res = await client.chat.completions.create({ model: 'gpt-4o', ... });
13
+ */
14
// Minimal structural view of an OpenAI SDK client — only the surfaces the
// wrapper needs to see are typed.
type OpenAIClient = {
    chat: {
        completions: {
            create: (...args: any[]) => Promise<any>;
        };
    };
    // Responses API — optional; older OpenAI SDK versions do not expose it.
    responses?: {
        create: (...args: any[]) => Promise<any>;
    };
    // NOTE(review): the index signature keeps the rest of the client surface
    // accessible through the wrapper, at the cost of type precision.
    [key: string]: any;
};
/**
 * Wrap an OpenAI client to auto-capture traces.
 * Returns the same client type `T`, so existing call sites are unchanged.
 */
declare function wrapOpenAI<T extends OpenAIClient>(client: T): T;
29
+
30
+ export { wrapOpenAI };
@@ -0,0 +1,30 @@
1
+ export { A as AtoConfig, a as AtoTrace, M as MODEL_PRICING, c as calculateCost, g as getClient, i as init } from './pricing-DnUk84wO.js';
2
+
3
+ /**
4
+ * ATO wrapper for the OpenAI SDK
5
+ *
6
+ * Usage:
7
+ * import OpenAI from 'openai';
8
+ * import { wrapOpenAI } from '@ato/sdk/openai';
9
+ *
10
+ * const client = wrapOpenAI(new OpenAI());
11
+ * // All calls are now auto-traced
12
+ * const res = await client.chat.completions.create({ model: 'gpt-4o', ... });
13
+ */
14
// Minimal structural view of an OpenAI SDK client — only the surfaces the
// wrapper needs to see are typed.
type OpenAIClient = {
    chat: {
        completions: {
            create: (...args: any[]) => Promise<any>;
        };
    };
    // Responses API — optional; older OpenAI SDK versions do not expose it.
    responses?: {
        create: (...args: any[]) => Promise<any>;
    };
    // NOTE(review): the index signature keeps the rest of the client surface
    // accessible through the wrapper, at the cost of type precision.
    [key: string]: any;
};
/**
 * Wrap an OpenAI client to auto-capture traces.
 * Returns the same client type `T`, so existing call sites are unchanged.
 */
declare function wrapOpenAI<T extends OpenAIClient>(client: T): T;
29
+
30
+ export { wrapOpenAI };