@aispendguard/sdk 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +57 -0
- package/dist/client.js +15 -3
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -133,6 +133,63 @@ const event = createGeminiUsageEvent({
|
|
|
133
133
|
await trackUsage(event);
|
|
134
134
|
```
|
|
135
135
|
|
|
136
|
+
## Streaming Responses
|
|
137
|
+
|
|
138
|
+
With streaming, usage data arrives in the **final chunk**. Accumulate the stream, then track:
|
|
139
|
+
|
|
140
|
+
### OpenAI Streaming
|
|
141
|
+
```ts
|
|
142
|
+
const startedAt = Date.now();
|
|
143
|
+
const stream = await openai.chat.completions.create({
|
|
144
|
+
model: "gpt-4o-mini",
|
|
145
|
+
messages: [{ role: "user", content: "Hello" }],
|
|
146
|
+
stream: true,
|
|
147
|
+
stream_options: { include_usage: true }, // required for usage in stream
|
|
148
|
+
});
|
|
149
|
+
|
|
150
|
+
let usage;
|
|
151
|
+
for await (const chunk of stream) {
|
|
152
|
+
if (chunk.usage) usage = chunk.usage;
|
|
153
|
+
// ... process chunk.choices[0]?.delta
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
if (usage) {
|
|
157
|
+
const event = createOpenAIUsageEvent({
|
|
158
|
+
model: "gpt-4o-mini",
|
|
159
|
+
usage,
|
|
160
|
+
latencyMs: Date.now() - startedAt,
|
|
161
|
+
tags: { task_type: "chat", feature: "assistant", route: "POST /api/chat" },
|
|
162
|
+
});
|
|
163
|
+
await trackUsage(event);
|
|
164
|
+
}
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
### Anthropic Streaming
|
|
168
|
+
```ts
|
|
169
|
+
const startedAt = Date.now();
|
|
170
|
+
const stream = anthropic.messages.stream({
|
|
171
|
+
model: "claude-sonnet-4-20250514",
|
|
172
|
+
messages: [{ role: "user", content: "Hello" }],
|
|
173
|
+
max_tokens: 200,
|
|
174
|
+
});
|
|
175
|
+
|
|
176
|
+
for await (const event of stream) {
|
|
177
|
+
// ... process text events
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
const message = await stream.finalMessage();
|
|
181
|
+
|
|
182
|
+
const event = createAnthropicUsageEvent({
|
|
183
|
+
model: "claude-sonnet-4-20250514",
|
|
184
|
+
usage: message.usage,
|
|
185
|
+
latencyMs: Date.now() - startedAt,
|
|
186
|
+
tags: { task_type: "chat", feature: "assistant", route: "POST /api/chat" },
|
|
187
|
+
});
|
|
188
|
+
await trackUsage(event);
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
> **Key point:** OpenAI requires `stream_options: { include_usage: true }` — without it, usage is `null` in the stream. Anthropic provides usage on the final message automatically.
|
|
192
|
+
|
|
136
193
|
## API
|
|
137
194
|
- `init(config)`
|
|
138
195
|
- `trackUsage(event | event[])`
|
package/dist/client.js
CHANGED
|
@@ -38,7 +38,7 @@ class AISpendGuardClient {
|
|
|
38
38
|
if (this.strict) {
|
|
39
39
|
throw error;
|
|
40
40
|
}
|
|
41
|
-
this.logger.warn(`[aispendguard-sdk] ${message}`);
|
|
41
|
+
this.logger.warn(`[aispendguard-sdk] tracking failed: ${message}. Use { strict: true } to throw on errors.`);
|
|
42
42
|
return { ok: false, error: message };
|
|
43
43
|
}
|
|
44
44
|
}
|
|
@@ -74,16 +74,28 @@ class AISpendGuardClient {
|
|
|
74
74
|
body: JSON.stringify(payload),
|
|
75
75
|
signal: controller.signal
|
|
76
76
|
});
|
|
77
|
+
if (response.redirected) {
|
|
78
|
+
throw new Error(`ingest failed: redirected to ${response.url} — update your endpoint to "${response.url}"`);
|
|
79
|
+
}
|
|
77
80
|
const raw = (await response.json().catch(() => null));
|
|
78
81
|
if (!response.ok) {
|
|
79
|
-
const msg = raw?.errors?.join("; ") || `HTTP ${response.status}`;
|
|
80
|
-
throw new Error(`ingest failed: ${msg}`);
|
|
82
|
+
const msg = raw?.errors?.join("; ") || `HTTP ${response.status} ${response.statusText}`;
|
|
83
|
+
throw new Error(`ingest failed (${this.endpoint}): ${msg}`);
|
|
81
84
|
}
|
|
82
85
|
if (!raw) {
|
|
83
86
|
throw new Error("ingest failed: empty response body");
|
|
84
87
|
}
|
|
85
88
|
return raw;
|
|
86
89
|
}
|
|
90
|
+
catch (err) {
|
|
91
|
+
if (err instanceof Error && err.name === "AbortError") {
|
|
92
|
+
throw new Error(`ingest failed: request to ${this.endpoint} timed out after ${this.timeoutMs}ms`);
|
|
93
|
+
}
|
|
94
|
+
if (err instanceof TypeError) {
|
|
95
|
+
throw new Error(`ingest failed: network error reaching ${this.endpoint} — ${err.message}`);
|
|
96
|
+
}
|
|
97
|
+
throw err;
|
|
98
|
+
}
|
|
87
99
|
finally {
|
|
88
100
|
clearTimeout(timeout);
|
|
89
101
|
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@aispendguard/sdk",
|
|
3
|
-
"version": "0.1.0",
|
|
3
|
+
"version": "0.1.2",
|
|
4
4
|
"description": "Tags-only SDK for tracking AI API spend with AISpendGuard. Supports OpenAI, Anthropic, Google Gemini, LangChain.js, and OpenTelemetry.",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"main": "dist/index.js",
|