ai-spend-guard 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Khalid IBNFKIH
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,74 @@
+ # AI Spend Guard
+
+ **Stop your AI calls from wasting money.**
+
+ ### The Problem
+ Running AI calls in production gets expensive fast. A single classification task might cost $0.66 with GPT-4o when GPT-4o-mini would've cost $0.02.
+
+ **Without guardrails:** $0.66
+ **With Spend Guard:** $0.02
+ **Saved:** 97%
+
+ ---
+
+ ## Install
+
+ ```bash
+ npm install ai-spend-guard
+ ```
+
+ ## Quick start (policy only)
+
+ ```ts
+ import { createSpendGuard } from "ai-spend-guard";
+
+ const guard = createSpendGuard({
+   cacheRepeatedPrompts: true,
+   autoReroute: true,
+   maxCostPerCall: 0.2
+ });
+
+ const result = guard.evaluate({
+   task: "classification",
+   model: "gpt-4o",
+   prompt: "Classify this support ticket",
+   estimatedInputTokens: 180,
+   estimatedOutputTokens: 40
+ });
+
+ console.log(result.action, result.model, result.savingsUsd);
+ ```
+
+ ## Wrap live calls
+
+ ```ts
+ import { createAIGuard } from "ai-spend-guard";
+
+ const guard = createAIGuard({
+   apiKey: process.env.HUGGINGFACE_API_KEY!,
+   provider: "huggingface",
+   huggingfaceModel: "deepseek-ai/DeepSeek-R1:fastest",
+   policy: { cacheRepeatedPrompts: true, autoReroute: true }
+ });
+
+ const response = await guard.chat({
+   model: "deepseek-ai/DeepSeek-R1:fastest",
+   messages: [{ role: "user", content: "Classify this ticket" }]
+ }, "classification");
+
+ console.log(response.choices?.[0]?.message?.content);
+ ```
+
+ ## Pricing
+
+ Model pricing lives in `pricing.rates.json`. You can override it with the `AI_SPEND_GUARD_PRICING` environment variable.
+
+ ## Development scripts
+
+ - `npm run check` - type-check the project
+ - `npm run build` - build to `dist/`
+ - `npm run demo` - run the local demo
+
+ ## Usage Guide
+
+ See the usage guide for integration examples, policy examples, and a middleware sample: [USAGE.md](USAGE.md)
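
To make the `AI_SPEND_GUARD_PRICING` override concrete, here is a minimal sketch; the file name `my-pricing.json` and the overridden rates are invented for the example, while the table shape mirrors the bundled `pricing.rates.json`:

```ts
import { writeFileSync } from "node:fs";
import { estimateChatCostUsd } from "ai-spend-guard";

// Hypothetical override file (same shape as the bundled pricing.rates.json).
writeFileSync("my-pricing.json", JSON.stringify({
  default: { inputPer1MTokens: 5, outputPer1MTokens: 15 },
  models: { "gpt-4o": { inputPer1MTokens: 2.5, outputPer1MTokens: 10 } }
}));

// The table is read lazily on the first estimate and then cached, so the
// variable only needs to be set before the first call in the process.
process.env.AI_SPEND_GUARD_PRICING = "my-pricing.json";
console.log(estimateChatCostUsd("gpt-4o", 50_000, 5_000)); // 0.18 with these rates
```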
package/dist/cache.d.ts ADDED
@@ -0,0 +1,23 @@
+ export interface CacheEntry {
+     response: unknown;
+     timestamp: number;
+     cost: number;
+     model: string;
+ }
+ export declare class InMemoryCache {
+     private cache;
+     private readonly ttlMs;
+     private readonly maxSize;
+     constructor(options?: {
+         ttlMs?: number;
+         maxSize?: number;
+     });
+     get(key: string): CacheEntry | null;
+     set(key: string, response: unknown, cost: number, model: string): void;
+     stats(): {
+         size: number;
+         maxSize: number;
+         ttlMs: number;
+     };
+     clear(): void;
+ }
package/dist/cache.js ADDED
@@ -0,0 +1,52 @@
+ export class InMemoryCache {
+     cache = new Map();
+     ttlMs;
+     maxSize;
+     constructor(options = {}) {
+         this.ttlMs = options.ttlMs ?? 3600000; // 1 hour default
+         this.maxSize = options.maxSize ?? 1000;
+     }
+     get(key) {
+         const entry = this.cache.get(key);
+         if (!entry)
+             return null;
+         // Check TTL
+         if (Date.now() - entry.timestamp > this.ttlMs) {
+             this.cache.delete(key);
+             return null;
+         }
+         return entry;
+     }
+     set(key, response, cost, model) {
+         // Evict the oldest entry by insert time when full (FIFO-style, since get() never refreshes timestamps); the linear scan avoids sort overhead
+         if (this.cache.size >= this.maxSize && this.cache.size > 0) {
+             let oldestKey = null;
+             let oldestTime = Infinity;
+             for (const [k, v] of this.cache.entries()) {
+                 if (v.timestamp < oldestTime) {
+                     oldestTime = v.timestamp;
+                     oldestKey = k;
+                 }
+             }
+             if (oldestKey) {
+                 this.cache.delete(oldestKey);
+             }
+         }
+         this.cache.set(key, {
+             response,
+             timestamp: Date.now(),
+             cost,
+             model
+         });
+     }
+     stats() {
+         return {
+             size: this.cache.size,
+             maxSize: this.maxSize,
+             ttlMs: this.ttlMs
+         };
+     }
+     clear() {
+         this.cache.clear();
+     }
+ }
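
A standalone sketch of this cache, using the package's public `InMemoryCache` export; the keys and cost values are arbitrary example values:

```ts
import { InMemoryCache } from "ai-spend-guard";

const cache = new InMemoryCache({ ttlMs: 60_000, maxSize: 2 });
cache.set("hash-a", { text: "billing" }, 0.01, "gpt-4o-mini");

// Hits return the stored entry until ttlMs elapses.
const hit = cache.get("hash-a");
console.log(hit?.response, hit?.cost); // { text: "billing" } 0.01

// Once maxSize is reached, the entry with the oldest insert time is evicted.
cache.set("hash-b", { text: "technical" }, 0.01, "gpt-4o-mini");
cache.set("hash-c", { text: "billing" }, 0.01, "gpt-4o-mini"); // evicts "hash-a"
console.log(cache.get("hash-a")); // null
console.log(cache.stats());       // { size: 2, maxSize: 2, ttlMs: 60000 }
```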
package/dist/cli-demo.d.ts ADDED
@@ -0,0 +1 @@
+ export {};
package/dist/cli-demo.js ADDED
@@ -0,0 +1,132 @@
+ import { SpendGuardEngine } from "./engine.js";
+ import { estimateChatCostUsd } from "./pricing.js";
+ import { AIGuard } from "./client.js";
+ import readline from "node:readline";
+ const requests = [
+     {
+         id: 1,
+         task: "classification",
+         model: "gpt-4o",
+         prompt: "Classify this ticket as billing or technical",
+         estimatedInputTokens: 50000,
+         estimatedOutputTokens: 5000
+     },
+     {
+         id: 2,
+         task: "classification",
+         model: "gpt-4o",
+         prompt: "Classify this ticket as billing or technical",
+         estimatedInputTokens: 50000,
+         estimatedOutputTokens: 5000
+     }
+ ];
+ async function runMock() {
+     const engine = new SpendGuardEngine({ cacheRepeatedPrompts: true, autoReroute: true });
+     let totalBefore = 0;
+     let totalAfter = 0;
+     const actionsTaken = {};
+     for (const r of requests) {
+         const before = estimateChatCostUsd(r.model, r.estimatedInputTokens, r.estimatedOutputTokens);
+         totalBefore += before;
+         const decision = engine.evaluate({
+             task: r.task,
+             model: r.model,
+             prompt: r.prompt,
+             estimatedInputTokens: r.estimatedInputTokens,
+             estimatedOutputTokens: r.estimatedOutputTokens
+         });
+         const afterModel = decision.model ?? r.model;
+         const after = estimateChatCostUsd(afterModel, r.estimatedInputTokens, r.estimatedOutputTokens);
+         totalAfter += after;
+         actionsTaken[decision.action] = (actionsTaken[decision.action] || 0) + 1;
+     }
+     const saved = totalBefore - totalAfter;
+     const savedPct = totalBefore > 0 ? Math.round((saved / totalBefore) * 100) : 0;
+     console.log("\n=== AI Guard Demo ===\n");
+     console.log(`Task: classification + repeat\n`);
+     console.log("Without AI Guard:");
+     console.log(` Cost: $${totalBefore.toFixed(2)}\n`);
+     console.log("With AI Guard:");
+     console.log(` Cost: $${totalAfter.toFixed(2)}\n`);
+     console.log(`Saved: $${saved.toFixed(2)} (${savedPct}%)\n`);
+     console.log("Actions taken:");
+     if (actionsTaken["reroute"]) {
+         console.log(` - Rerouted model → saved on ${actionsTaken["reroute"]} request(s)`);
+     }
+     if (actionsTaken["cache"]) {
+         console.log(` - Cached repeated request(s) → saved on ${actionsTaken["cache"]} request(s)`);
+     }
+     if (actionsTaken["warn"]) {
+         console.log(` - Warnings shown on ${actionsTaken["warn"]} request(s)`);
+     }
+     if (actionsTaken["block"]) {
+         console.log(` - Blocked ${actionsTaken["block"]} request(s)`);
+     }
+     console.log('\nTry integrating: import { createAIGuard } from "./src/index.js" and use guard.chat(...)');
+ }
+ async function runLive() {
+     const apiKey = process.env.OPENAI_API_KEY;
+     if (!apiKey) {
+         console.error("OPENAI_API_KEY not set. Set it in your environment to run live.");
+         process.exit(1);
+     }
+     const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
+     const answer = await new Promise((res) => rl.question("Proceed with live OpenAI calls? This may incur cost (y/N): ", res));
+     rl.close();
+     if (!/^y(es)?$/i.test(answer.trim())) {
+         console.log("Aborted live run.");
+         return;
+     }
+     const guard = new AIGuard({ apiKey, enableLogging: true });
+     let totalBefore = 0;
+     let totalAfter = 0;
+     const actionsTaken = {};
+     for (const r of requests) {
+         const before = estimateChatCostUsd(r.model, r.estimatedInputTokens, r.estimatedOutputTokens);
+         totalBefore += before;
+         const messages = [{ role: "user", content: r.prompt }];
+         const resp = await guard.chat({ model: r.model, messages }, "classification");
+         const usedIn = resp.usage?.prompt_tokens ?? r.estimatedInputTokens;
+         const usedOut = resp.usage?.completion_tokens ?? r.estimatedOutputTokens;
+         const finalModel = resp.model ?? r.model;
+         const after = estimateChatCostUsd(finalModel, usedIn, usedOut);
+         totalAfter += after;
+         // attempt to infer action from logs
+         const lastLog = guard.getLogs().slice(-1)[0];
+         if (lastLog) {
+             const action = lastLog.rerouted ? "reroute" : lastLog.cached ? "cache" : lastLog.ruleSavingsUsd ? "warn" : "other"; actionsTaken[action] = (actionsTaken[action] || 0) + 1;
+         }
+     }
+     const saved = totalBefore - totalAfter;
+     const savedPct = totalBefore > 0 ? Math.round((saved / totalBefore) * 100) : 0;
+     console.log("\n=== AI Guard Live Demo ===\n");
+     console.log(`Task: classification + repeat\n`);
+     console.log("Without AI Guard:");
+     console.log(` Cost: $${totalBefore.toFixed(2)}\n`);
+     console.log("With AI Guard:");
+     console.log(` Cost: $${totalAfter.toFixed(2)}\n`);
+     console.log(`Saved: $${saved.toFixed(2)} (${savedPct}%)\n`);
+     console.log("Actions taken:");
+     if (actionsTaken["reroute"]) {
+         console.log(` - Rerouted model → saved on ${actionsTaken["reroute"]} request(s)`);
+     }
+     if (actionsTaken["cache"]) {
+         console.log(` - Cached repeated request(s) → saved on ${actionsTaken["cache"]} request(s)`);
+     }
+     if (actionsTaken["warn"]) {
+         console.log(` - Warnings shown on ${actionsTaken["warn"]} request(s)`);
+     }
+ }
+ async function main() {
+     const live = process.argv.includes("--live");
+     if (live) {
+         await runLive();
+     }
+     else {
+         await runMock();
+     }
+ }
+ main().catch((err) => {
+     console.error(err);
+     process.exit(1);
+ });
package/dist/client.d.ts ADDED
@@ -0,0 +1,70 @@
+ import type { PolicyConfig, TaskKind } from "./types.js";
+ export interface RequestLog {
+     id: string;
+     model: string;
+     tokensIn: number;
+     tokensOut: number;
+     estimatedCostUsd: number;
+     actualCostUsd: number;
+     promptHash: string;
+     cached: boolean;
+     rerouted: boolean;
+     originalModel?: string;
+     potentialSavingsUsd: number;
+     ruleSavingsUsd: number;
+     latencyMs: number;
+     timestamp: number;
+     warnings: string[];
+ }
+ export interface AIGuardConfig {
+     apiKey: string;
+     provider?: "openai" | "huggingface";
+     huggingfaceModel?: string;
+     policy?: PolicyConfig;
+     cacheOptions?: {
+         ttlMs?: number;
+         maxSize?: number;
+     };
+     backendUrl?: string;
+     enableLogging?: boolean;
+ }
+ export declare class AIGuard {
+     private provider;
+     private openai;
+     private apiKey;
+     private huggingfaceModel;
+     private guard;
+     private cache;
+     private requestLogs;
+     private backendUrl;
+     private enableLogging;
+     constructor(config: AIGuardConfig);
+     chat(request: {
+         model: string;
+         messages: Array<{
+             role: string;
+             content: string;
+         }>;
+         [key: string]: unknown;
+     }, task?: TaskKind): Promise<any>;
+     private callProvider;
+     private logRequest;
+     private generateId;
+     getLogs(): RequestLog[];
+     getStats(): {
+         totalRequests: number;
+         actualCostUsd: number;
+         couldHaveSpentUsd: number;
+         wastedCostUsd: number;
+         wastedPercentage: number;
+         reroutedRequests: number;
+         cachedRequests: number;
+         avgLatencyMs: number;
+         cacheStats: {
+             size: number;
+             maxSize: number;
+             ttlMs: number;
+         };
+     };
+     clearLogs(): void;
+ }
package/dist/client.js ADDED
@@ -0,0 +1,162 @@
+ import OpenAI from "openai";
+ import { SpendGuardEngine } from "./engine.js";
+ import { InMemoryCache } from "./cache.js";
+ import { estimateTokens, hashPrompt } from "./tokens.js";
+ import { estimateChatCostUsd } from "./pricing.js";
+ export class AIGuard {
+     provider;
+     openai;
+     apiKey;
+     huggingfaceModel;
+     guard;
+     cache;
+     requestLogs = [];
+     backendUrl;
+     enableLogging;
+     constructor(config) {
+         this.provider = config.provider ?? "openai";
+         this.apiKey = config.apiKey;
+         this.huggingfaceModel = config.huggingfaceModel ?? "deepseek-ai/DeepSeek-R1:fastest";
+         this.openai = new OpenAI(this.provider === "huggingface"
+             ? { apiKey: config.apiKey, baseURL: "https://router.huggingface.co/v1" }
+             : { apiKey: config.apiKey });
+         this.guard = new SpendGuardEngine(config.policy);
+         this.cache = new InMemoryCache(config.cacheOptions);
+         this.backendUrl = config.backendUrl;
+         this.enableLogging = config.enableLogging !== false;
+     }
+     async chat(request, task = "other") {
+         const startTime = Date.now();
+         const promptHash = hashPrompt(request.messages);
+         // Check cache
+         const cached = this.cache.get(promptHash);
+         if (cached) {
+             const log = {
+                 id: this.generateId(),
+                 model: request.model,
+                 tokensIn: 0,
+                 tokensOut: 0,
+                 estimatedCostUsd: 0,
+                 actualCostUsd: cached.cost,
+                 promptHash,
+                 cached: true,
+                 rerouted: false,
+                 potentialSavingsUsd: 0,
+                 ruleSavingsUsd: 0,
+                 latencyMs: Date.now() - startTime,
+                 timestamp: Date.now(),
+                 warnings: ["Served from cache"]
+             };
+             this.logRequest(log);
+             return cached.response;
+         }
+         // Estimate tokens
+         const { input, output } = estimateTokens(request.model, "", request.messages);
+         // Evaluate guardrails
+         const decision = this.guard.evaluate({
+             task,
+             model: request.model,
+             prompt: request.messages.map((m) => m.content).join("\n"),
+             estimatedInputTokens: input,
+             estimatedOutputTokens: output
+         });
+         const estimatedCost = estimateChatCostUsd(request.model, input, output);
+         // Determine final model
+         let finalModel = request.model;
+         let rerouted = false;
+         if (decision.action === "reroute" && decision.model !== request.model) {
+             finalModel = decision.model;
+             rerouted = true;
+         }
+         if (decision.action === "block") {
+             throw new Error(`[AI Guard] Request blocked: ${decision.reasons.join("; ")}`);
+         }
+         if (decision.action === "warn" && this.enableLogging) {
+             console.warn(`[AI Guard] Warning: ${decision.reasons.join("; ")}`);
+         }
+         // Make the actual call
+         const updatedRequest = { ...request, model: finalModel };
+         const response = await this.callProvider(updatedRequest);
+         const latencyMs = Date.now() - startTime;
+         const actualTokensIn = response.usage?.prompt_tokens ?? input;
+         const actualTokensOut = response.usage?.completion_tokens ?? output;
+         const actualCost = estimateChatCostUsd(finalModel, actualTokensIn, actualTokensOut);
+         // Cache the response
+         this.cache.set(promptHash, response, actualCost, finalModel);
+         // Log the request
+         const potentialSavings = Math.max(0, estimatedCost - actualCost);
+         const log = {
+             id: this.generateId(),
+             model: request.model,
+             tokensIn: actualTokensIn,
+             tokensOut: actualTokensOut,
+             estimatedCostUsd: estimatedCost,
+             actualCostUsd: actualCost,
+             promptHash,
+             cached: false,
+             rerouted,
+             potentialSavingsUsd: Math.round(potentialSavings * 100) / 100,
+             ruleSavingsUsd: Math.round((decision.savingsUsd ?? 0) * 100) / 100,
+             latencyMs,
+             timestamp: Date.now(),
+             warnings: decision.warnings,
+             ...(rerouted ? { originalModel: request.model } : {})
+         };
+         this.logRequest(log);
+         if (this.enableLogging) {
+             console.log(`[AI Guard] Cost: $${actualCost.toFixed(2)} (saved $${decision.savingsUsd.toFixed(2)})${rerouted ? ` [rerouted to ${finalModel}]` : ""}`);
+         }
+         return response;
+     }
+     async callProvider(request) {
+         if (!this.openai) {
+             throw new Error("OpenAI client is not initialized");
+         }
+         const routedRequest = this.provider === "huggingface"
+             ? { ...request, model: this.huggingfaceModel ?? request.model }
+             : request;
+         return this.openai.chat.completions.create(routedRequest);
+     }
+     logRequest(log) {
+         this.requestLogs.push(log);
+         if (this.backendUrl) {
+             fetch(`${this.backendUrl}/log`, {
+                 method: "POST",
+                 headers: { "Content-Type": "application/json" },
+                 body: JSON.stringify(log)
+             }).catch(() => {
+                 // Silently fail if backend is unavailable
+             });
+         }
+     }
+     generateId() {
+         return `${Date.now()}-${Math.random().toString(36).slice(2)}`;
+     }
+     getLogs() {
+         return this.requestLogs;
+     }
+     getStats() {
+         const logs = this.requestLogs;
+         const totalCost = logs.reduce((sum, log) => sum + log.actualCostUsd, 0);
+         const totalSavings = logs.filter((l) => l.rerouted || l.cached).length;
+         const cachedRequests = logs.filter((l) => l.cached).length;
+         const totalPotentialSavings = Math.round((logs.reduce((s, l) => s + (l.potentialSavingsUsd ?? 0), 0)) * 100) / 100;
+         const couldHaveSpent = Math.round(((totalCost + totalPotentialSavings) * 100)) / 100;
+         const overspendRatio = totalCost > 0 ? Math.round((totalPotentialSavings / totalCost) * 100) : 0;
+         return {
+             totalRequests: logs.length,
+             actualCostUsd: Math.round(totalCost * 100) / 100,
+             couldHaveSpentUsd: couldHaveSpent,
+             wastedCostUsd: totalPotentialSavings,
+             wastedPercentage: overspendRatio,
+             reroutedRequests: logs.filter((l) => l.rerouted).length,
+             cachedRequests,
+             avgLatencyMs: logs.length > 0 ? Math.round(logs.reduce((sum, l) => sum + l.latencyMs, 0) / logs.length) : 0,
+             cacheStats: this.cache.stats()
+         };
+     }
+     clearLogs() {
+         this.requestLogs = [];
+         this.cache.clear();
+     }
+ }
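
A minimal end-to-end sketch of the wrapper above, using the public `createAIGuard` factory; it assumes an ES-module async context and a live `OPENAI_API_KEY`, and the ticket text is invented:

```ts
import { createAIGuard } from "ai-spend-guard";

const guard = createAIGuard({
  apiKey: process.env.OPENAI_API_KEY!,
  policy: { autoReroute: true, cacheRepeatedPrompts: true }
});

// A classification call on gpt-4o is rerouted to gpt-4o-mini by the engine;
// repeating identical messages is then served from the in-memory cache.
const res = await guard.chat({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Classify this ticket: refund request" }]
}, "classification");
console.log(res.choices?.[0]?.message?.content);

// Aggregates over the request log kept by the client.
const stats = guard.getStats();
console.log(stats.actualCostUsd, stats.reroutedRequests, stats.cachedRequests);
```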
@@ -0,0 +1 @@
+ export {};
@@ -0,0 +1,57 @@
+ import { SpendGuardEngine } from "./engine.js";
+ import { estimateChatCostUsd } from "./pricing.js";
+ // Simple single-file demo that doesn't call a real model.
+ // It shows before vs after costs for a request and demonstrates caching.
+ const engine = new SpendGuardEngine({ cacheRepeatedPrompts: true, autoReroute: true });
+ const requests = [
+     {
+         id: 1,
+         task: "classification",
+         model: "gpt-4o",
+         prompt: "Classify this ticket as billing or technical",
+         estimatedInputTokens: 50000,
+         estimatedOutputTokens: 5000
+     },
+     {
+         id: 2,
+         task: "classification",
+         model: "gpt-4o",
+         prompt: "Classify this ticket as billing or technical",
+         estimatedInputTokens: 50000,
+         estimatedOutputTokens: 5000
+     }
+ ];
+ let totalBefore = 0;
+ let totalAfter = 0;
+ let savedCount = 0;
+ let downgradeCount = 0;
+ let cacheCount = 0;
+ for (const r of requests) {
+     const before = estimateChatCostUsd(r.model, r.estimatedInputTokens, r.estimatedOutputTokens);
+     totalBefore += before;
+     const decision = engine.evaluate({
+         task: r.task,
+         model: r.model,
+         prompt: r.prompt,
+         estimatedInputTokens: r.estimatedInputTokens,
+         estimatedOutputTokens: r.estimatedOutputTokens
+     });
+     const afterModel = decision.model;
+     const after = estimateChatCostUsd(afterModel, r.estimatedInputTokens, r.estimatedOutputTokens);
+     totalAfter += after;
+     if (decision.action === "reroute")
+         downgradeCount++;
+     if (decision.action === "cache")
+         cacheCount++;
+     if (after < before)
+         savedCount++;
+     console.log(JSON.stringify({ id: r.id, before: `$${before.toFixed(2)}`, after: `$${after.toFixed(2)}`, action: decision.action, model: afterModel, savings: `$${(before - after).toFixed(2)}` }, null, 2));
+ }
+ console.log("\n==== SUMMARY ====");
+ console.log(`Requests: ${requests.length}`);
+ console.log(`Saved on ${savedCount} requests`);
+ console.log(`Downgrades: ${downgradeCount}`);
+ console.log(`Cached: ${cacheCount}`);
+ console.log(`Total before: $${totalBefore.toFixed(2)}`);
+ console.log(`Total after: $${totalAfter.toFixed(2)}`);
+ console.log(`Total saved: $${(totalBefore - totalAfter).toFixed(2)}`);
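
As a cross-check, the demo's before/after totals follow directly from the bundled rates; a sketch using the public `estimateChatCostUsd` export:

```ts
import { estimateChatCostUsd } from "ai-spend-guard";

// gpt-4o:      50,000 × $5/1M    + 5,000 × $15/1M   = $0.250  + $0.075  → $0.33
// gpt-4o-mini: 50,000 × $0.15/1M + 5,000 × $0.60/1M = $0.0075 + $0.0030 → $0.01
console.log(estimateChatCostUsd("gpt-4o", 50_000, 5_000));      // 0.33
console.log(estimateChatCostUsd("gpt-4o-mini", 50_000, 5_000)); // 0.01

// Two identical requests: $0.66 before vs $0.02 after, i.e. the README's 97%.
```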
package/dist/demo.d.ts ADDED
@@ -0,0 +1 @@
+ export {};
package/dist/demo.js ADDED
@@ -0,0 +1,29 @@
+ import { createSpendGuard } from "./index.js";
+ const guard = createSpendGuard({
+     cacheRepeatedPrompts: true,
+     autoReroute: true,
+     disallowModels: ["gpt-4.1"],
+     maxCostPerCall: 0.2
+ });
+ const requests = [
+     {
+         task: "classification",
+         model: "gpt-4o",
+         prompt: "Classify this ticket as billing or technical",
+         estimatedInputTokens: 50000,
+         estimatedOutputTokens: 5000,
+         retries: 0
+     },
+     {
+         task: "classification",
+         model: "gpt-4o",
+         prompt: "Classify this ticket as billing or technical",
+         estimatedInputTokens: 50000,
+         estimatedOutputTokens: 5000,
+         retries: 0
+     }
+ ];
+ for (const request of requests) {
+     const decision = guard.evaluate(request);
+     console.log(JSON.stringify({ request, decision }, null, 2));
+ }
package/dist/engine.d.ts ADDED
@@ -0,0 +1,7 @@
+ import type { ChatRequest, GuardrailDecision, PolicyConfig } from "./types.js";
+ export declare class SpendGuardEngine {
+     private readonly policy;
+     constructor(policy?: PolicyConfig);
+     evaluate(request: ChatRequest): GuardrailDecision;
+     private detectRules;
+ }
package/dist/engine.js ADDED
@@ -0,0 +1,135 @@
+ import { estimateChatCostUsd, getCheaperModel } from "./pricing.js";
+ const promptHistory = new Map();
+ export class SpendGuardEngine {
+     policy;
+     constructor(policy = {}) {
+         this.policy = policy;
+     }
+     evaluate(request) {
+         const estimatedCostUsd = estimateChatCostUsd(request.model, request.estimatedInputTokens, request.estimatedOutputTokens);
+         const warnings = [];
+         const reasons = [];
+         const appliedRuleIds = [];
+         let action = "allow";
+         let routedModel = request.model;
+         let savingsUsd = 0;
+         const rules = this.detectRules(request, estimatedCostUsd);
+         for (const rule of rules) {
+             appliedRuleIds.push(rule.id);
+             reasons.push(rule.reason);
+             if (rule.action === "warn") {
+                 warnings.push(rule.reason);
+             }
+             if (rule.action === "cache") {
+                 action = "cache";
+                 savingsUsd = Math.max(savingsUsd, rule.estimatedSavingsUsd ?? 0);
+                 break;
+             }
+             if (rule.action === "reroute" && this.policy.autoReroute !== false) {
+                 action = "reroute";
+                 routedModel = rule.suggestedModel ?? routedModel;
+                 savingsUsd = Math.max(savingsUsd, rule.estimatedSavingsUsd ?? 0);
+                 continue;
+             }
+             if (rule.action === "block" && this.policy.allowBlocking) {
+                 action = "block";
+                 savingsUsd = Math.max(savingsUsd, estimatedCostUsd);
+                 continue;
+             }
+             if (rule.action === "warn" && action === "allow") {
+                 action = "warn";
+             }
+         }
+         if (request.retries !== undefined && request.retries >= 3) {
+             warnings.push("Retry loop detected");
+             reasons.push("Retry loop detected");
+             action = action === "allow" ? "warn" : action;
+         }
+         if (estimatedCostUsd > (this.policy.maxCostPerCall ?? Number.POSITIVE_INFINITY)) {
+             warnings.push(`Estimated cost ${formatUsd(estimatedCostUsd)} exceeds policy max`);
+             reasons.push("Call exceeds maximum cost policy");
+             if (this.policy.allowBlocking) {
+                 action = "block";
+             }
+             else if (action === "allow") {
+                 action = "warn";
+             }
+         }
+         if (routedModel !== request.model) {
+             const routedCost = estimateChatCostUsd(routedModel, request.estimatedInputTokens, request.estimatedOutputTokens);
+             savingsUsd = Math.max(savingsUsd, estimatedCostUsd - routedCost);
+         }
+         return {
+             action,
+             model: routedModel,
+             estimatedCostUsd,
+             savingsUsd: roundMoney(savingsUsd),
+             warnings,
+             reasons,
+             appliedRuleIds
+         };
+     }
+     detectRules(request, estimatedCostUsd) {
+         const rules = [];
+         const promptKey = normalizePrompt(request.prompt);
+         const repeatCount = (promptHistory.get(promptKey) ?? 0) + 1;
+         promptHistory.set(promptKey, repeatCount);
+         if (this.policy.disallowModels?.includes(request.model)) {
+             const suggestedModel = getCheaperModel(request.model);
+             rules.push({
+                 id: "disallow-model",
+                 reason: `${request.model} is disallowed by policy`,
+                 action: this.policy.allowBlocking ? "block" : "reroute",
+                 estimatedSavingsUsd: estimatedCostUsd * 0.7,
+                 ...(suggestedModel ? { suggestedModel } : {})
+             });
+         }
+         if (request.task === "classification" && isHighCostModel(request.model)) {
+             const suggestedModel = getCheaperModel(request.model);
+             rules.push({
+                 id: "overpowered-model-classification",
+                 reason: "Overpowered model used for classification",
+                 action: "reroute",
+                 estimatedSavingsUsd: estimatedCostUsd * 0.8,
+                 ...(suggestedModel ? { suggestedModel } : {})
+             });
+         }
+         if (repeatCount >= 2 && this.policy.cacheRepeatedPrompts) {
+             rules.push({
+                 id: "identical-prompt-repeat",
+                 reason: `Identical prompt repeated ${repeatCount} times`,
+                 action: "cache",
+                 estimatedSavingsUsd: estimatedCostUsd * 0.9
+             });
+         }
+         if (request.retries !== undefined && request.retries >= 2) {
+             rules.push({
+                 id: "retry-loop",
+                 reason: "Retry loop detected",
+                 action: "warn",
+                 estimatedSavingsUsd: estimatedCostUsd * 0.3
+             });
+         }
+         if (estimatedCostUsd >= 0.02 && request.task === "classification") {
+             rules.push({
+                 id: "high-cost-low-variance",
+                 reason: "High-cost call with low-variance output likely wasteful",
+                 action: "warn",
+                 estimatedSavingsUsd: estimatedCostUsd * 0.5
+             });
+         }
+         return rules;
+     }
+ }
+ function normalizePrompt(prompt) {
+     return prompt.trim().replace(/\s+/g, " ").toLowerCase();
+ }
+ function isHighCostModel(model) {
+     return model === "gpt-4o" || model === "gpt-4.1";
+ }
+ function roundMoney(value) {
+     return Math.round(value * 100) / 100;
+ }
+ function formatUsd(value) {
+     return `$${value.toFixed(2)}`;
+ }
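
Concretely, the rule flow above yields decisions like the following (a sketch via the public `createSpendGuard` factory; the request values mirror the demos):

```ts
import { createSpendGuard } from "ai-spend-guard";

const guard = createSpendGuard({ autoReroute: true, cacheRepeatedPrompts: true });

const request = {
  task: "classification" as const,
  model: "gpt-4o",
  prompt: "Classify this ticket as billing or technical",
  estimatedInputTokens: 50_000,
  estimatedOutputTokens: 5_000
};

// First sighting of this prompt: the overpowered-model rule reroutes it.
const first = guard.evaluate(request);
console.log(first.action, first.model); // "reroute" "gpt-4o-mini"
console.log(first.appliedRuleIds);      // includes "overpowered-model-classification"

// The prompt history is module-global, so the identical prompt counts as a
// repeat and the cache rule wins on the second evaluation.
console.log(guard.evaluate(request).action); // "cache"
```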
package/dist/index.d.ts ADDED
@@ -0,0 +1,12 @@
+ export { SpendGuardEngine } from "./engine.js";
+ export { estimateChatCostUsd } from "./pricing.js";
+ export { AIGuard } from "./client.js";
+ export { InMemoryCache } from "./cache.js";
+ export { estimateTokens, hashPrompt } from "./tokens.js";
+ export type { ChatRequest, GuardrailDecision, GuardrailRule, PolicyConfig, SpendAction, TaskKind } from "./types.js";
+ export type { RequestLog, AIGuardConfig } from "./client.js";
+ import { AIGuard } from "./client.js";
+ import { SpendGuardEngine } from "./engine.js";
+ import type { PolicyConfig } from "./types.js";
+ export declare function createAIGuard(config: ConstructorParameters<typeof AIGuard>[0]): AIGuard;
+ export declare function createSpendGuard(policy?: PolicyConfig): SpendGuardEngine;
package/dist/index.js ADDED
@@ -0,0 +1,13 @@
+ export { SpendGuardEngine } from "./engine.js";
+ export { estimateChatCostUsd } from "./pricing.js";
+ export { AIGuard } from "./client.js";
+ export { InMemoryCache } from "./cache.js";
+ export { estimateTokens, hashPrompt } from "./tokens.js";
+ import { AIGuard } from "./client.js";
+ import { SpendGuardEngine } from "./engine.js";
+ export function createAIGuard(config) {
+     return new AIGuard(config);
+ }
+ export function createSpendGuard(policy = {}) {
+     return new SpendGuardEngine(policy);
+ }
@@ -0,0 +1,2 @@
+ import type { ChatRequest } from "./types.js";
+ export declare function guardedChatFetch(request: ChatRequest): Promise<any>;
@@ -0,0 +1,48 @@
+ import { createSpendGuard } from "./index.js";
+ // Very small example showing how to use the guard before calling an OpenAI-like API.
+ // This file is illustrative — adapt request shaping for your client library.
+ const guard = createSpendGuard({
+     cacheRepeatedPrompts: true,
+     autoReroute: true,
+     maxCostPerCall: 0.5
+ });
+ export async function guardedChatFetch(request) {
+     // request should include: task, model, prompt, estimatedInputTokens, estimatedOutputTokens
+     const decision = guard.evaluate(request);
+     // Soft enforcement: warn and continue, reroute, or return cached result.
+     if (decision.action === 'cache') {
+         // Your cache lookup here (example omitted).
+         // If cached value exists, return it immediately.
+         const cached = null; // TODO: implement cache lookup
+         if (cached)
+             return cached;
+     }
+     const finalModel = decision.model;
+     if (decision.action === 'warn') {
+         console.warn('Spend guard warning:', decision.reasons.join('; '));
+     }
+     if (decision.action === 'block') {
+         throw new Error('Call blocked by spend policy: ' + decision.reasons.join('; '));
+     }
+     // Build the upstream request adapting the model if rerouted
+     const upstreamPayload = {
+         model: finalModel,
+         messages: [
+             { role: 'system', content: 'You are a helpful assistant.' },
+             { role: 'user', content: request.prompt }
+         ],
+         max_tokens: request.estimatedOutputTokens
+     };
+     // Replace the URL and headers with your provider / API key
+     const resp = await fetch('https://api.openai.com/v1/chat/completions', {
+         method: 'POST',
+         headers: {
+             'Content-Type': 'application/json',
+             'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`
+         },
+         body: JSON.stringify(upstreamPayload)
+     });
+     const body = await resp.json();
+     // Optionally store response in cache if decision.action was 'cache' and caching enabled
+     return body;
+ }
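
A call site for this helper might look like the following sketch; the import path `./middleware.js` is hypothetical since the file name is not shown in this diff, and it assumes an ES-module async context with `OPENAI_API_KEY` set:

```ts
// Hypothetical module path for the file shown above.
import { guardedChatFetch } from "./middleware.js";

const body = await guardedChatFetch({
  task: "classification",
  model: "gpt-4o",
  prompt: "Classify this ticket: card was charged twice",
  estimatedInputTokens: 180,
  estimatedOutputTokens: 40
});

// With autoReroute on, the upstream request goes out with the cheaper model.
console.log(body.choices?.[0]?.message?.content);
```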
package/dist/pricing.d.ts ADDED
@@ -0,0 +1,2 @@
+ export declare function estimateChatCostUsd(model: string, inputTokens: number, outputTokens: number): number;
+ export declare function getCheaperModel(model: string): string | undefined;
package/dist/pricing.js ADDED
@@ -0,0 +1,107 @@
+ import { readFileSync } from "node:fs";
+ import { resolve, dirname } from "node:path";
+ import { fileURLToPath } from "node:url";
+ const DEFAULT_TABLE = {
+     default: {
+         inputPer1MTokens: 5,
+         outputPer1MTokens: 15
+     },
+     models: {
+         "gpt-4o": {
+             inputPer1MTokens: 5,
+             outputPer1MTokens: 15
+         },
+         "gpt-4.1": {
+             inputPer1MTokens: 10,
+             outputPer1MTokens: 30
+         },
+         "gpt-4o-mini": {
+             inputPer1MTokens: 0.15,
+             outputPer1MTokens: 0.6
+         },
+         "gpt-4.1-mini": {
+             inputPer1MTokens: 0.4,
+             outputPer1MTokens: 1.6
+         },
+         "gpt-3.5-turbo": {
+             inputPer1MTokens: 0.5,
+             outputPer1MTokens: 1.5
+         }
+     }
+ };
+ let cachedTable = null;
+ export function estimateChatCostUsd(model, inputTokens, outputTokens) {
+     const table = loadPricingTable();
+     const modelEntry = table.models[model];
+     const fallback = table.default ?? DEFAULT_TABLE.default;
+     const inputRate = modelEntry?.inputPer1MTokens ?? fallback.inputPer1MTokens;
+     const outputRate = modelEntry?.outputPer1MTokens ?? fallback.outputPer1MTokens;
+     const inputCost = (inputTokens / 1_000_000) * inputRate;
+     const outputCost = (outputTokens / 1_000_000) * outputRate;
+     return roundToCents(inputCost + outputCost);
+ }
+ export function getCheaperModel(model) {
+     if (model === "gpt-4.1") {
+         return "gpt-4o-mini";
+     }
+     if (model === "gpt-4o") {
+         return "gpt-4o-mini";
+     }
+     if (model === "gpt-4.1-mini") {
+         return "gpt-4o-mini";
+     }
+     return undefined;
+ }
+ function roundToCents(value) {
+     return Math.round(value * 100) / 100;
+ }
+ function loadPricingTable() {
+     if (cachedTable) {
+         return cachedTable;
+     }
+     const envPath = process.env.AI_SPEND_GUARD_PRICING;
+     const baseDir = dirname(fileURLToPath(import.meta.url));
+     const defaultPath = resolve(baseDir, "../pricing.rates.json");
+     const pricingPath = envPath ? resolve(envPath) : defaultPath;
+     try {
+         const raw = readFileSync(pricingPath, "utf8");
+         const parsed = JSON.parse(raw);
+         if (isPricingTable(parsed)) {
+             cachedTable = parsed;
+             return parsed;
+         }
+     }
+     catch {
+         // Fall back to defaults if the file cannot be read.
+     }
+     cachedTable = DEFAULT_TABLE;
+     return DEFAULT_TABLE;
+ }
+ function isPricingTable(value) {
+     if (!value || typeof value !== "object") {
+         return false;
+     }
+     const table = value;
+     if (!table.default || !isRateEntry(table.default)) {
+         return false;
+     }
+     if (!table.models || typeof table.models !== "object") {
+         return false;
+     }
+     for (const entry of Object.values(table.models)) {
+         if (!isRateEntry(entry)) {
+             return false;
+         }
+     }
+     return true;
+ }
+ function isRateEntry(value) {
+     if (!value || typeof value !== "object") {
+         return false;
+     }
+     const entry = value;
+     return isValidRate(entry.inputPer1MTokens) && isValidRate(entry.outputPer1MTokens);
+ }
+ function isValidRate(value) {
+     return typeof value === "number" && Number.isFinite(value) && value >= 0;
+ }
@@ -0,0 +1 @@
+ export {};
@@ -0,0 +1,62 @@
+ // This is a demo of the full SDK wrapper
+ // It shows how to use the AIGuard to wrap OpenAI or Hugging Face chat calls with guardrails
+ // Example usage:
+ // const guard = createAIGuard({
+ //   apiKey: process.env.OPENAI_API_KEY,
+ //   provider: "openai",
+ //   policy: {
+ //     autoReroute: true,
+ //     cacheRepeatedPrompts: true,
+ //     maxCostPerCall: 0.5
+ //   }
+ // });
+ //
+ // const response = await guard.chat({
+ //   model: "gpt-4o",
+ //   messages: [
+ //     { role: "user", content: "Classify this ticket" }
+ //   ]
+ // }, "classification");
+ //
+ // console.log(guard.getStats());
+ console.log(`
+ ╔════════════════════════════════════════════════════════════════════╗
+ ║ AI Spend Guard - SDK Ready ║
+ ╠════════════════════════════════════════════════════════════════════╣
+ ║ ║
+ ║ The SDK is ready to wrap your OpenAI calls with cost guardrails. ║
+ ║ ║
+ ║ To use it in your app: ║
+ ║ ║
+ ║ 1. Install: npm install ║
+ ║ 2. Import: import { createAIGuard } from './src/index.js' ║
+ ║ 3. Configure: ║
+ ║ const guard = createAIGuard({ ║
+ ║ apiKey: process.env.OPENAI_API_KEY, ║
+ ║ policy: { ║
+ ║ autoReroute: true, ║
+ ║ cacheRepeatedPrompts: true, ║
+ ║ maxCostPerCall: 0.5 ║
+ ║ } ║
+ ║ }) ║
+ ║ 4. Make calls: ║
+ ║ const response = await guard.chat({ ║
+ ║ model: "gpt-4o", ║
+ ║ messages: [{ role: "user", content: "..." }] ║
+ ║ }, "classification") ║
+ ║ 5. Check stats: ║
+ ║ console.log(guard.getStats()) ║
+ ║ ║
+ ║ Features: ║
+ ║ ✓ Token estimation with tiktoken ║
+ ║ ✓ Prompt hashing & caching ║
+ ║ ✓ Automatic model downgrading ║
+ ║ ✓ Cost tracking per request ║
+ ║ ✓ Guardrail rules & soft enforcement ║
+ ║ ✓ Request logging ║
+ ║ ║
+ ║ Note: Set OPENAI_API_KEY environment variable to use live API. ║
+ ║ ║
+ ╚════════════════════════════════════════════════════════════════════╝
+ `);
+ export {};
@@ -0,0 +1,11 @@
1
+ export declare function estimateTokens(model: string, prompt: string, messages?: Array<{
2
+ role: string;
3
+ content: string;
4
+ }>): {
5
+ input: number;
6
+ output: number;
7
+ };
8
+ export declare function hashPrompt(content: string | Array<{
9
+ role: string;
10
+ content: string;
11
+ }>): string;
package/dist/tokens.js ADDED
@@ -0,0 +1,29 @@
+ import { encodingForModel } from "js-tiktoken";
+ import { createHash } from "node:crypto";
+ export function estimateTokens(model, prompt, messages) {
+     try {
+         const enc = encodingForModel(model);
+         let inputTokens = 0;
+         if (messages) {
+             for (const msg of messages) {
+                 inputTokens += enc.encode(msg.content).length;
+             }
+         }
+         else {
+             inputTokens = enc.encode(prompt).length;
+         }
+         // Rough estimate: output is often 25-30% of input for most tasks
+         const outputTokens = Math.max(Math.ceil(inputTokens * 0.3), 50);
+         return { input: inputTokens, output: outputTokens };
+     }
+     catch {
+         // Fallback to rough estimate if model is unknown
+         const inputTokens = Math.ceil((messages?.reduce((sum, m) => sum + m.content.length, 0) ?? prompt.length) / 4);
+         const outputTokens = Math.max(Math.ceil(inputTokens * 0.3), 50);
+         return { input: inputTokens, output: outputTokens };
+     }
+ }
+ export function hashPrompt(content) {
+     const text = Array.isArray(content) ? content.map((m) => m.content).join("\n") : content;
+     return createHash("sha256").update(text).digest("hex");
+ }
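
A quick sketch of these helpers via the package's public exports; the message text is arbitrary and the printed token count is indicative:

```ts
import { estimateTokens, hashPrompt } from "ai-spend-guard";

const messages = [{ role: "user", content: "Classify this support ticket" }];

// Input is counted with js-tiktoken; output is assumed to be ~30% of input
// with a floor of 50 tokens, per the heuristic above.
const { input, output } = estimateTokens("gpt-4o", "", messages);
console.log(input, output); // e.g. 5 50

// A message array hashes the joined contents, so it matches the plain string.
console.log(hashPrompt(messages) === hashPrompt("Classify this support ticket")); // true
```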
@@ -0,0 +1,34 @@
1
+ export type TaskKind = "classification" | "summarization" | "generation" | "qa" | "other";
2
+ export type SpendAction = "allow" | "warn" | "reroute" | "cache" | "block";
3
+ export interface ChatRequest {
4
+ task: TaskKind;
5
+ model: string;
6
+ prompt: string;
7
+ estimatedInputTokens: number;
8
+ estimatedOutputTokens: number;
9
+ retries?: number;
10
+ metadata?: Record<string, unknown>;
11
+ }
12
+ export interface PolicyConfig {
13
+ maxCostPerCall?: number;
14
+ disallowModels?: string[];
15
+ cacheRepeatedPrompts?: boolean;
16
+ autoReroute?: boolean;
17
+ allowBlocking?: boolean;
18
+ }
19
+ export interface GuardrailRule {
20
+ id: string;
21
+ reason: string;
22
+ action: SpendAction;
23
+ suggestedModel?: string;
24
+ estimatedSavingsUsd?: number;
25
+ }
26
+ export interface GuardrailDecision {
27
+ action: SpendAction;
28
+ model: string;
29
+ estimatedCostUsd: number;
30
+ savingsUsd: number;
31
+ warnings: string[];
32
+ reasons: string[];
33
+ appliedRuleIds: string[];
34
+ }
package/dist/types.js ADDED
@@ -0,0 +1 @@
+ export {};
package/package.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "name": "ai-spend-guard",
+   "version": "0.1.0",
+   "type": "module",
+   "description": "Cost-control guardrails for OpenAI- and Hugging Face-compatible chat calls.",
+   "license": "MIT",
+   "keywords": [
+     "ai",
+     "llm",
+     "cost",
+     "guardrails",
+     "openai",
+     "huggingface",
+     "cache",
+     "budget"
+   ],
+   "main": "dist/index.js",
+   "types": "dist/index.d.ts",
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.js"
+     }
+   },
+   "files": [
+     "dist",
+     "README.md",
+     "LICENSE",
+     "pricing.rates.json"
+   ],
+   "scripts": {
+     "build": "node ./node_modules/typescript/bin/tsc -p tsconfig.json",
+     "check": "node ./node_modules/typescript/bin/tsc -p tsconfig.json --noEmit",
+     "example": "node --import tsx examples/basic.ts",
+     "demo": "node --import tsx src/demo.ts",
+     "demo-cli": "node --import tsx src/cli-demo.ts",
+     "server": "node --import tsx src/server.ts"
+   },
+   "dependencies": {
+     "openai": "^4.67.1",
+     "express": "^4.19.2",
+     "js-tiktoken": "^1.0.14"
+   },
+   "devDependencies": {
+     "@types/node": "^22.15.29",
+     "@types/express": "^4.17.21",
+     "tsx": "^4.19.4",
+     "typescript": "^5.8.3"
+   }
+ }
package/pricing.rates.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "default": {
+     "inputPer1MTokens": 5,
+     "outputPer1MTokens": 15
+   },
+   "models": {
+     "gpt-4o": {
+       "inputPer1MTokens": 5,
+       "outputPer1MTokens": 15
+     },
+     "gpt-4.1": {
+       "inputPer1MTokens": 10,
+       "outputPer1MTokens": 30
+     },
+     "gpt-4o-mini": {
+       "inputPer1MTokens": 0.15,
+       "outputPer1MTokens": 0.6
+     },
+     "gpt-4.1-mini": {
+       "inputPer1MTokens": 0.4,
+       "outputPer1MTokens": 1.6
+     },
+     "gpt-3.5-turbo": {
+       "inputPer1MTokens": 0.5,
+       "outputPer1MTokens": 1.5
+     }
+   }
+ }