agentlytics 0.1.8 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/package.json +3 -1
  2. package/pricing.js +88 -0
  3. package/pricing.json +87 -0
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "agentlytics",
3
- "version": "0.1.8",
3
+ "version": "0.1.9",
4
4
  "description": "Comprehensive analytics dashboard for AI coding agents — Cursor, Windsurf, Claude Code, VS Code Copilot, Zed, Antigravity, OpenCode, Command Code",
5
5
  "main": "index.js",
6
6
  "bin": {
@@ -14,6 +14,8 @@
14
14
  "relay-server.js",
15
15
  "relay-client.js",
16
16
  "mcp-server.js",
17
+ "pricing.js",
18
+ "pricing.json",
17
19
  "editors/",
18
20
  "ui/src/",
19
21
  "ui/index.html",
package/pricing.js ADDED
@@ -0,0 +1,88 @@
1
// Pricing table loaded from pricing.json (edit that file to add/update models).
// Keys beginning with "_" (e.g. "_meta") are documentation metadata, not
// models, and are excluded from the lookup table.
const _raw = require('./pricing.json');
const MODEL_PRICING = {};
for (const [key, value] of Object.entries(_raw)) {
  if (!key.startsWith('_')) MODEL_PRICING[key] = value;
}
6
+
7
// Normalize a model identifier to a key of the pricing table.
// Handles versioned names ("claude-sonnet-4-20250514", "gpt-4o-2024-08-06"),
// provider prefixes ("anthropic/claude-...", "us.anthropic.claude-..."),
// MODEL_* enum constants, and reversed claude names
// ("claude-4-6-opus" → "claude-opus-4-6").
//
// name    – raw model identifier (may be null/empty)
// pricing – lookup table of pricing keys; defaults to MODEL_PRICING
//           (new optional parameter — existing one-argument callers are
//           unaffected)
//
// Returns the matched pricing key, or null when nothing matches.
function normalizeModelName(name, pricing = MODEL_PRICING) {
  if (!name) return null;
  let n = name.toLowerCase().trim();

  // Own-property check: plain `pricing[c]` walks the prototype chain, so a
  // model named "constructor" would falsely "match" Object.prototype and be
  // returned as a pricing key.
  const has = (key) => Object.prototype.hasOwnProperty.call(pricing, key);

  // Strip leading provider prefixes (e.g. "anthropic/claude-..." or "openai/gpt-...")
  const slashIdx = n.lastIndexOf('/');
  if (slashIdx !== -1) n = n.substring(slashIdx + 1);

  // Strip dot-delimited provider prefixes (e.g. "us.anthropic.claude-sonnet-4-6").
  // Only strip when every prefix segment is a simple word (no dashes), to avoid
  // splitting version dots like "claude-4.6-opus".
  const dotParts = n.split('.');
  if (dotParts.length > 1) {
    const prefixes = dotParts.slice(0, -1);
    const last = dotParts[dotParts.length - 1];
    if (last.includes('-') && prefixes.every(p => !p.includes('-'))) n = last;
  }

  // Handle MODEL_CLAUDE_* / MODEL_GPT_* enum constants.
  if (n.startsWith('model_')) {
    n = n.substring(6).replace(/_/g, '-');
  }

  // Build candidate list: original + dots→dashes + reversed claude names.
  const candidates = [n];
  if (n.includes('.')) candidates.push(n.replace(/\./g, '-'));

  // Rearrange reversed claude names: "claude-4-6-opus-..." → "claude-opus-4-6".
  // Iterate over a snapshot so the dots→dashes variant is also rearranged.
  for (const c of [...candidates]) {
    const rev = c.match(/^(claude)-(\d+)-(\d+)-(opus|sonnet|haiku)/);
    if (rev) candidates.push(`${rev[1]}-${rev[4]}-${rev[2]}-${rev[3]}`);
  }

  // Pass 1: exact and precise matches across ALL candidates first.
  for (const c of candidates) {
    if (has(c)) return c;
  }
  for (const c of candidates) {
    const withoutDate = c.replace(/-\d{4}-?\d{2}-?\d{2}$/, '');
    if (has(withoutDate)) return withoutDate;
    const withoutTag = c.replace(/:(latest|thinking)$/, '');
    if (has(withoutTag)) return withoutTag;
    const withoutQual = c.replace(/-(thinking|high|xhigh|preview|latest)(-thinking|-high|-xhigh|-preview)*/g, '');
    if (withoutQual !== c && has(withoutQual)) return withoutQual;
  }

  // Pass 2: fuzzy startsWith (longest key match wins).
  const keys = Object.keys(pricing);
  for (const c of candidates) {
    let best = null;
    for (const key of keys) {
      if (c.startsWith(key) && (!best || key.length > best.length)) best = key;
    }
    if (best) return best;
  }

  return null;
}
68
+
69
// Resolve a raw model name to its pricing entry.
// Returns the per-million-token pricing object, or null for unknown models.
function getModelPricing(modelName) {
  const key = normalizeModelName(modelName);
  if (!key) return null;
  return MODEL_PRICING[key];
}
73
+
74
// Calculate the USD cost for a set of token counts against a model's pricing.
//
// modelName    – raw model identifier (versioned/prefixed forms are normalized)
// inputTokens  – prompt tokens        (nullish → 0)
// outputTokens – completion tokens    (nullish → 0)
// cacheRead    – cache-hit tokens     (nullish → 0)
// cacheWrite   – cache-write tokens   (nullish → 0)
//
// Returns the total cost in USD, or null if the model is unknown.
function calculateCost(modelName, inputTokens, outputTokens, cacheRead, cacheWrite) {
  const pricing = getModelPricing(modelName);
  if (!pricing) return null;

  const PER_MILLION = 1_000_000; // all prices are quoted per million tokens

  // pricing.json is user-editable; an entry missing a rate field previously
  // turned the entire total into NaN. Treat an absent rate as 0 so the
  // remaining components still sum to a usable figure.
  const input = ((inputTokens || 0) / PER_MILLION) * (pricing.input ?? 0);
  const output = ((outputTokens || 0) / PER_MILLION) * (pricing.output ?? 0);
  const cr = ((cacheRead || 0) / PER_MILLION) * (pricing.cacheRead ?? 0);
  const cw = ((cacheWrite || 0) / PER_MILLION) * (pricing.cacheWrite ?? 0);

  return input + output + cr + cw;
}

module.exports = { MODEL_PRICING, normalizeModelName, getModelPricing, calculateCost };
package/pricing.json ADDED
@@ -0,0 +1,87 @@
1
+ {
2
+ "_meta": {
3
+ "description": "Model pricing per million tokens (USD). Keys use dashes (no dots) for consistent normalization.",
4
+ "sources": {
5
+ "Anthropic": "platform.claude.com/docs/en/about-claude/pricing",
6
+ "OpenAI": "developers.openai.com/api/docs/pricing",
7
+ "Google": "ai.google.dev/gemini-api/docs/pricing",
8
+ "xAI": "docs.x.ai/developers/models",
9
+ "DeepSeek": "api-docs.deepseek.com/quick_start/pricing"
10
+ },
11
+ "lastVerified": "2026-07",
12
+ "notes": {
13
+ "keys": "All keys use dashes instead of dots (e.g. gpt-5-4 not gpt-5.4) so normalizeModelName dots→dashes works",
14
+ "cacheRead": "cache hit price (Anthropic: 0.1x input, OpenAI: varies, Google: ~0.1x input)",
15
+ "cacheWrite": "cache write price (Anthropic: 1.25x input, OpenAI: ~same as input)"
16
+ }
17
+ },
18
+
19
+ "claude-opus-4-6": { "input": 5, "output": 25, "cacheRead": 0.50, "cacheWrite": 6.25 },
20
+ "claude-sonnet-4-6": { "input": 3, "output": 15, "cacheRead": 0.30, "cacheWrite": 3.75 },
21
+ "claude-opus-4-5": { "input": 5, "output": 25, "cacheRead": 0.50, "cacheWrite": 6.25 },
22
+ "claude-sonnet-4-5": { "input": 3, "output": 15, "cacheRead": 0.30, "cacheWrite": 3.75 },
23
+ "claude-haiku-4-5": { "input": 1, "output": 5, "cacheRead": 0.10, "cacheWrite": 1.25 },
24
+ "claude-opus-4-1": { "input": 15, "output": 75, "cacheRead": 1.50, "cacheWrite": 18.75 },
25
+ "claude-opus-4-0": { "input": 15, "output": 75, "cacheRead": 1.50, "cacheWrite": 18.75 },
26
+ "claude-sonnet-4-0": { "input": 3, "output": 15, "cacheRead": 0.30, "cacheWrite": 3.75 },
27
+ "claude-sonnet-4": { "input": 3, "output": 15, "cacheRead": 0.30, "cacheWrite": 3.75 },
28
+ "claude-sonnet-3-7": { "input": 3, "output": 15, "cacheRead": 0.30, "cacheWrite": 3.75 },
29
+ "claude-3-5-sonnet": { "input": 3, "output": 15, "cacheRead": 0.30, "cacheWrite": 3.75 },
30
+ "claude-3-5-haiku": { "input": 0.80, "output": 4, "cacheRead": 0.08, "cacheWrite": 1 },
31
+ "claude-3-opus": { "input": 15, "output": 75, "cacheRead": 1.50, "cacheWrite": 18.75 },
32
+ "claude-3-sonnet": { "input": 3, "output": 15, "cacheRead": 0.30, "cacheWrite": 3.75 },
33
+ "claude-3-haiku": { "input": 0.25, "output": 1.25, "cacheRead": 0.03, "cacheWrite": 0.30 },
34
+
35
+ "gpt-5-4": { "input": 2.50, "output": 15, "cacheRead": 0.25, "cacheWrite": 2.50 },
36
+ "gpt-5-4-pro": { "input": 30, "output": 180, "cacheRead": 30, "cacheWrite": 30 },
37
+ "gpt-5-2": { "input": 1.75, "output": 14, "cacheRead": 0.175, "cacheWrite": 1.75 },
38
+ "gpt-5-2-pro": { "input": 21, "output": 168, "cacheRead": 21, "cacheWrite": 21 },
39
+ "gpt-5-1": { "input": 1.25, "output": 10, "cacheRead": 0.125, "cacheWrite": 1.25 },
40
+ "gpt-5": { "input": 1.25, "output": 10, "cacheRead": 0.125, "cacheWrite": 1.25 },
41
+ "gpt-5-mini": { "input": 0.25, "output": 2, "cacheRead": 0.025, "cacheWrite": 0.25 },
42
+ "gpt-5-nano": { "input": 0.05, "output": 0.40, "cacheRead": 0.005, "cacheWrite": 0.05 },
43
+ "gpt-5-pro": { "input": 15, "output": 120, "cacheRead": 15, "cacheWrite": 15 },
44
+ "gpt-5-3-codex": { "input": 1.75, "output": 14, "cacheRead": 0.175, "cacheWrite": 1.75 },
45
+ "gpt-5-2-codex": { "input": 1.75, "output": 14, "cacheRead": 0.175, "cacheWrite": 1.75 },
46
+ "gpt-5-1-codex-max": { "input": 1.25, "output": 10, "cacheRead": 0.125, "cacheWrite": 1.25 },
47
+ "gpt-5-1-codex": { "input": 1.25, "output": 10, "cacheRead": 0.125, "cacheWrite": 1.25 },
48
+ "gpt-5-codex": { "input": 1.25, "output": 10, "cacheRead": 0.125, "cacheWrite": 1.25 },
49
+ "codex-mini": { "input": 1.50, "output": 6, "cacheRead": 0.375, "cacheWrite": 1.50 },
50
+ "gpt-4-1": { "input": 2, "output": 8, "cacheRead": 0.50, "cacheWrite": 2 },
51
+ "gpt-4-1-mini": { "input": 0.40, "output": 1.60, "cacheRead": 0.10, "cacheWrite": 0.40 },
52
+ "gpt-4-1-nano": { "input": 0.10, "output": 0.40, "cacheRead": 0.025, "cacheWrite": 0.10 },
53
+ "gpt-4o": { "input": 2.50, "output": 10, "cacheRead": 1.25, "cacheWrite": 2.50 },
54
+ "gpt-4o-mini": { "input": 0.15, "output": 0.60, "cacheRead": 0.075, "cacheWrite": 0.15 },
55
+ "gpt-4-turbo": { "input": 10, "output": 30, "cacheRead": 10, "cacheWrite": 10 },
56
+ "gpt-4": { "input": 30, "output": 60, "cacheRead": 30, "cacheWrite": 30 },
57
+ "gpt-3-5-turbo": { "input": 0.50, "output": 1.50, "cacheRead": 0.50, "cacheWrite": 0.50 },
58
+ "o1": { "input": 15, "output": 60, "cacheRead": 7.50, "cacheWrite": 15 },
59
+ "o1-mini": { "input": 1.10, "output": 4.40, "cacheRead": 0.55, "cacheWrite": 1.10 },
60
+ "o1-pro": { "input": 150, "output": 600, "cacheRead": 150, "cacheWrite": 150 },
61
+ "o3": { "input": 2, "output": 8, "cacheRead": 0.50, "cacheWrite": 2 },
62
+ "o3-pro": { "input": 20, "output": 80, "cacheRead": 20, "cacheWrite": 20 },
63
+ "o3-mini": { "input": 1.10, "output": 4.40, "cacheRead": 0.55, "cacheWrite": 1.10 },
64
+ "o4-mini": { "input": 1.10, "output": 4.40, "cacheRead": 0.275, "cacheWrite": 1.10 },
65
+
66
+ "gemini-3-1-pro": { "input": 2, "output": 12, "cacheRead": 0.20, "cacheWrite": 2 },
67
+ "gemini-3-pro": { "input": 2, "output": 12, "cacheRead": 0.20, "cacheWrite": 2 },
68
+ "gemini-3-flash": { "input": 0.50, "output": 3, "cacheRead": 0.05, "cacheWrite": 0.50 },
69
+ "gemini-3-1-flash-lite": { "input": 0.25, "output": 1.50, "cacheRead": 0.025, "cacheWrite": 0.25 },
70
+ "gemini-2-5-pro": { "input": 1.25, "output": 10, "cacheRead": 0.125, "cacheWrite": 1.25 },
71
+ "gemini-2-5-flash": { "input": 0.30, "output": 2.50, "cacheRead": 0.03, "cacheWrite": 0.30 },
72
+ "gemini-2-5-flash-lite": { "input": 0.10, "output": 0.40, "cacheRead": 0.01, "cacheWrite": 0.10 },
73
+ "gemini-2-0-flash": { "input": 0.10, "output": 0.40, "cacheRead": 0.025, "cacheWrite": 0.10 },
74
+ "gemini-2-0-flash-lite": { "input": 0.075,"output": 0.30, "cacheRead": 0.075, "cacheWrite": 0.075 },
75
+ "gemini-1-5-pro": { "input": 1.25, "output": 5, "cacheRead": 0.315, "cacheWrite": 1.25 },
76
+ "gemini-1-5-flash": { "input": 0.075,"output": 0.30, "cacheRead": 0.019, "cacheWrite": 0.075 },
77
+
78
+ "grok-4": { "input": 3, "output": 15, "cacheRead": 0.75, "cacheWrite": 3 },
79
+ "grok-4-1-fast": { "input": 0.20, "output": 0.50, "cacheRead": 0.05, "cacheWrite": 0.20 },
80
+ "grok-4-fast": { "input": 0.20, "output": 0.50, "cacheRead": 0.05, "cacheWrite": 0.20 },
81
+ "grok-code-fast-1": { "input": 0.20, "output": 1.50, "cacheRead": 0.02, "cacheWrite": 0.20 },
82
+ "grok-3": { "input": 3, "output": 15, "cacheRead": 0.75, "cacheWrite": 3 },
83
+ "grok-3-mini": { "input": 0.30, "output": 0.50, "cacheRead": 0.07, "cacheWrite": 0.30 },
84
+
85
+ "deepseek-chat": { "input": 0.28, "output": 0.42, "cacheRead": 0.028, "cacheWrite": 0.28 },
86
+ "deepseek-reasoner": { "input": 0.28, "output": 0.42, "cacheRead": 0.028, "cacheWrite": 0.28 }
87
+ }