@revenium/claude-code-metering 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/CHANGELOG.md +22 -28
  2. package/README.md +321 -139
  3. package/dist/cli/commands/backfill.d.ts +98 -1
  4. package/dist/cli/commands/backfill.d.ts.map +1 -1
  5. package/dist/cli/commands/backfill.js +356 -146
  6. package/dist/cli/commands/backfill.js.map +1 -1
  7. package/dist/cli/commands/setup.d.ts +2 -0
  8. package/dist/cli/commands/setup.d.ts.map +1 -1
  9. package/dist/cli/commands/setup.js +55 -49
  10. package/dist/cli/commands/setup.js.map +1 -1
  11. package/dist/cli/commands/status.d.ts.map +1 -1
  12. package/dist/cli/commands/status.js +2 -11
  13. package/dist/cli/commands/status.js.map +1 -1
  14. package/dist/cli/commands/test.d.ts.map +1 -1
  15. package/dist/cli/commands/test.js +23 -22
  16. package/dist/cli/commands/test.js.map +1 -1
  17. package/dist/cli/index.d.ts +2 -1
  18. package/dist/cli/index.d.ts.map +1 -1
  19. package/dist/cli/index.js +44 -30
  20. package/dist/cli/index.js.map +1 -1
  21. package/dist/core/api/client.d.ts +7 -6
  22. package/dist/core/api/client.d.ts.map +1 -1
  23. package/dist/core/api/client.js +52 -47
  24. package/dist/core/api/client.js.map +1 -1
  25. package/dist/core/config/loader.d.ts +5 -13
  26. package/dist/core/config/loader.d.ts.map +1 -1
  27. package/dist/core/config/loader.js +67 -46
  28. package/dist/core/config/loader.js.map +1 -1
  29. package/dist/core/config/validator.d.ts +5 -1
  30. package/dist/core/config/validator.d.ts.map +1 -1
  31. package/dist/core/config/validator.js +37 -22
  32. package/dist/core/config/validator.js.map +1 -1
  33. package/dist/core/config/writer.d.ts +1 -1
  34. package/dist/core/config/writer.d.ts.map +1 -1
  35. package/dist/core/config/writer.js +76 -72
  36. package/dist/core/config/writer.js.map +1 -1
  37. package/dist/core/shell/detector.d.ts +8 -1
  38. package/dist/core/shell/detector.d.ts.map +1 -1
  39. package/dist/core/shell/detector.js +38 -24
  40. package/dist/core/shell/detector.js.map +1 -1
  41. package/dist/core/shell/profile-updater.d.ts +1 -1
  42. package/dist/core/shell/profile-updater.d.ts.map +1 -1
  43. package/dist/core/shell/profile-updater.js +40 -27
  44. package/dist/core/shell/profile-updater.js.map +1 -1
  45. package/dist/index.d.ts +9 -8
  46. package/dist/index.d.ts.map +1 -1
  47. package/dist/index.js +1 -0
  48. package/dist/index.js.map +1 -1
  49. package/dist/types/index.d.ts +18 -23
  50. package/dist/types/index.d.ts.map +1 -1
  51. package/dist/utils/constants.d.ts +2 -2
  52. package/dist/utils/constants.d.ts.map +1 -1
  53. package/dist/utils/constants.js +21 -21
  54. package/dist/utils/constants.js.map +1 -1
  55. package/dist/utils/hashing.d.ts +18 -0
  56. package/dist/utils/hashing.d.ts.map +1 -0
  57. package/dist/utils/hashing.js +27 -0
  58. package/dist/utils/hashing.js.map +1 -0
  59. package/package.json +6 -3
@@ -3,6 +3,18 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", { value: true });
+exports.sleep = sleep;
+exports.sanitizeErrorMessage = sanitizeErrorMessage;
+exports.isRetryableError = isRetryableError;
+exports.sendBatchWithRetry = sendBatchWithRetry;
+exports.parseRelativeDate = parseRelativeDate;
+exports.parseSinceDate = parseSinceDate;
+exports.findJsonlFiles = findJsonlFiles;
+exports.calculateStatistics = calculateStatistics;
+exports.parseJsonlLine = parseJsonlLine;
+exports.streamJsonlRecords = streamJsonlRecords;
+exports.toUnixNano = toUnixNano;
+exports.createOtlpPayload = createOtlpPayload;
 exports.backfillCommand = backfillCommand;
 const node_fs_1 = require("node:fs");
 const promises_1 = require("node:fs/promises");
@@ -14,6 +26,83 @@ const ora_1 = __importDefault(require("ora"));
 const loader_js_1 = require("../../core/config/loader.js");
 const client_js_1 = require("../../core/api/client.js");
 const constants_js_1 = require("../../utils/constants.js");
+const hashing_js_1 = require("../../utils/hashing.js");
+/**
+ * Sleep for a specified number of milliseconds.
+ */
+function sleep(ms) {
+    return new Promise((resolve) => setTimeout(resolve, ms));
+}
+/**
+ * Sanitize error message to prevent API key leakage.
+ * Truncates long messages and removes potential sensitive data.
+ */
+function sanitizeErrorMessage(errorMsg) {
+    const maxLength = 500;
+    let sanitized = errorMsg;
+    if (sanitized.length > maxLength) {
+        sanitized = `${sanitized.substring(0, maxLength)}...`;
+    }
+    return sanitized;
+}
+/**
+ * Check if an error is retryable based on HTTP status code.
+ * 4xx errors (except 429) are not retryable as they indicate client errors.
+ */
+function isRetryableError(errorMsg) {
+    const statusMatch = errorMsg.match(/OTLP request failed: (\d{3})/);
+    if (!statusMatch) {
+        return true;
+    }
+    const statusCode = parseInt(statusMatch[1], 10);
+    if (statusCode === 429) {
+        return true;
+    }
+    if (statusCode >= 400 && statusCode < 500) {
+        return false;
+    }
+    return true;
+}
+/**
+ * Send a batch with retry logic and exponential backoff.
+ */
+async function sendBatchWithRetry(endpoint, apiKey, payload, maxRetries, verbose) {
+    for (let attempt = 0; attempt < maxRetries; attempt++) {
+        try {
+            await (0, client_js_1.sendOtlpLogs)(endpoint, apiKey, payload);
+            if (verbose && attempt > 0) {
+                console.log(chalk_1.default.green(`  ✓ Succeeded after ${attempt + 1} attempts`));
+            }
+            return { success: true, attempts: attempt + 1 };
+        }
+        catch (error) {
+            const rawErrorMsg = error instanceof Error ? error.message : "Unknown error";
+            const errorMsg = sanitizeErrorMessage(rawErrorMsg);
+            const isRetryable = isRetryableError(errorMsg);
+            if (!isRetryable) {
+                if (verbose) {
+                    console.log(chalk_1.default.red(`  ✗ Non-retryable error (client error): ${errorMsg}`));
+                }
+                return { success: false, attempts: attempt + 1, error: errorMsg };
+            }
+            if (attempt < maxRetries - 1) {
+                const backoffDelay = 1000 * Math.pow(2, attempt);
+                if (verbose) {
+                    console.log(chalk_1.default.yellow(`  ✗ Attempt ${attempt + 1} failed: ${errorMsg}`));
+                    console.log(chalk_1.default.blue(`  ⏳ Retrying in ${backoffDelay}ms...`));
+                }
+                await sleep(backoffDelay);
+            }
+            else {
+                if (verbose) {
+                    console.log(chalk_1.default.red(`  ✗ All ${maxRetries} attempts failed`));
+                }
+                return { success: false, attempts: maxRetries, error: errorMsg };
+            }
+        }
+    }
+    return { success: false, attempts: maxRetries };
+}
 /**
  * Parses a relative date string like "7d" or "1m" into a Date.
  */
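
Editor's note: the new retry helper fails fast on 4xx client errors (except 429) and backs off exponentially at 1000 * 2^attempt ms, i.e. 1 s then 2 s before the second and third attempts. A minimal sketch of the classification behavior, assuming the dist file can be required directly (the require path is illustrative):

    // Illustrative sketch: exercises the isRetryableError export shown above.
    const { isRetryableError } = require("./dist/cli/commands/backfill.js");
    console.log(isRetryableError("OTLP request failed: 429")); // true  (rate limited, retry)
    console.log(isRetryableError("OTLP request failed: 401")); // false (client error, fail fast)
    console.log(isRetryableError("OTLP request failed: 503")); // true  (server error, retry)
    console.log(isRetryableError("socket hang up"));           // true  (no status code, assumed transient)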
@@ -25,19 +114,19 @@ function parseRelativeDate(input) {
     const unit = match[2];
     const now = new Date();
     switch (unit) {
-        case 'd':
+        case "d":
             now.setDate(now.getDate() - amount);
             break;
-        case 'w':
+        case "w":
             now.setDate(now.getDate() - amount * 7);
             break;
-        case 'm':
+        case "m":
             now.setMonth(now.getMonth() - amount);
             break;
-        case 'M':
+        case "M":
             now.setMonth(now.getMonth() - amount);
             break;
-        case 'y':
+        case "y":
            now.setFullYear(now.getFullYear() - amount);
            break;
        default:
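
Editor's note: the quote-style changes above are cosmetic, but note that "m" and "M" both resolve to months; there is no minutes unit. Assuming a current date of 2025-06-15, the parser resolves (a sketch, dates illustrative):

    parseRelativeDate("7d"); // 2025-06-08
    parseRelativeDate("2w"); // 2025-06-01
    parseRelativeDate("3m"); // 2025-03-15 (same result as "3M")
    parseRelativeDate("1y"); // 2024-06-15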
@@ -73,7 +162,7 @@ async function findJsonlFiles(dir, errors = []) {
             const result = await findJsonlFiles(fullPath, errors);
             files.push(...result.files);
         }
-        else if (entry.isFile() && entry.name.endsWith('.jsonl')) {
+        else if (entry.isFile() && entry.name.endsWith(".jsonl")) {
             files.push(fullPath);
         }
     }
@@ -84,6 +173,76 @@ async function findJsonlFiles(dir, errors = []) {
         }
     }
     return { files, errors };
 }
+function calculateStatistics(records) {
+    if (records.length === 0) {
+        return {
+            totalRecords: 0,
+            oldestTimestamp: "",
+            newestTimestamp: "",
+            totalInputTokens: 0,
+            totalOutputTokens: 0,
+            totalCacheReadTokens: 0,
+            totalCacheCreationTokens: 0,
+        };
+    }
+    const sortedRecords = [...records].sort((a, b) => new Date(a.timestamp).getTime() - new Date(b.timestamp).getTime());
+    return {
+        totalRecords: records.length,
+        oldestTimestamp: sortedRecords[0].timestamp,
+        newestTimestamp: sortedRecords[sortedRecords.length - 1].timestamp,
+        totalInputTokens: records.reduce((sum, r) => sum + r.inputTokens, 0),
+        totalOutputTokens: records.reduce((sum, r) => sum + r.outputTokens, 0),
+        totalCacheReadTokens: records.reduce((sum, r) => sum + r.cacheReadTokens, 0),
+        totalCacheCreationTokens: records.reduce((sum, r) => sum + r.cacheCreationTokens, 0),
+    };
+}
+function parseJsonlLine(line, sinceDate) {
+    if (!line.trim()) {
+        return {};
+    }
+    let entry;
+    try {
+        entry = JSON.parse(line);
+    }
+    catch {
+        return { parseError: true };
+    }
+    if (entry.type !== "assistant" || !entry.message?.usage) {
+        return {};
+    }
+    const usage = entry.message.usage;
+    const timestamp = entry.timestamp;
+    const sessionId = entry.sessionId;
+    const model = entry.message.model;
+    if (!timestamp || !sessionId || !model) {
+        return { missingFields: true };
+    }
+    const entryDate = new Date(timestamp);
+    if (!Number.isFinite(entryDate.getTime())) {
+        return {};
+    }
+    if (sinceDate && entryDate < sinceDate) {
+        return {};
+    }
+    const totalTokens = (usage.input_tokens || 0) +
+        (usage.output_tokens || 0) +
+        (usage.cache_read_input_tokens || 0) +
+        (usage.cache_creation_input_tokens || 0);
+    if (totalTokens === 0) {
+        return {};
+    }
+    return {
+        record: {
+            sessionId,
+            timestamp,
+            model,
+            inputTokens: usage.input_tokens || 0,
+            outputTokens: usage.output_tokens || 0,
+            cacheReadTokens: usage.cache_read_input_tokens || 0,
+            cacheCreationTokens: usage.cache_creation_input_tokens || 0,
+        },
+    };
+}
 /**
  * Streams a JSONL file and extracts records with usage data.
  * Yields objects indicating either a valid record or a parse error.
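
Editor's note: parseJsonlLine extracts the per-line filtering out of the stream loop so it can be unit-tested in isolation, and it now distinguishes missingFields from silent skips. A transcript line it accepts looks roughly like this (values illustrative; only the fields the parser reads are shown):

    const line = JSON.stringify({
        type: "assistant",
        timestamp: "2025-01-15T12:34:56.789Z",
        sessionId: "abc123",
        message: {
            model: "claude-sonnet-4-20250514",
            usage: { input_tokens: 1200, output_tokens: 340, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
        },
    });
    parseJsonlLine(line);       // => { record: { sessionId: "abc123", inputTokens: 1200, ... } }
    parseJsonlLine("not json"); // => { parseError: true }
    parseJsonlLine("");         // => {} (blank line, silently skipped)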
@@ -96,56 +255,13 @@ async function* streamJsonlRecords(filePath, sinceDate) {
     });
     try {
         for await (const line of rl) {
-            if (!line.trim())
-                continue;
-            try {
-                const entry = JSON.parse(line);
-                // Only process assistant messages with usage data
-                if (entry.type !== 'assistant' || !entry.message?.usage)
-                    continue;
-                const usage = entry.message.usage;
-                const timestamp = entry.timestamp;
-                const sessionId = entry.sessionId;
-                const model = entry.message.model;
-                // Skip if missing required fields
-                if (!timestamp || !sessionId || !model)
-                    continue;
-                // Validate timestamp is a valid date
-                const entryDate = new Date(timestamp);
-                if (!Number.isFinite(entryDate.getTime()))
-                    continue;
-                // Check date filter
-                if (sinceDate) {
-                    if (entryDate < sinceDate)
-                        continue;
-                }
-                // Skip entries with no actual token usage
-                const totalTokens = (usage.input_tokens || 0) +
-                    (usage.output_tokens || 0) +
-                    (usage.cache_read_input_tokens || 0) +
-                    (usage.cache_creation_input_tokens || 0);
-                if (totalTokens === 0)
-                    continue;
-                yield {
-                    record: {
-                        sessionId,
-                        timestamp,
-                        model,
-                        inputTokens: usage.input_tokens || 0,
-                        outputTokens: usage.output_tokens || 0,
-                        cacheReadTokens: usage.cache_read_input_tokens || 0,
-                        cacheCreationTokens: usage.cache_creation_input_tokens || 0,
-                    },
-                };
-            }
-            catch {
-                // Invalid JSON line, signal parse error
-                yield { parseError: true };
+            const result = parseJsonlLine(line, sinceDate);
+            if (result.record || result.parseError || result.missingFields) {
+                yield result;
             }
         }
     }
     finally {
-        // Ensure file stream is properly closed even on early exit
         fileStream.destroy();
         rl.close();
     }
@@ -162,55 +278,92 @@ function toUnixNano(timestamp) {
     }
     return (BigInt(ms) * BigInt(1_000_000)).toString();
 }
-/**
- * Creates an OTEL metrics payload from parsed records.
- * Each record generates multiple metrics (input_tokens, output_tokens, etc.)
- */
-function createOtlpPayload(records, costMultiplier) {
-    // Build metrics for all records
-    const allMetrics = [];
-    for (const record of records) {
+function createOtlpPayload(records, options) {
+    const { costMultiplier, email, organizationId, productId } = options;
+    // Filter and map records, skipping any with invalid timestamps
+    const logRecords = records
+        .map((record) => {
         const timeUnixNano = toUnixNano(record.timestamp);
-        if (timeUnixNano === null)
-            continue;
-        // Common attributes for this record
+        if (timeUnixNano === null) {
+            return null;
+        }
+        // Build attributes array with required fields
         const attributes = [
-            { key: 'ai.transaction_id', value: { stringValue: record.sessionId } },
-            { key: 'ai.model', value: { stringValue: record.model } },
-            { key: 'ai.provider', value: { stringValue: 'anthropic' } },
-            { key: 'cost_multiplier', value: { doubleValue: costMultiplier } },
-        ];
-        // Create metrics for each token type
-        const tokenMetrics = [
-            { name: 'ai.tokens.input', value: record.inputTokens },
-            { name: 'ai.tokens.output', value: record.outputTokens },
-            { name: 'ai.tokens.cache_read', value: record.cacheReadTokens },
-            { name: 'ai.tokens.cache_creation', value: record.cacheCreationTokens },
+            {
+                key: "transaction_id",
+                value: { stringValue: (0, hashing_js_1.generateTransactionId)(record) },
+            },
+            {
+                key: "session.id",
+                value: { stringValue: record.sessionId },
+            },
+            {
+                key: "model",
+                value: { stringValue: record.model },
+            },
+            {
+                key: "input_tokens",
+                value: { intValue: record.inputTokens },
+            },
+            {
+                key: "output_tokens",
+                value: { intValue: record.outputTokens },
+            },
+            {
+                key: "cache_read_tokens",
+                value: { intValue: record.cacheReadTokens },
+            },
+            {
+                key: "cache_creation_tokens",
+                value: { intValue: record.cacheCreationTokens },
+            },
         ];
-        for (const metric of tokenMetrics) {
-            allMetrics.push({
-                name: metric.name,
-                sum: {
-                    dataPoints: [{
-                            attributes,
-                            timeUnixNano,
-                            asInt: metric.value,
-                        }],
-                },
+        // Add optional subscriber/attribution attributes at log record level
+        // (backend ClaudeCodeMapper reads these from log record attrs, not resource attrs)
+        if (email) {
+            attributes.push({ key: "user.email", value: { stringValue: email } });
+        }
+        if (organizationId) {
+            attributes.push({
+                key: "organization.name",
+                value: { stringValue: organizationId },
            });
        }
-    }
+        if (productId) {
+            attributes.push({
+                key: "product.name",
+                value: { stringValue: productId },
+            });
+        }
+        return {
+            timeUnixNano,
+            body: { stringValue: "claude_code.api_request" },
+            attributes,
+        };
+    })
+        .filter((record) => record !== null);
     return {
-        resourceMetrics: [
+        resourceLogs: [
            {
                resource: {
                    attributes: [
-                        { key: 'service.name', value: { stringValue: 'claude-code' } },
+                        {
+                            key: "service.name",
+                            value: { stringValue: "claude-code" },
+                        },
+                        {
+                            key: "cost_multiplier",
+                            value: { doubleValue: costMultiplier },
+                        },
                    ],
                },
-                scopeMetrics: [
+                scopeLogs: [
                    {
-                        metrics: allMetrics,
+                        scope: {
+                            name: "claude-code",
+                            version: "1.0.0",
+                        },
+                        logRecords,
                    },
                ],
            },
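
Editor's note: this hunk switches the payload from OTLP metrics (resourceMetrics, four sum data points per record) to OTLP logs (resourceLogs, one log record per usage record), and transaction_id is now a hash from the new utils/hashing module rather than the raw session id. For a single record, the new payload is shaped approximately like this (values illustrative):

    {
        "resourceLogs": [{
            "resource": { "attributes": [
                { "key": "service.name", "value": { "stringValue": "claude-code" } },
                { "key": "cost_multiplier", "value": { "doubleValue": 0.08 } }
            ] },
            "scopeLogs": [{
                "scope": { "name": "claude-code", "version": "1.0.0" },
                "logRecords": [{
                    "timeUnixNano": "1736945696789000000",
                    "body": { "stringValue": "claude_code.api_request" },
                    "attributes": [
                        { "key": "transaction_id", "value": { "stringValue": "<generateTransactionId hash>" } },
                        { "key": "session.id", "value": { "stringValue": "abc123" } },
                        { "key": "model", "value": { "stringValue": "claude-sonnet-4-20250514" } },
                        { "key": "input_tokens", "value": { "intValue": 1200 } },
                        { "key": "output_tokens", "value": { "intValue": 340 } },
                        { "key": "cache_read_tokens", "value": { "intValue": 0 } },
                        { "key": "cache_creation_tokens", "value": { "intValue": 0 } }
                    ]
                }]
            }]
        }]
    }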
@@ -220,17 +373,18 @@ function createOtlpPayload(records, costMultiplier) {
 /**
  * Backfill command - imports historical Claude Code usage data.
  */
-async function backfillCommand(options = {}) {
-    const { since, dryRun = false, batchSize = 100, verbose = false } = options;
-    console.log(chalk_1.default.bold('\nRevenium Claude Code Backfill\n'));
+async function backfillCommand(options = {}, deps = {}) {
+    const { since, dryRun = false, batchSize = 100, delay = 100, verbose = false, } = options;
+    const { loadConfig: getConfig = loader_js_1.loadConfig, findJsonlFiles: findFiles = findJsonlFiles, streamJsonlRecords: streamRecords = streamJsonlRecords, sendBatchWithRetry: sendBatch = sendBatchWithRetry, homedir: getHomedir = node_os_1.homedir, } = deps;
+    console.log(chalk_1.default.bold("\nRevenium Claude Code Backfill\n"));
     if (dryRun) {
-        console.log(chalk_1.default.yellow('Running in dry-run mode - no data will be sent\n'));
+        console.log(chalk_1.default.yellow("Running in dry-run mode - no data will be sent\n"));
     }
     // Load configuration
-    const config = await (0, loader_js_1.loadConfig)();
+    const config = await getConfig();
     if (!config) {
-        console.log(chalk_1.default.red('Configuration not found'));
-        console.log(chalk_1.default.yellow('\nRun `revenium-metering setup` to configure Claude Code metering.'));
+        console.log(chalk_1.default.red("Configuration not found"));
+        console.log(chalk_1.default.yellow("\nRun `revenium-metering setup` to configure Claude Code metering."));
         process.exit(1);
     }
     // Parse since date
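
Editor's note: the new second deps argument enables dependency injection for tests; every collaborator defaults to the real implementation, so production callers are unaffected. A minimal test stub, assuming a dry run so no network call is made (all stub values hypothetical):

    await backfillCommand({ dryRun: true }, {
        loadConfig: async () => ({
            endpoint: "https://api.example.com/otlp",
            apiKey: "test-key",
            subscriptionTier: "pro",
        }),
        findJsonlFiles: async () => ({ files: ["/tmp/fixture.jsonl"], errors: [] }),
        streamJsonlRecords: async function* () {
            // One synthetic usage record; field names match the parsed record shape above.
            yield { record: { sessionId: "s1", timestamp: "2025-01-15T00:00:00.000Z", model: "claude-sonnet-4", inputTokens: 10, outputTokens: 5, cacheReadTokens: 0, cacheCreationTokens: 0 } };
        },
        homedir: () => "/tmp",
    });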
@@ -239,21 +393,23 @@ async function backfillCommand(options = {}) {
         sinceDate = parseSinceDate(since);
         if (!sinceDate) {
             console.log(chalk_1.default.red(`Invalid --since value: ${since}`));
-            console.log(chalk_1.default.dim('Use ISO format (2024-01-15) or relative format (7d, 1m, 1y)'));
+            console.log(chalk_1.default.dim("Use ISO format (2024-01-15) or relative format (7d, 1m, 1y)"));
             process.exit(1);
         }
         console.log(chalk_1.default.dim(`Filtering records since: ${sinceDate.toISOString()}\n`));
     }
     // Get cost multiplier (use ?? to allow explicit 0 override for free tier/testing)
     const costMultiplier = config.costMultiplierOverride ??
-        (config.subscriptionTier ? (0, constants_js_1.getCostMultiplier)(config.subscriptionTier) : 0.08);
+        (config.subscriptionTier
+            ? (0, constants_js_1.getCostMultiplier)(config.subscriptionTier)
+            : 0.08);
     // Discover JSONL files
-    const projectsDir = (0, node_path_1.join)((0, node_os_1.homedir)(), '.claude', 'projects');
-    const discoverSpinner = (0, ora_1.default)('Discovering JSONL files...').start();
-    const { files: jsonlFiles, errors: discoveryErrors } = await findJsonlFiles(projectsDir);
+    const projectsDir = (0, node_path_1.join)(getHomedir(), ".claude", "projects");
+    const discoverSpinner = (0, ora_1.default)("Discovering JSONL files...").start();
+    const { files: jsonlFiles, errors: discoveryErrors } = await findFiles(projectsDir);
     if (discoveryErrors.length > 0 && verbose) {
         discoverSpinner.warn(`Found ${jsonlFiles.length} JSONL file(s) with ${discoveryErrors.length} directory error(s)`);
-        console.log(chalk_1.default.yellow('\nDirectory access errors:'));
+        console.log(chalk_1.default.yellow("\nDirectory access errors:"));
         for (const error of discoveryErrors.slice(0, 5)) {
             console.log(chalk_1.default.yellow(`  ${error}`));
         }
@@ -262,10 +418,10 @@ async function backfillCommand(options = {}) {
         }
     }
     else if (jsonlFiles.length === 0) {
-        discoverSpinner.fail('No JSONL files found');
+        discoverSpinner.fail("No JSONL files found");
         console.log(chalk_1.default.dim(`Searched in: ${projectsDir}`));
         if (discoveryErrors.length > 0) {
-            console.log(chalk_1.default.yellow('\nDirectory access errors:'));
+            console.log(chalk_1.default.yellow("\nDirectory access errors:"));
             for (const error of discoveryErrors) {
                 console.log(chalk_1.default.yellow(`  ${error}`));
             }
@@ -276,27 +432,31 @@ async function backfillCommand(options = {}) {
         discoverSpinner.succeed(`Found ${jsonlFiles.length} JSONL file(s)`);
     }
     if (verbose) {
-        console.log(chalk_1.default.dim('\nFiles:'));
+        console.log(chalk_1.default.dim("\nFiles:"));
         for (const file of jsonlFiles.slice(0, 10)) {
             console.log(chalk_1.default.dim(`  ${file}`));
         }
         if (jsonlFiles.length > 10) {
             console.log(chalk_1.default.dim(`  ... and ${jsonlFiles.length - 10} more`));
         }
-        console.log('');
+        console.log("");
     }
     // Process files and collect records
-    const processSpinner = (0, ora_1.default)('Processing files...').start();
+    const processSpinner = (0, ora_1.default)("Processing files...").start();
     const allRecords = [];
     let processedFiles = 0;
     let skippedLines = 0;
     let skippedFiles = 0;
+    let skippedMissingFields = 0;
     for (const file of jsonlFiles) {
         try {
-            for await (const result of streamJsonlRecords(file, sinceDate)) {
+            for await (const result of streamRecords(file, sinceDate)) {
                 if (result.parseError) {
                     skippedLines++;
                 }
+                else if (result.missingFields) {
+                    skippedMissingFields++;
+                }
                 else if (result.record) {
                     allRecords.push(result.record);
                 }
@@ -315,76 +475,126 @@ async function backfillCommand(options = {}) {
     // Build status message with skipped line info
     let statusMessage = `Processed ${processedFiles} files, found ${allRecords.length} usage records`;
     if (skippedLines > 0) {
-        statusMessage += chalk_1.default.yellow(` (${skippedLines} malformed line${skippedLines > 1 ? 's' : ''} skipped)`);
+        statusMessage += chalk_1.default.yellow(` (${skippedLines} malformed line${skippedLines > 1 ? "s" : ""} skipped)`);
+    }
+    if (skippedMissingFields > 0) {
+        statusMessage += chalk_1.default.yellow(` (${skippedMissingFields} record${skippedMissingFields > 1 ? "s" : ""} missing required fields)`);
     }
     if (skippedFiles > 0) {
-        statusMessage += chalk_1.default.yellow(` (${skippedFiles} file${skippedFiles > 1 ? 's' : ''} failed)`);
+        statusMessage += chalk_1.default.yellow(` (${skippedFiles} file${skippedFiles > 1 ? "s" : ""} failed)`);
     }
     processSpinner.succeed(statusMessage);
     if (allRecords.length === 0) {
-        console.log(chalk_1.default.yellow('\nNo usage records found to backfill.'));
+        console.log(chalk_1.default.yellow("\nNo usage records found to backfill."));
+        if (skippedMissingFields > 0) {
+            console.log(chalk_1.default.dim(`${skippedMissingFields} record${skippedMissingFields > 1 ? "s were" : " was"} skipped due to missing required fields (timestamp, sessionId, or model).`));
+        }
         if (since) {
             console.log(chalk_1.default.dim(`Try a broader date range or remove the --since filter.`));
         }
         return;
     }
-    // Sort records by timestamp
-    allRecords.sort((a, b) => new Date(a.timestamp).getTime() - new Date(b.timestamp).getTime());
-    // Show summary
-    const oldestRecord = allRecords[0];
-    const newestRecord = allRecords[allRecords.length - 1];
-    const totalInputTokens = allRecords.reduce((sum, r) => sum + r.inputTokens, 0);
-    const totalOutputTokens = allRecords.reduce((sum, r) => sum + r.outputTokens, 0);
-    const totalCacheReadTokens = allRecords.reduce((sum, r) => sum + r.cacheReadTokens, 0);
-    const totalCacheCreationTokens = allRecords.reduce((sum, r) => sum + r.cacheCreationTokens, 0);
-    console.log('\n' + chalk_1.default.bold('Summary:'));
-    console.log(`  Records: ${allRecords.length.toLocaleString()}`);
-    console.log(`  Date range: ${oldestRecord.timestamp.split('T')[0]} to ${newestRecord.timestamp.split('T')[0]}`);
-    console.log(`  Input tokens: ${totalInputTokens.toLocaleString()}`);
-    console.log(`  Output tokens: ${totalOutputTokens.toLocaleString()}`);
-    console.log(`  Cache read tokens: ${totalCacheReadTokens.toLocaleString()}`);
-    console.log(`  Cache creation: ${totalCacheCreationTokens.toLocaleString()}`);
+    // Calculate statistics
+    const stats = calculateStatistics(allRecords);
+    console.log("\n" + chalk_1.default.bold("Summary:"));
+    console.log(`  Records: ${stats.totalRecords.toLocaleString()}`);
+    console.log(`  Date range: ${stats.oldestTimestamp.split("T")[0]} to ${stats.newestTimestamp.split("T")[0]}`);
+    console.log(`  Input tokens: ${stats.totalInputTokens.toLocaleString()}`);
+    console.log(`  Output tokens: ${stats.totalOutputTokens.toLocaleString()}`);
+    console.log(`  Cache read tokens: ${stats.totalCacheReadTokens.toLocaleString()}`);
+    console.log(`  Cache creation: ${stats.totalCacheCreationTokens.toLocaleString()}`);
     console.log(`  Cost multiplier: ${costMultiplier}`);
+    if (verbose &&
+        (skippedLines > 0 || skippedMissingFields > 0 || skippedFiles > 0)) {
+        console.log("\n" + chalk_1.default.dim("Skipped records:"));
+        if (skippedLines > 0) {
+            console.log(chalk_1.default.dim(`  Malformed JSON: ${skippedLines.toLocaleString()}`));
+        }
+        if (skippedMissingFields > 0) {
+            console.log(chalk_1.default.dim(`  Missing fields: ${skippedMissingFields.toLocaleString()} (timestamp, sessionId, or model)`));
+        }
+        if (skippedFiles > 0) {
+            console.log(chalk_1.default.dim(`  Failed files: ${skippedFiles.toLocaleString()}`));
+        }
+    }
     if (dryRun) {
-        console.log('\n' + chalk_1.default.yellow('Dry run complete. Use without --dry-run to send data.'));
+        console.log("\n" +
+            chalk_1.default.yellow("Dry run complete. Use without --dry-run to send data."));
         if (verbose) {
-            console.log('\n' + chalk_1.default.dim('Sample OTLP payload (first batch):'));
+            console.log("\n" + chalk_1.default.dim("Sample OTLP payload (first batch):"));
             const sampleRecords = allRecords.slice(0, Math.min(batchSize, 3));
-            const samplePayload = createOtlpPayload(sampleRecords, costMultiplier);
+            const samplePayload = createOtlpPayload(sampleRecords, {
+                costMultiplier,
+                email: config.email,
+                organizationId: config.organizationId,
+                productId: config.productId,
+            });
             console.log(chalk_1.default.dim(JSON.stringify(samplePayload, null, 2)));
         }
         return;
     }
     // Send data in batches
     const totalBatches = Math.ceil(allRecords.length / batchSize);
-    const sendSpinner = (0, ora_1.default)(`Sending data... (0/${totalBatches} batches)`).start();
+    const sendSpinner = (0, ora_1.default)(`Sending data... (0/${totalBatches} batches, ~${delay}ms delay)`).start();
     let sentBatches = 0;
     let sentRecords = 0;
-    let failedBatches = 0;
+    let permanentlyFailedBatches = 0;
+    let totalRetryAttempts = 0;
+    const failedBatchDetails = [];
+    const maxRetries = 3;
     for (let i = 0; i < allRecords.length; i += batchSize) {
+        const batchNumber = Math.floor(i / batchSize) + 1;
         const batch = allRecords.slice(i, i + batchSize);
-        const payload = createOtlpPayload(batch, costMultiplier);
-        try {
-            await (0, client_js_1.sendOtlpMetrics)(config.endpoint, config.apiKey, payload);
+        const payload = createOtlpPayload(batch, {
+            costMultiplier,
+            email: config.email,
+            organizationId: config.organizationId,
+            productId: config.productId,
+        });
+        sendSpinner.text = `Sending batch ${batchNumber}/${totalBatches}...`;
+        const result = await sendBatch(config.endpoint, config.apiKey, payload, maxRetries, verbose);
+        totalRetryAttempts += result.attempts;
+        if (result.success) {
             sentBatches++;
             sentRecords += batch.length;
-            sendSpinner.text = `Sending data... (${sentBatches}/${totalBatches} batches)`;
+            sendSpinner.text = `Sending data... (${sentBatches}/${totalBatches} batches, ~${delay}ms delay)`;
         }
-        catch (error) {
-            failedBatches++;
-            if (verbose) {
-                const batchNumber = Math.floor(i / batchSize) + 1;
-                console.log(chalk_1.default.yellow(`\nBatch ${batchNumber} failed: ${error instanceof Error ? error.message : 'Unknown error'}`));
-            }
+        else {
+            permanentlyFailedBatches++;
+            failedBatchDetails.push({
+                batchNumber,
+                error: result.error || "Unknown error",
+            });
+        }
+        // Apply rate limiting delay between batches (except after the last batch)
+        if (i + batchSize < allRecords.length) {
+            sendSpinner.text = `Waiting ${delay}ms before next batch...`;
+            await sleep(delay);
         }
     }
-    if (failedBatches === 0) {
+    if (permanentlyFailedBatches === 0) {
         sendSpinner.succeed(`Sent ${sentRecords.toLocaleString()} records in ${sentBatches} batches`);
     }
     else {
-        sendSpinner.warn(`Sent ${sentRecords.toLocaleString()} records in ${sentBatches} batches (${failedBatches} failed)`);
+        sendSpinner.warn(`Sent ${sentRecords.toLocaleString()} records in ${sentBatches} batches (${permanentlyFailedBatches} permanently failed)`);
+    }
+    // Show retry statistics if there were retries
+    const retriedBatches = totalRetryAttempts - totalBatches;
+    if (retriedBatches > 0 && verbose) {
+        console.log("\n" + chalk_1.default.bold("Retry Statistics:"));
+        console.log(`  Total retry attempts: ${retriedBatches}`);
+        console.log(`  Average attempts/batch: ${(totalRetryAttempts / totalBatches).toFixed(2)}`);
+    }
+    // Show permanently failed batches details
+    if (permanentlyFailedBatches > 0) {
+        console.log("\n" + chalk_1.default.red.bold("Permanently Failed Batches:"));
+        for (const failed of failedBatchDetails) {
+            console.log(chalk_1.default.red(`  Batch ${failed.batchNumber}: ${failed.error}`));
+        }
+        console.log("\n" +
+            chalk_1.default.yellow("Tip: You can re-run the backfill command to retry failed batches."));
     }
-    console.log('\n' + chalk_1.default.green.bold('Backfill complete!'));
-    console.log(chalk_1.default.dim('Check your Revenium dashboard to see the imported data.'));
+    console.log("\n" + chalk_1.default.green.bold("Backfill complete!"));
+    console.log(chalk_1.default.dim("Check your Revenium dashboard to see the imported data."));
 }
 //# sourceMappingURL=backfill.js.map
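
Editor's note: with the defaults (batchSize = 100, delay = 100 ms, maxRetries = 3), a 10,000-record backfill is sent as 100 batches with 99 inter-batch pauses, roughly 9.9 s of deliberate delay; a batch that exhausts all three attempts adds 1 s + 2 s of backoff before it is reported under "Permanently Failed Batches".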