@revenium/anthropic 1.0.8 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -23,7 +23,7 @@ const logger = (0, config_1.getLogger)();
  const patchingContext = {
  originalMethods: {},
  isPatched: false,
- patchedInstances: new WeakSet()
+ patchedInstances: new WeakSet(),
  };
  /**
  * Get the Messages prototype using sophisticated prototype access
@@ -46,8 +46,8 @@ function getMessagesPrototype() {
  const config = (0, config_1.getConfig)();
  const apiKey = config?.anthropicApiKey ?? process.env.ANTHROPIC_API_KEY;
  if (!apiKey) {
- throw new error_handling_1.AnthropicPatchingError('Unable to access Anthropic Messages prototype: No API key available and direct prototype access failed. ' +
- 'Provide ANTHROPIC_API_KEY environment variable or pass anthropicApiKey in config.');
+ throw new error_handling_1.AnthropicPatchingError("Unable to access Anthropic Messages prototype: No API key available and direct prototype access failed. " +
+ "Provide ANTHROPIC_API_KEY environment variable or pass anthropicApiKey in config.");
  }
  const minimalInstance = new sdk_1.default({ apiKey });
  const messagesPrototype = Object.getPrototypeOf(minimalInstance.messages);
@@ -65,19 +65,19 @@ function getMessagesPrototype() {
  */
  function patchAnthropic() {
  if (patchingContext.isPatched) {
- logger.debug('Anthropic SDK already patched, skipping duplicate initialization');
+ logger.debug("Anthropic SDK already patched, skipping duplicate initialization");
  return;
  }
  try {
  // Access the Messages class prototype using sophisticated prototype access
  const messagesPrototype = getMessagesPrototype();
  if (!messagesPrototype)
- throw new error_handling_1.AnthropicPatchingError('Unable to access Anthropic Messages prototype');
+ throw new error_handling_1.AnthropicPatchingError("Unable to access Anthropic Messages prototype");
  // Store original methods
  patchingContext.originalMethods.create = messagesPrototype?.create;
  patchingContext.originalMethods.stream = messagesPrototype?.stream;
  if (!patchingContext.originalMethods?.create) {
- throw new error_handling_1.AnthropicPatchingError('Unable to find original create method');
+ throw new error_handling_1.AnthropicPatchingError("Unable to find original create method");
  }
  // Patch the create method
  const patchedCreateFunction = function (params, options) {
@@ -91,11 +91,11 @@ function patchAnthropic() {
  };
  }
  patchingContext.isPatched = true;
- logger.info('Anthropic SDK patched successfully');
+ logger.info("Anthropic SDK patched successfully");
  }
  catch (error) {
  const errorContext = (0, error_handling_1.createErrorContext)()
- .with('patchingAttempt', true)
+ .with("patchingAttempt", true)
  .build();
  (0, error_handling_1.handleError)(error, logger, errorContext);
  if (error instanceof error_handling_1.AnthropicPatchingError)
@@ -120,11 +120,11 @@ function unpatchAnthropic() {
  }
  patchingContext.isPatched = false;
  patchingContext.originalMethods = {};
- logger.info('Anthropic SDK unpatched successfully');
+ logger.info("Anthropic SDK unpatched successfully");
  }
  catch (error) {
  const errorContext = (0, error_handling_1.createErrorContext)()
- .with('unpatchingAttempt', true)
+ .with("unpatchingAttempt", true)
  .build();
  (0, error_handling_1.handleError)(error, logger, errorContext);
  throw new error_handling_1.AnthropicPatchingError(`Failed to unpatch Anthropic SDK: ${error instanceof Error ? error.message : String(error)}`, errorContext);
@@ -140,7 +140,7 @@ function isAnthropicPatched() {
  * Handle streaming response by collecting chunks and extracting usage data
  */
  async function handleStreamingResponse(stream, context) {
- const { requestId, model, metadata, requestTime, startTime } = context;
+ const { requestId, model, metadata, requestTime, startTime, requestBody } = context;
  // Create a new async generator that collects chunks and tracks usage
  async function* trackingStream() {
  const chunks = [];
@@ -148,7 +148,7 @@ async function handleStreamingResponse(stream, context) {
  try {
  for await (const chunk of stream) {
  // Track first token time
- if (!firstTokenTime && chunk.type === 'content_block_delta') {
+ if (!firstTokenTime && chunk.type === "content_block_delta") {
  firstTokenTime = Date.now();
  }
  chunks.push(chunk);
@@ -158,12 +158,14 @@ async function handleStreamingResponse(stream, context) {
  const endTime = Date.now();
  const responseTime = new Date();
  const duration = endTime - startTime;
- const timeToFirstToken = firstTokenTime ? firstTokenTime - startTime : undefined;
- logger.debug('Stream completed, extracting usage', {
+ const timeToFirstToken = firstTokenTime
+ ? firstTokenTime - startTime
+ : undefined;
+ logger.debug("Stream completed, extracting usage", {
  requestId,
  chunkCount: chunks.length,
  duration,
- timeToFirstToken
+ timeToFirstToken,
  });
  const usage = (0, tracking_1.extractUsageFromStream)(chunks);
  // Create tracking data
@@ -180,22 +182,23 @@ async function handleStreamingResponse(stream, context) {
  metadata,
  requestTime,
  responseTime,
- timeToFirstToken
+ timeToFirstToken,
+ requestBody: requestBody,
  };
  // Track usage asynchronously
  (0, tracking_1.trackUsageAsync)(trackingData);
- logger.debug('Anthropic streaming request completed successfully', {
+ logger.debug("Anthropic streaming request completed successfully", {
  requestId,
  model,
  inputTokens: usage.inputTokens,
  outputTokens: usage.outputTokens,
- duration
+ duration,
  });
  }
  catch (error) {
- logger.error('Error processing streaming response', {
+ logger.error("Error processing streaming response", {
  requestId,
- error: error instanceof Error ? error.message : String(error)
+ error: error instanceof Error ? error.message : String(error),
  });
  throw error;
  }
@@ -209,19 +212,19 @@ async function patchedCreateMethod(params, options) {
  const requestId = (0, crypto_1.randomUUID)();
  const startTime = Date.now();
  const requestTime = new Date();
- logger.debug('Intercepted Anthropic messages.create call', {
+ logger.debug("Intercepted Anthropic messages.create call", {
  requestId,
  model: params.model,
  hasMetadata: !!params.usageMetadata,
- isStreaming: !!params.stream
+ isStreaming: !!params.stream,
  });
  // Validate parameters
  const validation = (0, validation_1.validateAnthropicMessageParams)(params);
  if (!validation.isValid) {
- logger.warn('Invalid Anthropic parameters detected', {
+ logger.warn("Invalid Anthropic parameters detected", {
  requestId,
  errors: validation.errors,
- warnings: validation.warnings
+ warnings: validation.warnings,
  });
  }
  // Extract and validate metadata
@@ -232,7 +235,7 @@ async function patchedCreateMethod(params, options) {
  // Call original method
  const originalCreate = patchingContext.originalMethods.create;
  if (!originalCreate)
- throw new error_handling_1.RequestProcessingError('Original create method not available');
+ throw new error_handling_1.RequestProcessingError("Original create method not available");
  const response = await originalCreate.call(this, cleanParams, options);
  // Check if this is a streaming response
  const isStreaming = !!params.stream;
@@ -255,16 +258,17 @@ async function patchedCreateMethod(params, options) {
  stopReason: usage.stopReason,
  metadata,
  requestTime,
- responseTime
+ responseTime,
+ requestBody: params,
  };
  // Track usage asynchronously
  (0, tracking_1.trackUsageAsync)(trackingData);
- logger.debug('Anthropic request completed successfully', {
+ logger.debug("Anthropic request completed successfully", {
  requestId,
  model: params.model,
  inputTokens: usage.inputTokens,
  outputTokens: usage.outputTokens,
- duration
+ duration,
  });
  return response;
  }
@@ -274,7 +278,8 @@ async function patchedCreateMethod(params, options) {
  model: params.model,
  metadata,
  requestTime,
- startTime
+ startTime,
+ requestBody: params,
  });
  }
  catch (error) {
@@ -299,18 +304,18 @@ async function* patchedStreamMethod(params, options) {
  const responseTime = new Date();
  const chunks = [];
  let firstTokenTime;
- logger.debug('Intercepted Anthropic messages.stream call', {
+ logger.debug("Intercepted Anthropic messages.stream call", {
  requestId,
  model: params.model,
- hasMetadata: !!params.usageMetadata
+ hasMetadata: !!params.usageMetadata,
  });
  // Validate parameters
  const validation = (0, validation_1.validateAnthropicMessageParams)(params);
  if (!validation.isValid) {
- logger.warn('Invalid Anthropic streaming parameters detected', {
+ logger.warn("Invalid Anthropic streaming parameters detected", {
  requestId,
  errors: validation.errors,
- warnings: validation.warnings
+ warnings: validation.warnings,
  });
  }
  // Extract and validate metadata
@@ -321,12 +326,12 @@ async function* patchedStreamMethod(params, options) {
  // Call original stream method
  const originalStream = patchingContext.originalMethods?.stream;
  if (!originalStream) {
- throw new error_handling_1.StreamProcessingError('Original stream method not available');
+ throw new error_handling_1.StreamProcessingError("Original stream method not available");
  }
  const stream = originalStream.call(this, cleanParams, options);
  for await (const chunk of stream) {
  // Track first token time
- if (!firstTokenTime && chunk.type === 'content_block_delta') {
+ if (!firstTokenTime && chunk.type === "content_block_delta") {
  firstTokenTime = Date.now();
  }
  chunks.push(chunk);
@@ -334,7 +339,9 @@ async function* patchedStreamMethod(params, options) {
  }
  const endTime = Date.now();
  const duration = endTime - startTime;
- const timeToFirstToken = firstTokenTime ? firstTokenTime - startTime : undefined;
+ const timeToFirstToken = firstTokenTime
+ ? firstTokenTime - startTime
+ : undefined;
  // Extract usage information from all chunks
  const usage = (0, tracking_1.extractUsageFromStream)(chunks);
  // Create tracking data
@@ -351,18 +358,19 @@ async function* patchedStreamMethod(params, options) {
  metadata,
  requestTime,
  responseTime,
- timeToFirstToken
+ timeToFirstToken,
+ requestBody: params,
  };
  // Track usage asynchronously
  (0, tracking_1.trackUsageAsync)(trackingData);
- logger.debug('Anthropic streaming request completed successfully', {
+ logger.debug("Anthropic streaming request completed successfully", {
  requestId,
  model: params.model,
  inputTokens: usage.inputTokens,
  outputTokens: usage.outputTokens,
  duration,
  timeToFirstToken,
- chunkCount: chunks.length
+ chunkCount: chunks.length,
  });
  }
  catch (error) {
@@ -372,8 +380,8 @@ async function* patchedStreamMethod(params, options) {
  .withRequestId(requestId)
  .withModel(params.model)
  .withDuration(duration)
- .with('isStreaming', true)
- .with('chunkCount', chunks.length)
+ .with("isStreaming", true)
+ .with("chunkCount", chunks.length)
  .build();
  (0, error_handling_1.handleError)(error, logger, errorContext);
  throw error;
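
Beyond the quote-style churn, the functional change in the hunks above is that the original request parameters are now threaded into usage tracking as requestBody on both the create and stream paths (and passed through the streaming context). A minimal sketch of the resulting payload shape, using field names from the diff; buildTrackingData is a hypothetical helper shown only for illustration, and the real payload carries additional usage and timing fields not listed here:

// Hypothetical helper, for illustration only: shows the tracking payload shape
// after this change. Field names are taken from the diff above; other fields elided.
function buildTrackingData(requestId, params, metadata, requestTime, responseTime) {
  return {
    requestId,
    model: params.model,
    metadata,
    requestTime,
    responseTime,
    requestBody: params, // new in 1.1.0: the original request params are captured
  };
}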
@@ -1,31 +1,31 @@
- import { validateReveniumConfig } from './utils/validation.js';
- import { DEFAULT_CONFIG, ENV_VARS, LOGGING_CONFIG } from './constants.js';
+ import { validateReveniumConfig } from "./utils/validation.js";
+ import { DEFAULT_CONFIG, ENV_VARS, LOGGING_CONFIG } from "./constants.js";
  /**
  * Simple console logger implementation for Anthropic middleware
  */
  class ConsoleLogger {
  isDebugEnabled() {
- return process.env[ENV_VARS.DEBUG] === 'true';
+ return process.env[ENV_VARS.DEBUG] === "true";
  }
  formatMessage(level, message, context) {
  const timestamp = new Date().toISOString();
- const prefix = `[${LOGGING_CONFIG.MIDDLEWARE_NAME}${level === 'DEBUG' ? ' Debug' : ''}]`;
- const contextStr = context ? ` ${JSON.stringify(context)}` : '';
+ const prefix = `[${LOGGING_CONFIG.MIDDLEWARE_NAME}${level === "DEBUG" ? " Debug" : ""}]`;
+ const contextStr = context ? ` ${JSON.stringify(context)}` : "";
  return `${timestamp} ${prefix} ${message}${contextStr}`;
  }
  debug(message, context) {
  if (this.isDebugEnabled()) {
- console.debug(this.formatMessage('DEBUG', message, context));
+ console.debug(this.formatMessage("DEBUG", message, context));
  }
  }
  info(message, context) {
- console.info(this.formatMessage('INFO', message, context));
+ console.info(this.formatMessage("INFO", message, context));
  }
  warn(message, context) {
- console.warn(this.formatMessage('WARN', message, context));
+ console.warn(this.formatMessage("WARN", message, context));
  }
  error(message, context) {
- console.error(this.formatMessage('ERROR', message, context));
+ console.error(this.formatMessage("ERROR", message, context));
  }
  }
  /**
@@ -40,14 +40,32 @@ function loadConfigFromEnvironment() {
  reveniumApiKey: process.env[ENV_VARS.REVENIUM_API_KEY],
  reveniumBaseUrl: process.env[ENV_VARS.REVENIUM_BASE_URL],
  anthropicApiKey: process.env[ENV_VARS.ANTHROPIC_API_KEY],
- debug: process.env[ENV_VARS.DEBUG] === 'true',
+ debug: process.env[ENV_VARS.DEBUG] === "true",
  logLevel: process.env[ENV_VARS.LOG_LEVEL],
  apiTimeout: process.env[ENV_VARS.API_TIMEOUT],
  failSilent: process.env[ENV_VARS.FAIL_SILENT],
- maxRetries: process.env[ENV_VARS.MAX_RETRIES]
+ maxRetries: process.env[ENV_VARS.MAX_RETRIES],
+ printSummary: process.env[ENV_VARS.PRINT_SUMMARY],
+ teamId: process.env[ENV_VARS.TEAM_ID],
+ capturePrompts: process.env[ENV_VARS.CAPTURE_PROMPTS],
  };
  return env;
  }
+ /**
+ * Parse printSummary environment variable value
+ */
+ function parsePrintSummary(value) {
+ if (!value)
+ return undefined;
+ const lowerValue = value.toLowerCase();
+ if (lowerValue === "true" || lowerValue === "human")
+ return "human";
+ if (lowerValue === "json")
+ return "json";
+ if (lowerValue === "false")
+ return false;
+ return undefined;
+ }
  /**
  * Convert environment config to Revenium config
  */
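
The parsePrintSummary helper added in this hunk normalizes the raw REVENIUM_PRINT_SUMMARY string into a summary mode. Here is the mapping restated as a standalone sketch (the real helper appears to be module-private, since it is not exported in the diff; this copy exists only so the example runs on its own):

// Illustrative restatement of the parsePrintSummary mapping shown in the hunk above.
function parsePrintSummary(value) {
  if (!value) return undefined;
  const lowerValue = value.toLowerCase();
  if (lowerValue === "true" || lowerValue === "human") return "human";
  if (lowerValue === "json") return "json";
  if (lowerValue === "false") return false;
  return undefined;
}

console.log(parsePrintSummary("human")); // "human"
console.log(parsePrintSummary("JSON"));  // "json" (input is lowercased before comparison)
console.log(parsePrintSummary("false")); // false (summary printing disabled)
console.log(parsePrintSummary("maybe")); // undefined (unrecognized values are ignored)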
@@ -56,15 +74,20 @@ function createConfigFromEnvironment(env) {
  return null;
  }
  const apiTimeout = env.apiTimeout ? parseInt(env.apiTimeout, 10) : undefined;
- const failSilent = env.failSilent !== 'false'; // Default to true
+ const failSilent = env.failSilent !== "false"; // Default to true
  const maxRetries = env.maxRetries ? parseInt(env.maxRetries, 10) : undefined;
+ const printSummary = parsePrintSummary(env.printSummary);
+ const capturePrompts = env.capturePrompts === "true";
  return {
  reveniumApiKey: env.reveniumApiKey,
  reveniumBaseUrl: env.reveniumBaseUrl || DEFAULT_CONFIG.REVENIUM_BASE_URL,
  anthropicApiKey: env.anthropicApiKey,
  apiTimeout,
  failSilent,
- maxRetries
+ maxRetries,
+ printSummary,
+ teamId: env.teamId?.trim(),
+ capturePrompts,
  };
  }
  /**
@@ -74,18 +97,18 @@ export function validateConfig(config) {
  const validation = validateReveniumConfig(config);
  if (!validation.isValid) {
  // Log detailed validation errors
- getLogger().error('Configuration validation failed', {
+ getLogger().error("Configuration validation failed", {
  errors: validation.errors,
  warnings: validation.warnings,
- suggestions: validation.suggestions
+ suggestions: validation.suggestions,
  });
  // Create detailed error message
- let errorMessage = 'Configuration validation failed:\n';
+ let errorMessage = "Configuration validation failed:\n";
  validation.errors.forEach((error, index) => {
  errorMessage += ` ${index + 1}. ${error}\n`;
  });
  if (validation.suggestions && validation.suggestions.length > 0) {
- errorMessage += '\nSuggestions:\n';
+ errorMessage += "\nSuggestions:\n";
  validation.suggestions.forEach((suggestion) => {
  errorMessage += ` • ${suggestion}\n`;
  });
@@ -94,8 +117,8 @@ export function validateConfig(config) {
  }
  // Log warnings if any
  if (validation.warnings && validation.warnings.length > 0) {
- getLogger().warn('Configuration warnings', {
- warnings: validation.warnings
+ getLogger().warn("Configuration warnings", {
+ warnings: validation.warnings,
  });
  }
  }
@@ -112,14 +135,42 @@ export function getConfig() {
  }
  /**
  * Set the global configuration
+ * Uses the normalized config from validation (with defaults applied and fields trimmed)
  */
  export function setConfig(config) {
- validateConfig(config);
- globalConfig = config;
- globalLogger.debug('Revenium configuration updated', {
- baseUrl: config.reveniumBaseUrl,
- hasApiKey: !!config.reveniumApiKey,
- hasAnthropicKey: !!config.anthropicApiKey
+ const validation = validateReveniumConfig(config);
+ if (!validation.isValid) {
+ // Log detailed validation errors
+ getLogger().error("Configuration validation failed", {
+ errors: validation.errors,
+ warnings: validation.warnings,
+ suggestions: validation.suggestions,
+ });
+ // Create detailed error message
+ let errorMessage = "Configuration validation failed:\n";
+ validation.errors.forEach((error, index) => {
+ errorMessage += ` ${index + 1}. ${error}\n`;
+ });
+ if (validation.suggestions && validation.suggestions.length > 0) {
+ errorMessage += "\nSuggestions:\n";
+ validation.suggestions.forEach((suggestion) => {
+ errorMessage += ` • ${suggestion}\n`;
+ });
+ }
+ throw new Error(errorMessage.trim());
+ }
+ // Log warnings if any
+ if (validation.warnings && validation.warnings.length > 0) {
+ getLogger().warn("Configuration warnings", {
+ warnings: validation.warnings,
+ });
+ }
+ // Use the normalized config from validation (with defaults applied and fields trimmed)
+ globalConfig = validation.config;
+ globalLogger.debug("Revenium configuration updated", {
+ baseUrl: globalConfig.reveniumBaseUrl,
+ hasApiKey: !!globalConfig.reveniumApiKey,
+ hasAnthropicKey: !!globalConfig.anthropicApiKey,
  });
  }
  /**
@@ -133,7 +184,7 @@ export function getLogger() {
  */
  export function setLogger(logger) {
  globalLogger = logger;
- globalLogger.debug('Custom logger set for Revenium middleware');
+ globalLogger.debug("Custom logger set for Revenium middleware");
  }
  /**
  * Initialize configuration from environment variables
@@ -144,12 +195,12 @@ export function initializeConfig() {
  if (config) {
  try {
  setConfig(config);
- globalLogger.debug('Revenium middleware initialized from environment variables');
+ globalLogger.debug("Revenium middleware initialized from environment variables");
  return true;
  }
  catch (error) {
- globalLogger.error('Failed to initialize Revenium configuration', {
- error: error instanceof Error ? error.message : String(error)
+ globalLogger.error("Failed to initialize Revenium configuration", {
+ error: error instanceof Error ? error.message : String(error),
  });
  return false;
  }
@@ -170,14 +221,14 @@ export function getConfigStatus() {
  hasConfig: false,
  hasApiKey: false,
  hasAnthropicKey: false,
- baseUrl: ''
+ baseUrl: "",
  };
  }
  return {
  hasConfig: true,
  hasApiKey: !!globalConfig.reveniumApiKey,
  hasAnthropicKey: !!globalConfig.anthropicApiKey,
- baseUrl: globalConfig.reveniumBaseUrl
+ baseUrl: globalConfig.reveniumBaseUrl,
  };
  }
  /**
@@ -7,7 +7,7 @@
  */
  export const DEFAULT_CONFIG = {
  /** Default Revenium API base URL */
- REVENIUM_BASE_URL: 'https://api.revenium.ai',
+ REVENIUM_BASE_URL: "https://api.revenium.ai",
  /** Default API timeout in milliseconds */
  API_TIMEOUT: 5000,
  /** Default maximum retries for failed API calls */
@@ -22,6 +22,10 @@ export const DEFAULT_CONFIG = {
  MAX_RETRY_ATTEMPTS: 10,
  /** Warning threshold for low API timeout */
  LOW_TIMEOUT_WARNING_THRESHOLD: 3000,
+ /** Default prompt capture behavior */
+ CAPTURE_PROMPTS: false,
+ /** Maximum size for each prompt field in characters (50KB) */
+ MAX_PROMPT_SIZE: 50000,
  };
  /**
  * Circuit breaker configuration constants
@@ -60,9 +64,9 @@ export const VALIDATION_CONFIG = {
  /** Minimum API key length */
  MIN_API_KEY_LENGTH: 20,
  /** Required API key prefix for Revenium */
- REVENIUM_API_KEY_PREFIX: 'hak_',
+ REVENIUM_API_KEY_PREFIX: "hak_",
  /** Required API key prefix for Anthropic */
- ANTHROPIC_API_KEY_PREFIX: 'sk-ant-',
+ ANTHROPIC_API_KEY_PREFIX: "sk-ant-",
  /** Maximum tokens warning threshold */
  HIGH_MAX_TOKENS_THRESHOLD: 4096,
  /** Temperature range */
@@ -83,39 +87,56 @@ export const VALIDATION_CONFIG = {
  */
  export const LOGGING_CONFIG = {
  /** Middleware name for log prefixes */
- MIDDLEWARE_NAME: 'Revenium',
+ MIDDLEWARE_NAME: "Revenium",
  /** User agent string for API requests */
- USER_AGENT: 'revenium-middleware-anthropic-node/1.0.0',
+ USER_AGENT: "revenium-middleware-anthropic-node/1.0.0",
  /** Debug environment variable name */
- DEBUG_ENV_VAR: 'REVENIUM_DEBUG',
+ DEBUG_ENV_VAR: "REVENIUM_DEBUG",
  };
  /**
  * Environment variable names
  */
  export const ENV_VARS = {
  /** Revenium API key */
- REVENIUM_API_KEY: 'REVENIUM_METERING_API_KEY',
+ REVENIUM_API_KEY: "REVENIUM_METERING_API_KEY",
  /** Revenium base URL */
- REVENIUM_BASE_URL: 'REVENIUM_METERING_BASE_URL',
+ REVENIUM_BASE_URL: "REVENIUM_METERING_BASE_URL",
  /** Anthropic API key */
- ANTHROPIC_API_KEY: 'ANTHROPIC_API_KEY',
+ ANTHROPIC_API_KEY: "ANTHROPIC_API_KEY",
  /** Debug mode */
- DEBUG: 'REVENIUM_DEBUG',
+ DEBUG: "REVENIUM_DEBUG",
  /** Log level */
- LOG_LEVEL: 'REVENIUM_LOG_LEVEL',
+ LOG_LEVEL: "REVENIUM_LOG_LEVEL",
  /** API timeout */
- API_TIMEOUT: 'REVENIUM_API_TIMEOUT',
+ API_TIMEOUT: "REVENIUM_API_TIMEOUT",
  /** Fail silent mode */
- FAIL_SILENT: 'REVENIUM_FAIL_SILENT',
+ FAIL_SILENT: "REVENIUM_FAIL_SILENT",
  /** Maximum retries */
- MAX_RETRIES: 'REVENIUM_MAX_RETRIES',
+ MAX_RETRIES: "REVENIUM_MAX_RETRIES",
+ /** Print summary mode (true/false/human/json) */
+ PRINT_SUMMARY: "REVENIUM_PRINT_SUMMARY",
+ /** Team ID for cost metrics retrieval */
+ TEAM_ID: "REVENIUM_TEAM_ID",
+ /** Prompt capture mode */
+ CAPTURE_PROMPTS: "REVENIUM_CAPTURE_PROMPTS",
+ };
+ /**
+ * Summary printer configuration
+ */
+ export const SUMMARY_PRINTER_CONFIG = {
+ /** Maximum number of retries when fetching cost metrics */
+ MAX_RETRIES: 3,
+ /** Delay between retries in milliseconds */
+ RETRY_DELAY: 2000,
+ /** Fetch timeout in milliseconds (prevents hung requests from keeping Node process alive) */
+ FETCH_TIMEOUT: 10000,
  };
  /**
  * API endpoints
  */
  export const API_ENDPOINTS = {
  /** Revenium AI completions endpoint */
- AI_COMPLETIONS: '/meter/v2/ai/completions',
+ AI_COMPLETIONS: "/meter/v2/ai/completions",
  };
  /**
  * Anthropic model patterns
@@ -125,17 +146,17 @@ export const ANTHROPIC_PATTERNS = {
  CLAUDE_MODEL_PATTERN: /claude/i,
  /** Known Anthropic stop reasons */
  STOP_REASONS: {
- END_TURN: 'end_turn',
- MAX_TOKENS: 'max_tokens',
- STOP_SEQUENCE: 'stop_sequence',
- TOOL_USE: 'tool_use',
+ END_TURN: "end_turn",
+ MAX_TOKENS: "max_tokens",
+ STOP_SEQUENCE: "stop_sequence",
+ TOOL_USE: "tool_use",
  },
  /** Revenium stop reason mappings */
  REVENIUM_STOP_REASON_MAP: {
- 'end_turn': 'END',
- 'max_tokens': 'TOKEN_LIMIT',
- 'stop_sequence': 'END_SEQUENCE',
- 'tool_use': 'END',
+ end_turn: "END",
+ max_tokens: "TOKEN_LIMIT",
+ stop_sequence: "END_SEQUENCE",
+ tool_use: "END",
  },
  };
  //# sourceMappingURL=constants.js.map
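
Taken together, the new ENV_VARS entries and SUMMARY_PRINTER_CONFIG mean the 1.1.0 features can be switched on entirely through environment variables. A hedged sketch of enabling them before the middleware initializes; the variable names come from the diff above, the value parsing follows the configuration module shown earlier, and "your-team-id" is a hypothetical placeholder:

// Sketch: enable the new 1.1.0 options via environment variables before the
// middleware loads. Names come from ENV_VARS above; "your-team-id" is a placeholder.
process.env.REVENIUM_PRINT_SUMMARY = "human";  // "true"/"human", "json", or "false"
process.env.REVENIUM_CAPTURE_PROMPTS = "true"; // parsed as the literal string "true"
process.env.REVENIUM_TEAM_ID = "your-team-id"; // used for cost metrics retrieval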