@revenium/openai 1.0.11 → 1.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/.env.example +20 -0
  2. package/CHANGELOG.md +21 -47
  3. package/README.md +141 -690
  4. package/dist/cjs/core/config/loader.js +1 -1
  5. package/dist/cjs/core/config/loader.js.map +1 -1
  6. package/dist/cjs/core/tracking/api-client.js +1 -1
  7. package/dist/cjs/core/tracking/api-client.js.map +1 -1
  8. package/dist/cjs/index.js +2 -2
  9. package/dist/cjs/index.js.map +1 -1
  10. package/dist/cjs/utils/url-builder.js +32 -7
  11. package/dist/cjs/utils/url-builder.js.map +1 -1
  12. package/dist/esm/core/config/loader.js +1 -1
  13. package/dist/esm/core/config/loader.js.map +1 -1
  14. package/dist/esm/core/tracking/api-client.js +1 -1
  15. package/dist/esm/core/tracking/api-client.js.map +1 -1
  16. package/dist/esm/index.js +2 -2
  17. package/dist/esm/index.js.map +1 -1
  18. package/dist/esm/utils/url-builder.js +32 -7
  19. package/dist/esm/utils/url-builder.js.map +1 -1
  20. package/dist/types/index.d.ts +2 -2
  21. package/dist/types/types/index.d.ts +2 -2
  22. package/dist/types/types/index.d.ts.map +1 -1
  23. package/dist/types/utils/url-builder.d.ts +11 -3
  24. package/dist/types/utils/url-builder.d.ts.map +1 -1
  25. package/examples/README.md +250 -254
  26. package/examples/azure-basic.ts +25 -13
  27. package/examples/azure-responses-basic.ts +36 -7
  28. package/examples/azure-responses-streaming.ts +36 -7
  29. package/examples/azure-streaming.ts +40 -19
  30. package/examples/getting_started.ts +54 -0
  31. package/examples/openai-basic.ts +39 -17
  32. package/examples/openai-function-calling.ts +259 -0
  33. package/examples/openai-responses-basic.ts +36 -7
  34. package/examples/openai-responses-streaming.ts +36 -7
  35. package/examples/openai-streaming.ts +24 -13
  36. package/examples/openai-vision.ts +289 -0
  37. package/package.json +3 -9
  38. package/src/core/config/azure-config.ts +72 -0
  39. package/src/core/config/index.ts +23 -0
  40. package/src/core/config/loader.ts +66 -0
  41. package/src/core/config/manager.ts +94 -0
  42. package/src/core/config/validator.ts +89 -0
  43. package/src/core/providers/detector.ts +159 -0
  44. package/src/core/providers/index.ts +16 -0
  45. package/src/core/tracking/api-client.ts +78 -0
  46. package/src/core/tracking/index.ts +21 -0
  47. package/src/core/tracking/payload-builder.ts +132 -0
  48. package/src/core/tracking/usage-tracker.ts +189 -0
  49. package/src/core/wrapper/index.ts +9 -0
  50. package/src/core/wrapper/instance-patcher.ts +288 -0
  51. package/src/core/wrapper/request-handler.ts +423 -0
  52. package/src/core/wrapper/stream-wrapper.ts +100 -0
  53. package/src/index.ts +336 -0
  54. package/src/types/function-parameters.ts +251 -0
  55. package/src/types/index.ts +313 -0
  56. package/src/types/openai-augmentation.ts +233 -0
  57. package/src/types/responses-api.ts +308 -0
  58. package/src/utils/azure-model-resolver.ts +220 -0
  59. package/src/utils/constants.ts +21 -0
  60. package/src/utils/error-handler.ts +251 -0
  61. package/src/utils/metadata-builder.ts +219 -0
  62. package/src/utils/provider-detection.ts +257 -0
  63. package/src/utils/request-handler-factory.ts +285 -0
  64. package/src/utils/stop-reason-mapper.ts +74 -0
  65. package/src/utils/type-guards.ts +202 -0
  66. package/src/utils/url-builder.ts +68 -0
package/src/core/wrapper/stream-wrapper.ts ADDED
@@ -0,0 +1,100 @@
+ /**
+  * Stream Wrapper Module
+  *
+  * Handles wrapping of streaming responses for usage tracking.
+  * Extracted from wrapper.ts for better organization.
+  */
+
+ import { UsageMetadata } from '../../types/index.js';
+ import {
+   OpenAIClientInstance,
+   StreamChunk,
+   ExtendedUsage,
+ } from '../../types/function-parameters.js';
+ import { isStreamChunk } from '../../utils/type-guards.js';
+ import { trackUsageAsync } from '../tracking/index.js';
+ import { getLogger } from '../config/index.js';
+ import { instanceProviders } from './instance-patcher.js';
+
+ // Global logger
+ const logger = getLogger();
+
+ /**
+  * Create a simple stream wrapper that tracks usage when complete
+  */
+ export function createTrackingStreamWrapper(
+   originalStream: AsyncIterable<unknown>,
+   usageMetadata: UsageMetadata | undefined,
+   requestStartTime: number,
+   instance: OpenAIClientInstance
+ ): AsyncIterable<unknown> {
+   // For streaming, we need to collect the final response data
+   let accumulatedResponse: StreamChunk | null = null;
+
+   // Create async iterator
+   const wrappedIterator = {
+     async *[Symbol.asyncIterator]() {
+       try {
+         for await (const chunk of originalStream) {
+           // Validate and accumulate response data for tracking
+           if (isStreamChunk(chunk)) {
+             if (!accumulatedResponse) {
+               accumulatedResponse = {
+                 id: chunk.id,
+                 model: chunk.model,
+                 usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
+               };
+             }
+
+             // Update usage if available in chunk
+             if (chunk.usage) {
+               accumulatedResponse.usage = chunk.usage;
+             }
+           }
+
+           // Forward the chunk to the client
+           yield chunk;
+         }
+
+         // Stream completed - track usage
+         if (accumulatedResponse && accumulatedResponse.usage) {
+           const duration = Date.now() - requestStartTime;
+
+           // Get provider info for this instance
+           const providerInfo = instanceProviders.get(instance);
+
+           // Safely access extended usage fields
+           const usage = accumulatedResponse.usage as ExtendedUsage;
+
+           trackUsageAsync({
+             requestId: accumulatedResponse.id,
+             model: accumulatedResponse.model,
+             promptTokens: usage.prompt_tokens,
+             completionTokens: usage.completion_tokens || 0,
+             totalTokens: usage.total_tokens,
+             reasoningTokens: usage.reasoning_tokens,
+             cachedTokens: usage.cached_tokens,
+             duration,
+             finishReason: null, // Will be determined from final chunk
+             usageMetadata,
+             isStreamed: true,
+             providerInfo,
+           });
+
+           logger.debug('Chat completion streaming completed', {
+             model: accumulatedResponse.model,
+             duration,
+             totalTokens: accumulatedResponse.usage.total_tokens,
+           });
+         }
+       } catch (error) {
+         logger.error('Chat completion streaming error', {
+           error: error instanceof Error ? error.message : String(error),
+         });
+         throw error;
+       }
+     },
+   };
+
+   return wrappedIterator;
+ }
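
The wrapper above is intentionally transparent: every chunk is forwarded unchanged, and tracking fires as a side effect once iteration completes (errors propagate after being logged). A minimal caller-side sketch of consuming such a wrapped stream — the client value, the cast, and the `stream_options` request are illustrative assumptions, not part of this diff (the middleware normally invokes this helper internally via the instance patcher):

```typescript
import OpenAI from 'openai';
// Internal module paths within this package's source tree
import { createTrackingStreamWrapper } from './core/wrapper/stream-wrapper.js';
import type { OpenAIClientInstance } from './types/function-parameters.js';

const openai = new OpenAI(); // placeholder client instance
const start = Date.now();

const raw = await openai.chat.completions.create({
  model: 'gpt-4o-mini',
  messages: [{ role: 'user', content: 'Hello!' }],
  stream: true,
  stream_options: { include_usage: true }, // so OpenAI emits a usage chunk before the stream closes
});

// Consume the wrapper exactly as you would the raw stream; usage is
// accumulated invisibly and reported after the final chunk.
const tracked = createTrackingStreamWrapper(
  raw,
  undefined,
  start,
  openai as unknown as OpenAIClientInstance // structural cast for this sketch
);
for await (const chunk of tracked) {
  const delta = (chunk as { choices?: Array<{ delta?: { content?: string } }> })
    .choices?.[0]?.delta?.content;
  if (delta) process.stdout.write(delta);
}
```

Note that usage is only reported if some chunk actually carries a usage block; with OpenAI streaming that requires `stream_options: { include_usage: true }`.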
package/src/index.ts ADDED
@@ -0,0 +1,336 @@
+ /**
+  * Revenium OpenAI Middleware for TypeScript
+  *
+  * This middleware tracks OpenAI usage and sends metrics to Revenium.
+  * Uses hybrid initialization: auto-initializes on import with graceful fallback to manual init.
+  *
+  * Environment Variables:
+  *   REVENIUM_METERING_API_KEY=hak_your_api_key
+  *   REVENIUM_METERING_BASE_URL=https://api.revenium.io (optional)
+  *   OPENAI_API_KEY=sk_your_openai_key
+  *
+  * Simple Usage (auto-initialization):
+  *   import { patchOpenAIInstance } from '@revenium/openai';
+  *   import OpenAI from 'openai';
+  *
+  *   const openai = patchOpenAIInstance(new OpenAI());
+  *   // Auto-initializes from environment variables
+  *
+  * Advanced Usage (explicit initialization):
+  *   import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+  *   import OpenAI from 'openai';
+  *
+  *   const result = initializeReveniumFromEnv();
+  *   if (!result.success) {
+  *     throw new Error(result.message);
+  *   }
+  *   const openai = patchOpenAIInstance(new OpenAI());
+  *
+  *   const response = await openai.chat.completions.create({
+  *     model: 'gpt-4',
+  *     messages: [{ role: 'user', content: 'Hello!' }],
+  *     usageMetadata: {
+  *       subscriber: {
+  *         id: 'user-123',
+  *         email: 'user@my-org.com'
+  *       },
+  *       organizationId: 'my-org',
+  *       productId: 'my-app'
+  *     }
+  *   });
+  */
+
+ // Import type augmentations to extend OpenAI types with usageMetadata
+ import './types/openai-augmentation.js';
+
+ // Import from new modular structure
+ import {
+   setConfig,
+   validateConfig,
+   initializeConfig,
+   getConfig,
+   getLogger,
+ } from './core/config/index.js';
+ import { patchOpenAI, patchOpenAIInstance } from './core/wrapper/index.js';
+ import type { ReveniumConfig } from './types/index.js';
+
+ // Track initialization state
+ let isInitialized = false;
+ let autoInitAttempted = false;
+
+ /**
+  * Core types for TypeScript developers using Revenium middleware
+  *
+  * These types provide comprehensive TypeScript support for the Revenium OpenAI middleware,
+  * enabling type-safe configuration, usage tracking, and integration with OpenAI APIs.
+  *
+  * @public
+  */
+ export type {
+   /**
+    * Configuration interface for Revenium middleware initialization
+    *
+    * Defines all required and optional settings for connecting to Revenium's
+    * metering API and configuring middleware behavior.
+    *
+    * @example
+    * ```typescript
+    * const config: ReveniumConfig = {
+    *   reveniumApiKey: 'hak_your_api_key',
+    *   reveniumBaseUrl: 'https://api.revenium.io',
+    *   debug: true
+    * };
+    * ```
+    */
+   ReveniumConfig,
+
+   /**
+    * Usage metadata structure for tracking AI API calls
+    *
+    * Comprehensive metadata interface that allows tracking of user context,
+    * business information, and custom fields for detailed analytics and billing.
+    * All fields are optional to provide maximum flexibility.
+    *
+    * @example
+    * ```typescript
+    * const metadata: UsageMetadata = {
+    *   subscriber: { id: 'user-123', email: 'user@company.com' },
+    *   organizationId: 'company-456',
+    *   productId: 'chat-app',
+    *   taskType: 'customer-support'
+    * };
+    * ```
+    */
+   UsageMetadata,
+
+   /**
+    * Logger interface for custom logging implementations
+    *
+    * Standardized logging interface that allows custom logger integration
+    * while maintaining consistent log levels and metadata support.
+    *
+    * @example
+    * ```typescript
+    * const customLogger: Logger = {
+    *   debug: (msg, meta) => console.debug(msg, meta),
+    *   info: (msg, meta) => console.info(msg, meta),
+    *   warn: (msg, meta) => console.warn(msg, meta),
+    *   error: (msg, meta) => console.error(msg, meta)
+    * };
+    * ```
+    */
+   Logger,
+
+   /**
+    * Azure OpenAI configuration interface
+    *
+    * Specific configuration options for Azure OpenAI integration,
+    * including endpoint, API version, and deployment settings.
+    *
+    * @example
+    * ```typescript
+    * const azureConfig: AzureConfig = {
+    *   endpoint: 'https://your-resource.openai.azure.com',
+    *   apiVersion: '2024-02-01',
+    *   deployment: 'gpt-4'
+    * };
+    * ```
+    */
+   AzureConfig,
+
+   /**
+    * AI provider enumeration
+    *
+    * Supported AI providers for automatic detection and routing.
+    * Used internally for provider-specific handling and metrics.
+    */
+   Provider,
+
+   /**
+    * Provider information structure
+    *
+    * Detailed information about the detected AI provider, including
+    * configuration details and Azure-specific settings when applicable.
+    *
+    * @example
+    * ```typescript
+    * const providerInfo: ProviderInfo = {
+    *   provider: Provider.AZURE_OPENAI,
+    *   isAzure: true,
+    *   endpoint: 'https://your-resource.openai.azure.com',
+    *   deployment: 'gpt-4'
+    * };
+    * ```
+    */
+   ProviderInfo,
+ } from './types';
+
+ // Note: ExtendedChatCompletionCreateParams and ExtendedEmbeddingCreateParams are no longer exported
+ // as they have been replaced with seamless TypeScript module augmentation. The usageMetadata field
+ // is now natively available on OpenAI's types without requiring additional imports.
+
+ // Export tracking functions
+ export { trackUsageAsync, trackEmbeddingsUsageAsync } from './core/tracking/index.js';
+
+ // Export provider detection functions
+ export {
+   detectProvider,
+   hasAzureConfig,
+   validateAzureConfig,
+   getProviderMetadata,
+ } from './core/providers/index.js';
+ export { getProviderInfo } from './core/wrapper/index.js';
+
+ // Export Azure model resolution functions
+ export {
+   resolveAzureModelName,
+   clearModelNameCache,
+   getModelNameCacheStats,
+   batchResolveModelNames,
+   wouldTransformDeploymentName,
+ } from './utils/azure-model-resolver.js';
+
+ // Global logger
+ const logger = getLogger();
+
+ /**
+  * Initialize Revenium middleware with configuration
+  */
+ export function initializeRevenium(config: ReveniumConfig): {
+   success: boolean;
+   message: string;
+ } {
+   // Check if already initialized to prevent duplicate initialization
+   if (isInitialized) {
+     return {
+       success: true,
+       message: 'Revenium middleware already initialized',
+     };
+   }
+
+   try {
+     // Apply default base URL if not provided
+     const configWithDefaults = {
+       ...config,
+       reveniumBaseUrl: config.reveniumBaseUrl || 'https://api.revenium.io',
+     };
+
+     validateConfig(configWithDefaults);
+     setConfig(configWithDefaults);
+
+     // Mark as initialized
+     isInitialized = true;
+
+     // Patch OpenAI prototype methods
+     patchOpenAI();
+
+     return {
+       success: true,
+       message: 'Revenium middleware initialized successfully',
+     };
+   } catch (error) {
+     isInitialized = false;
+     return {
+       success: false,
+       message: error instanceof Error ? error.message : 'Unknown initialization error',
+     };
+   }
+ }
+
+ /**
+  * Initialize Revenium middleware from environment variables
+  */
+ export function initializeReveniumFromEnv(): {
+   success: boolean;
+   message: string;
+ } {
+   // Check if already initialized to prevent duplicate initialization
+   if (isInitialized) {
+     return {
+       success: true,
+       message: 'Revenium middleware already initialized',
+     };
+   }
+
+   const envSuccess = initializeConfig();
+
+   if (!envSuccess) {
+     isInitialized = false;
+     return {
+       success: false,
+       message:
+         'Failed to load configuration from environment variables. Check REVENIUM_METERING_API_KEY and REVENIUM_METERING_BASE_URL.',
+     };
+   }
+
+   // Mark as initialized
+   isInitialized = true;
+
+   // Patch OpenAI prototype methods
+   patchOpenAI();
+
+   return {
+     success: true,
+     message: 'Revenium middleware initialized from environment',
+   };
+ }
+
+ /**
+  * Manually patch an OpenAI instance (for advanced use cases)
+  */
+ export { patchOpenAIInstance } from './core/wrapper/index.js';
+
+ /**
+  * Auto-initialization with graceful fallback
+  * Attempts to initialize from environment variables on module load.
+  * If it fails, logs a debug message but doesn't throw - allows manual configuration later.
+  */
+ function attemptAutoInitialization(): void {
+   if (autoInitAttempted || isInitialized) return;
+
+   autoInitAttempted = true;
+   try {
+     const result = initializeReveniumFromEnv();
+     if (result.success) {
+       // Auto-init succeeded - log debug message
+       logger.debug('Revenium middleware auto-initialized from environment variables');
+     } else {
+       // Auto-init failed - log debug message but don't throw
+       logger.debug('Auto-initialization failed, manual initialization required:', result.message);
+     }
+   } catch (error) {
+     // Unexpected error during auto-init - log but don't throw
+     logger.debug(
+       'Auto-initialization encountered error:',
+       error instanceof Error ? error.message : String(error)
+     );
+   }
+ }
+
+ /**
+  * Check if middleware has been initialized
+  */
+ export function isReveniumInitialized(): boolean {
+   return isInitialized;
+ }
+
+ /**
+  * Get detailed initialization status
+  */
+ export function getInitializationStatus(): {
+   initialized: boolean;
+   hasConfig: boolean;
+   hasApiKey: boolean;
+   autoInitAttempted: boolean;
+ } {
+   const config = getConfig();
+   return {
+     initialized: isInitialized,
+     hasConfig: !!config,
+     hasApiKey: !!config?.reveniumApiKey,
+     autoInitAttempted,
+   };
+ }
+
+ // Perform auto-initialization when module is imported
+ attemptAutoInitialization();
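
Because both `initializeRevenium` and `initializeReveniumFromEnv` short-circuit once `isInitialized` is set, the auto-init that runs on import composes safely with a later manual call. A minimal sketch of the manual fallback path using only the exports above (the API key literal is a placeholder):

```typescript
import OpenAI from 'openai';
import {
  initializeRevenium,
  isReveniumInitialized,
  patchOpenAIInstance,
} from '@revenium/openai';

// Auto-initialization already ran when the module was imported; only fall
// back to explicit configuration if the env variables were not found.
if (!isReveniumInitialized()) {
  const result = initializeRevenium({
    reveniumApiKey: 'hak_placeholder', // placeholder key, supply your own
    debug: true, // optional, as is reveniumBaseUrl (defaults to https://api.revenium.io)
  });
  if (!result.success) throw new Error(result.message);
}

const openai = patchOpenAIInstance(new OpenAI());
```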
package/src/types/function-parameters.ts ADDED
@@ -0,0 +1,251 @@
+ /**
+  * Function Parameter Types
+  *
+  * Comprehensive type definitions for function parameters throughout the middleware.
+  * These interfaces provide type safety by replacing 'any' types with proper,
+  * well-documented interfaces that match OpenAI API structures and internal requirements.
+  *
+  * @fileoverview Type-safe function parameter definitions
+  * @author Revenium
+  * @since 1.0.0
+  */
+
+ import { UsageMetadata, ProviderInfo } from './index';
+ import { OpenAIResponsesRequest } from './responses-api';
+
+ /**
+  * OpenAI API response structure for chat completions
+  *
+  * Represents the complete response structure returned by OpenAI's chat completions API.
+  * Includes usage statistics, response choices, and metadata. Used internally for
+  * processing responses and extracting usage metrics.
+  *
+  * @public
+  * @example
+  * ```typescript
+  * const response: OpenAIChatResponse = {
+  *   id: 'chatcmpl-123',
+  *   model: 'gpt-4o-mini',
+  *   usage: {
+  *     prompt_tokens: 10,
+  *     completion_tokens: 20,
+  *     total_tokens: 30
+  *   },
+  *   choices: [{
+  *     finish_reason: 'stop',
+  *     message: {
+  *       content: 'Hello! How can I help you?',
+  *       role: 'assistant'
+  *     }
+  *   }]
+  * };
+  * ```
+  */
+ export interface OpenAIChatResponse {
+   /** Unique identifier for the chat completion */
+   id: string;
+   /** Model used for the completion */
+   model: string;
+   /** Token usage statistics */
+   usage: {
+     /** Number of tokens in the prompt */
+     prompt_tokens: number;
+     /** Number of tokens in the completion */
+     completion_tokens: number;
+     /** Total tokens used (prompt + completion) */
+     total_tokens: number;
+     /** Number of reasoning tokens (for reasoning models) */
+     reasoning_tokens?: number;
+     /** Number of cached tokens used */
+     cached_tokens?: number;
+   };
+   /** Array of completion choices */
+   choices: Array<{
+     /** Reason why the completion finished */
+     finish_reason: string | null;
+     /** Complete message (for non-streaming responses) */
+     message?: {
+       /** Message content */
+       content: string;
+       /** Message role (assistant, user, system) */
+       role: string;
+     };
+     /** Delta message (for streaming responses) */
+     delta?: {
+       /** Incremental content */
+       content?: string;
+       /** Message role */
+       role?: string;
+     };
+   }>;
+   /** Unix timestamp of when the completion was created */
+   created?: number;
+   /** Object type identifier */
+   object?: string;
+ }
+
+ /**
+  * OpenAI API response structure for embeddings
+  */
+ export interface OpenAIEmbeddingResponse {
+   model: string;
+   usage: {
+     prompt_tokens: number;
+     total_tokens: number;
+   };
+   data: Array<{
+     embedding: number[];
+     index: number;
+     object: string;
+   }>;
+   object: string;
+ }
+
+ /**
+  * OpenAI chat completion request parameters
+  */
+ export interface OpenAIChatRequest {
+   model: string;
+   messages: Array<{
+     role: string;
+     content: string;
+   }>;
+   stream?: boolean;
+   usageMetadata?: UsageMetadata;
+   max_tokens?: number;
+   temperature?: number;
+   top_p?: number;
+   frequency_penalty?: number;
+   presence_penalty?: number;
+   stop?: string | string[];
+   [key: string]: unknown;
+ }
+
+ /**
+  * OpenAI embeddings request parameters
+  */
+ export interface OpenAIEmbeddingRequest {
+   model: string;
+   input: string | string[];
+   usageMetadata?: UsageMetadata;
+   encoding_format?: string;
+   dimensions?: number;
+   user?: string;
+   [key: string]: unknown;
+ }
+
+ /**
+  * OpenAI client instance interface
+  */
+ export interface OpenAIClientInstance {
+   baseURL?: string | URL;
+   constructor?: {
+     name: string;
+   };
+   chat?: {
+     completions?: {
+       create: any;
+     };
+   };
+   embeddings?: {
+     create: any;
+   };
+   // Allow additional properties for flexibility with proper typing
+   [key: string]: unknown;
+ }
+
+ /**
+  * OpenAI request options
+  */
+ export interface OpenAIRequestOptions {
+   headers?: Record<string, string>;
+   timeout?: number;
+   signal?: AbortSignal;
+   [key: string]: unknown;
+ }
+
+ /**
+  * Azure model resolver function parameters
+  */
+ export interface AzureModelResolverParams {
+   deploymentName: string;
+   useCache?: boolean;
+ }
+
+ /**
+  * Provider detection function parameters
+  */
+ export interface ProviderDetectionParams {
+   client: OpenAIClientInstance;
+ }
+
+ /**
+  * Azure configuration validation result
+  */
+ export interface AzureConfigValidationResult {
+   isValid: boolean;
+   missingFields: string[];
+   warnings: string[];
+ }
+
+ /**
+  * Lazy loading function type for Azure modules
+  */
+ export interface LazyLoadedModule {
+   [key: string]: any;
+ }
+
+ /**
+  * Console logger arguments type
+  */
+ export type LoggerArgs = unknown[];
+
+ /**
+  * Generic function type for original OpenAI methods
+  */
+ export type OpenAIOriginalFunction = (
+   params: OpenAIChatRequest | OpenAIEmbeddingRequest,
+   options?: OpenAIRequestOptions
+ ) => Promise<OpenAIChatResponse | OpenAIEmbeddingResponse>;
+
+ /**
+  * Function type for original OpenAI Responses API methods
+  */
+ export type OpenAIResponsesOriginalFunction = (
+   params: OpenAIResponsesRequest,
+   options?: OpenAIRequestOptions
+ ) => Promise<unknown>;
+
+ /**
+  * Stream chunk interface for streaming responses
+  */
+ export interface StreamChunk {
+   id: string;
+   model: string;
+   usage?: {
+     prompt_tokens: number;
+     completion_tokens: number;
+     total_tokens: number;
+     reasoning_tokens?: number;
+     cached_tokens?: number;
+   };
+   choices?: Array<{
+     delta?: {
+       content?: string;
+       role?: string;
+     };
+     finish_reason?: string | null;
+   }>;
+   [key: string]: unknown;
+ }
+
+ /**
+  * Extended usage interface that includes optional reasoning and cached tokens
+  */
+ export interface ExtendedUsage {
+   prompt_tokens: number;
+   completion_tokens: number;
+   total_tokens: number;
+   reasoning_tokens?: number;
+   cached_tokens?: number;
+ }
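
These interfaces are designed to pair with runtime guards: `StreamChunk` is the type that `isStreamChunk` (shipped in `src/utils/type-guards.ts` and used by the stream wrapper above) narrows unknown chunks to. A sketch of that pairing — `looksLikeStreamChunk` and `firstModelName` are hypothetical illustrations, not the package's actual guard:

```typescript
import type { StreamChunk } from './function-parameters';

// Hypothetical structural guard in the spirit of the package's isStreamChunk:
// a value qualifies if it carries the required string `id` and `model` fields.
function looksLikeStreamChunk(value: unknown): value is StreamChunk {
  if (typeof value !== 'object' || value === null) return false;
  const candidate = value as Record<string, unknown>;
  return typeof candidate.id === 'string' && typeof candidate.model === 'string';
}

// Example use against an untyped stream, mirroring the wrapper's loop.
async function firstModelName(stream: AsyncIterable<unknown>): Promise<string | undefined> {
  for await (const chunk of stream) {
    if (looksLikeStreamChunk(chunk)) return chunk.model; // narrowed to StreamChunk here
  }
  return undefined;
}
```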