@revenium/openai 1.0.11 → 1.0.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +20 -0
- package/CHANGELOG.md +21 -47
- package/README.md +141 -690
- package/dist/cjs/core/config/loader.js +1 -1
- package/dist/cjs/core/config/loader.js.map +1 -1
- package/dist/cjs/core/tracking/api-client.js +1 -1
- package/dist/cjs/core/tracking/api-client.js.map +1 -1
- package/dist/cjs/index.js +2 -2
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/utils/url-builder.js +32 -7
- package/dist/cjs/utils/url-builder.js.map +1 -1
- package/dist/esm/core/config/loader.js +1 -1
- package/dist/esm/core/config/loader.js.map +1 -1
- package/dist/esm/core/tracking/api-client.js +1 -1
- package/dist/esm/core/tracking/api-client.js.map +1 -1
- package/dist/esm/index.js +2 -2
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/utils/url-builder.js +32 -7
- package/dist/esm/utils/url-builder.js.map +1 -1
- package/dist/types/index.d.ts +2 -2
- package/dist/types/types/index.d.ts +2 -2
- package/dist/types/types/index.d.ts.map +1 -1
- package/dist/types/utils/url-builder.d.ts +11 -3
- package/dist/types/utils/url-builder.d.ts.map +1 -1
- package/examples/README.md +250 -254
- package/examples/azure-basic.ts +25 -13
- package/examples/azure-responses-basic.ts +36 -7
- package/examples/azure-responses-streaming.ts +36 -7
- package/examples/azure-streaming.ts +40 -19
- package/examples/getting_started.ts +54 -0
- package/examples/openai-basic.ts +39 -17
- package/examples/openai-function-calling.ts +259 -0
- package/examples/openai-responses-basic.ts +36 -7
- package/examples/openai-responses-streaming.ts +36 -7
- package/examples/openai-streaming.ts +24 -13
- package/examples/openai-vision.ts +289 -0
- package/package.json +3 -9
- package/src/core/config/azure-config.ts +72 -0
- package/src/core/config/index.ts +23 -0
- package/src/core/config/loader.ts +66 -0
- package/src/core/config/manager.ts +94 -0
- package/src/core/config/validator.ts +89 -0
- package/src/core/providers/detector.ts +159 -0
- package/src/core/providers/index.ts +16 -0
- package/src/core/tracking/api-client.ts +78 -0
- package/src/core/tracking/index.ts +21 -0
- package/src/core/tracking/payload-builder.ts +132 -0
- package/src/core/tracking/usage-tracker.ts +189 -0
- package/src/core/wrapper/index.ts +9 -0
- package/src/core/wrapper/instance-patcher.ts +288 -0
- package/src/core/wrapper/request-handler.ts +423 -0
- package/src/core/wrapper/stream-wrapper.ts +100 -0
- package/src/index.ts +336 -0
- package/src/types/function-parameters.ts +251 -0
- package/src/types/index.ts +313 -0
- package/src/types/openai-augmentation.ts +233 -0
- package/src/types/responses-api.ts +308 -0
- package/src/utils/azure-model-resolver.ts +220 -0
- package/src/utils/constants.ts +21 -0
- package/src/utils/error-handler.ts +251 -0
- package/src/utils/metadata-builder.ts +219 -0
- package/src/utils/provider-detection.ts +257 -0
- package/src/utils/request-handler-factory.ts +285 -0
- package/src/utils/stop-reason-mapper.ts +74 -0
- package/src/utils/type-guards.ts +202 -0
- package/src/utils/url-builder.ts +68 -0
package/src/core/providers/detector.ts
@@ -0,0 +1,159 @@
+/**
+ * Provider Detection Module
+ *
+ * Handles detection of AI providers (OpenAI vs Azure OpenAI).
+ * Extracted from provider.ts for better organization.
+ */
+
+import { Provider, ProviderInfo, AzureConfig } from '../../types/index.js';
+import { OpenAIClientInstance } from '../../types/function-parameters.js';
+import { isOpenAIClientInstance } from '../../utils/type-guards.js';
+import { createProviderInfo, validateProviderInfo } from '../../utils/provider-detection.js';
+import { getLogger } from '../config/index.js';
+
+// Global logger
+const logger = getLogger();
+
+/**
+ * Detect Azure OpenAI provider from client instance
+ *
+ * @param client - OpenAI or AzureOpenAI client instance
+ * @returns ProviderInfo with detection results
+ */
+export function detectProvider(client: OpenAIClientInstance): ProviderInfo {
+  // Validate client instance
+  if (!isOpenAIClientInstance(client)) {
+    logger.warn('Invalid OpenAI client instance provided to detectProvider');
+    return {
+      provider: Provider.OPENAI,
+      isAzure: false,
+      azureConfig: undefined,
+    };
+  }
+
+  try {
+    // Use strategy pattern for clean provider detection
+    const providerInfo = createProviderInfo(client, logger);
+
+    // Validate the result
+    const validation = validateProviderInfo(providerInfo);
+    if (validation.warnings.length > 0) {
+      logger.warn('Provider detection completed with warnings', {
+        warnings: validation.warnings,
+      });
+    }
+
+    // Log final result
+    if (providerInfo.isAzure) {
+      logger.info('Azure OpenAI provider detected', {
+        provider: providerInfo.provider,
+        hasAzureConfig: !!providerInfo.azureConfig,
+        endpoint: providerInfo.azureConfig?.endpoint ? '[REDACTED]' : undefined,
+      });
+    } else {
+      logger.debug('Standard OpenAI provider detected');
+    }
+
+    return providerInfo;
+  } catch (error) {
+    logger.warn('Error during provider detection, defaulting to OpenAI', {
+      error: error instanceof Error ? error.message : String(error),
+    });
+
+    // Default to OpenAI on any error
+    return {
+      provider: Provider.OPENAI,
+      isAzure: false,
+      azureConfig: undefined,
+    };
+  }
+}
+
+/**
+ * Check if the current environment has Azure configuration
+ * This is a lightweight check for lazy loading decisions
+ *
+ * @returns boolean indicating if Azure config is present
+ */
+export function hasAzureConfig(): boolean {
+  return !!(
+    process.env.AZURE_OPENAI_ENDPOINT ||
+    process.env.AZURE_OPENAI_DEPLOYMENT ||
+    process.env.AZURE_OPENAI_API_KEY
+  );
+}
+
+/**
+ * Validate Azure configuration completeness
+ *
+ * @param config - Azure configuration to validate
+ * @returns validation result with missing fields
+ */
+export function validateAzureConfig(config: AzureConfig): {
+  isValid: boolean;
+  missingFields: string[];
+  warnings: string[];
+} {
+  const missingFields: string[] = [];
+  const warnings: string[] = [];
+
+  // Required fields for basic Azure OpenAI operation
+  if (!config.endpoint) {
+    missingFields.push('endpoint');
+  }
+
+  if (!config.apiKey) {
+    missingFields.push('apiKey');
+  }
+
+  // Optional but recommended fields
+  if (!config.deployment) {
+    warnings.push('deployment name not specified - may need to be included in model parameter');
+  }
+
+  if (!config.apiVersion) {
+    warnings.push('API version not specified - using default 2024-12-01-preview');
+  }
+
+  // Validate endpoint format
+  if (config.endpoint) {
+    try {
+      new URL(config.endpoint);
+      if (!config.endpoint.toLowerCase().includes('azure')) {
+        warnings.push(
+          'endpoint does not contain "azure" - please verify this is an Azure OpenAI endpoint'
+        );
+      }
+    } catch (error) {
+      missingFields.push('valid endpoint URL');
+    }
+  }
+
+  return {
+    isValid: missingFields.length === 0,
+    missingFields,
+    warnings,
+  };
+}
+
+/**
+ * Get provider metadata for Revenium payload
+ *
+ * @param providerInfo - Provider detection result
+ * @returns metadata object for Revenium
+ */
+export function getProviderMetadata(providerInfo: ProviderInfo): {
+  provider: string;
+  modelSource: string;
+} {
+  if (providerInfo.isAzure) {
+    return {
+      provider: 'Azure',
+      modelSource: 'OPENAI',
+    };
+  }
+  return {
+    provider: 'OPENAI',
+    modelSource: 'OPENAI',
+  };
+}
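The detector above exports four functions. The sketch below shows how they might be exercised from a hypothetical sibling module under package/src/; the relative import path, the AzureOpenAI constructor options, and the placeholder endpoint are assumptions, not taken from this diff.

```ts
// Hedged usage sketch for the new detector exports.
import { AzureOpenAI } from 'openai';
import {
  detectProvider,
  hasAzureConfig,
  validateAzureConfig,
} from './core/providers/index.js'; // assumed caller location: package/src/

const client = new AzureOpenAI({
  endpoint: 'https://my-resource.openai.azure.com', // placeholder endpoint
  apiKey: process.env.AZURE_OPENAI_API_KEY ?? '',
  apiVersion: '2024-12-01-preview',
});

const info = detectProvider(client);
if (info.isAzure && info.azureConfig) {
  const { isValid, missingFields, warnings } = validateAzureConfig(info.azureConfig);
  if (!isValid) console.warn('Incomplete Azure config, missing:', missingFields);
  for (const warning of warnings) console.warn(warning);
}

console.log('Azure environment variables present:', hasAzureConfig());
```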
package/src/core/providers/index.ts
@@ -0,0 +1,16 @@
+/**
+ * Providers module - Main exports
+ *
+ * This module provides a clean interface for provider detection and management,
+ * separating concerns into focused sub-modules.
+ */
+
+// Re-export all provider functionality
+export {
+  detectProvider,
+  hasAzureConfig,
+  validateAzureConfig,
+  getProviderMetadata,
+} from './detector.js';
+
+export { gatherAzureConfig } from '../config/azure-config.js';
package/src/core/tracking/api-client.ts
@@ -0,0 +1,78 @@
+/**
+ * Revenium API Client Module
+ *
+ * Handles HTTP communication with the Revenium API.
+ * Extracted from tracking.ts for single responsibility.
+ */
+
+import { ReveniumPayload } from '../../types/index.js';
+import { getConfig, getLogger } from '../config/index.js';
+import { buildReveniumUrl } from '../../utils/url-builder.js';
+
+// Global logger
+const logger = getLogger();
+
+/**
+ * Send payload to Revenium API
+ *
+ * This is the shared HTTP function that eliminates all duplication
+ * between chat completions and embeddings tracking.
+ *
+ * @param payload - The payload to send to Revenium
+ */
+export async function sendToRevenium(payload: ReveniumPayload): Promise<void> {
+  const config = getConfig();
+  if (!config) return logger.warn('Revenium configuration not found, skipping tracking');
+
+  // Use the new URL builder utility instead of nested conditionals
+  const url = buildReveniumUrl(
+    config.reveniumBaseUrl || 'https://api.revenium.io',
+    '/ai/completions'
+  );
+
+  logger.debug('Sending Revenium API request', {
+    url,
+    operationType: payload.operationType,
+    transactionId: payload.transactionId,
+    model: payload.model,
+    totalTokens: payload.totalTokenCount,
+  });
+
+  const response = await fetch(url, {
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+      Accept: 'application/json',
+      'x-api-key': config.reveniumApiKey,
+    },
+    body: JSON.stringify(payload),
+  });
+
+  logger.debug('Revenium API response', {
+    status: response.status,
+    statusText: response.statusText,
+    transactionId: payload.transactionId,
+    operationType: payload.operationType,
+  });
+
+  if (!response.ok) {
+    const responseText = await response.text();
+    logger.error('Revenium API error response', {
+      status: response.status,
+      statusText: response.statusText,
+      body: responseText,
+      transactionId: payload.transactionId,
+      operationType: payload.operationType,
+    });
+    throw new Error(
+      `Revenium API error: ${response.status} ${response.statusText} - ${responseText}`
+    );
+  }
+
+  const responseBody = await response.text();
+  logger.debug('Revenium tracking successful', {
+    transactionId: payload.transactionId,
+    operationType: payload.operationType,
+    response: responseBody,
+  });
+}
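For debugging credentials, the request that sendToRevenium issues can be reproduced by hand. The endpoint path and headers below come from the module above; the payload is an abbreviated, illustrative subset of the fields assembled in the payload-builder.ts hunk below, and REVENIUM_API_KEY is a placeholder variable name, not necessarily what the config loader actually reads.

```ts
// Rough manual equivalent of the fetch in sendToRevenium, for debugging only.
const res = await fetch('https://api.revenium.io/ai/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Accept: 'application/json',
    'x-api-key': process.env.REVENIUM_API_KEY ?? '', // placeholder env var name
  },
  body: JSON.stringify({
    // Abbreviated, illustrative payload; the real one is built by buildPayload().
    operationType: 'CHAT',
    transactionId: 'chat-debug-example',
    model: 'gpt-4o-mini',
    inputTokenCount: 12,
    outputTokenCount: 34,
    totalTokenCount: 46,
  }),
});
console.log(res.status, await res.text());
```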
package/src/core/tracking/index.ts
@@ -0,0 +1,21 @@
+/**
+ * Tracking module - Main exports
+ *
+ * This module provides a clean interface for usage tracking,
+ * separating concerns into focused sub-modules.
+ */
+
+// Re-export all tracking functionality
+export { sendToRevenium } from './api-client.js';
+
+export { buildPayload } from './payload-builder.js';
+
+export {
+  sendReveniumMetrics,
+  sendReveniumEmbeddingsMetrics,
+  trackUsageAsync,
+  trackEmbeddingsUsageAsync,
+} from './usage-tracker.js';
+
+// Export utility functions
+export { mapStopReason } from '../../utils/stop-reason-mapper.js';
package/src/core/tracking/payload-builder.ts
@@ -0,0 +1,132 @@
+/**
+ * Payload Builder Module
+ *
+ * Handles construction of Revenium API payloads.
+ * Extracted from tracking.ts for single responsibility.
+ */
+
+import { randomUUID } from 'crypto';
+import { ReveniumPayload, ProviderInfo } from '../../types/index.js';
+import {
+  OpenAIChatResponse,
+  OpenAIEmbeddingResponse,
+  OpenAIChatRequest,
+  OpenAIEmbeddingRequest,
+} from '../../types/function-parameters.js';
+import { getLogger } from '../config/index.js';
+import { mapStopReason } from '../../utils/stop-reason-mapper.js';
+import { buildMetadataFields } from '../../utils/metadata-builder.js';
+import { resolveAzureModelName } from '../../utils/azure-model-resolver.js';
+import { getProviderMetadata } from '../providers/index.js';
+
+// Global logger
+const logger = getLogger();
+
+/**
+ * Build payload for Revenium API
+ *
+ * This shared payload builder eliminates payload duplication between
+ * chat completions and embeddings. Handles both CHAT and EMBED operation types.
+ *
+ * @param operationType - Type of operation (CHAT or EMBED)
+ * @param response - API response from OpenAI/Azure
+ * @param request - Original request parameters
+ * @param startTime - Request start timestamp
+ * @param duration - Request duration in milliseconds
+ * @param providerInfo - Provider information for Azure support
+ * @returns Constructed payload for Revenium API
+ */
+export function buildPayload(
+  operationType: 'CHAT' | 'EMBED',
+  response: OpenAIChatResponse | OpenAIEmbeddingResponse,
+  request: OpenAIChatRequest | OpenAIEmbeddingRequest,
+  startTime: number,
+  duration: number,
+  providerInfo?: ProviderInfo
+): ReveniumPayload {
+  const now = new Date().toISOString();
+  const requestTime = new Date(startTime).toISOString();
+  const usage = response.usage;
+
+  // Resolve model name for Azure deployments
+  const originalModel = response.model;
+  const resolvedModel = providerInfo?.isAzure
+    ? resolveAzureModelName(originalModel)
+    : originalModel;
+
+  // Get provider metadata
+  const providerMetadata = providerInfo
+    ? getProviderMetadata(providerInfo)
+    : { provider: 'OPENAI', modelSource: 'OPENAI' };
+
+  if (providerInfo?.isAzure && resolvedModel !== originalModel) {
+    logger.debug('Azure model name resolved for pricing', {
+      deployment: originalModel,
+      resolved: resolvedModel,
+      provider: providerMetadata.provider,
+    });
+  }
+
+  // Build metadata fields using utility (eliminates repetitive spreading)
+  const metadataFields = buildMetadataFields(request.usageMetadata);
+
+  // Common fields for all operations
+  const commonPayload = {
+    costType: 'AI' as const,
+    model: resolvedModel, // Use resolved model name for accurate pricing
+    responseTime: now,
+    requestDuration: duration,
+    provider: providerMetadata.provider,
+    modelSource: providerMetadata.modelSource,
+    requestTime,
+    completionStartTime: now,
+
+    // Common token counts
+    inputTokenCount: usage.prompt_tokens,
+    totalTokenCount: usage.total_tokens,
+
+    // Metadata fields (processed by utility)
+    ...metadataFields,
+
+    // Fixed middleware source identifier
+    middlewareSource: 'nodejs',
+
+    // Backend calculates costs
+    inputTokenCost: undefined,
+    outputTokenCost: undefined,
+    totalCost: undefined,
+  };
+
+  // Operation-specific fields
+
+  if (operationType !== 'CHAT') {
+    // For embeddings, we don't need the response cast since we use commonPayload
+    return {
+      ...commonPayload,
+      operationType: 'EMBED',
+      transactionId: `embed-${randomUUID()}`,
+      outputTokenCount: 0,
+      reasoningTokenCount: 0,
+      cacheCreationTokenCount: 0,
+      cacheReadTokenCount: 0,
+      stopReason: 'END',
+      isStreamed: false,
+      timeToFirstToken: 0,
+    };
+  }
+  const chatResponse = response as OpenAIChatResponse;
+  const chatUsage = chatResponse.usage;
+
+  return {
+    ...commonPayload,
+    operationType: 'CHAT',
+    transactionId: chatResponse.id || `chat-${randomUUID()}`,
+    outputTokenCount: chatUsage.completion_tokens || 0,
+    reasoningTokenCount: chatUsage.reasoning_tokens || 0,
+    cacheCreationTokenCount: 0,
+    cacheReadTokenCount: chatUsage.cached_tokens || 0,
+    stopReason: mapStopReason(chatResponse.choices?.[0]?.finish_reason, logger),
+    isStreamed: Boolean((request as OpenAIChatRequest).stream),
+    timeToFirstToken: (request as OpenAIChatRequest).stream ? 0 : duration,
+  };
+}
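A hedged sketch of buildPayload called in isolation: the mock response mirrors the minimal shape that trackUsageAsync constructs in the usage-tracker.ts hunk below, the request matches its mockRequest, and the relative import paths assume a caller under package/src/.

```ts
import { buildPayload } from './core/tracking/payload-builder.js';
import type { OpenAIChatResponse, OpenAIChatRequest } from './types/function-parameters.js';

const durationMs = 850;
const startTime = Date.now() - durationMs;

// Minimal mock response, matching the shape used by trackUsageAsync below.
const response = {
  id: 'chatcmpl-example',
  model: 'gpt-4o-mini',
  usage: { prompt_tokens: 12, completion_tokens: 34, total_tokens: 46 },
  choices: [{ finish_reason: 'stop' }],
} as OpenAIChatResponse;

const request: OpenAIChatRequest = {
  model: 'gpt-4o-mini',
  messages: [],
};

const payload = buildPayload('CHAT', response, request, startTime, durationMs);
// With no providerInfo passed, provider/modelSource default to 'OPENAI';
// transactionId reuses the response id and token counts come from usage.
console.log(payload.transactionId, payload.totalTokenCount, payload.provider);
```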
package/src/core/tracking/usage-tracker.ts
@@ -0,0 +1,189 @@
+/**
+ * Usage Tracker Module
+ *
+ * High-level tracking functions that combine payload building and API communication.
+ * Extracted from tracking.ts for better organization.
+ */
+
+import { randomUUID } from 'crypto';
+import { UsageMetadata, ProviderInfo } from '../../types/index.js';
+import {
+  OpenAIChatResponse,
+  OpenAIEmbeddingResponse,
+  OpenAIChatRequest,
+  OpenAIEmbeddingRequest,
+} from '../../types/function-parameters.js';
+import { getLogger } from '../config/index.js';
+import { sendToRevenium } from './api-client.js';
+import { buildPayload } from './payload-builder.js';
+import { safeAsyncOperation } from '../../utils/error-handler.js';
+
+// Global logger
+const logger = getLogger();
+
+/**
+ * Chat completions tracking - now a thin wrapper with Azure support
+ */
+export async function sendReveniumMetrics(
+  response: OpenAIChatResponse,
+  request: OpenAIChatRequest,
+  startTime: number,
+  duration: number,
+  providerInfo?: ProviderInfo
+): Promise<void> {
+  await safeAsyncOperation(
+    async () => {
+      const payload = buildPayload('CHAT', response, request, startTime, duration, providerInfo);
+      await sendToRevenium(payload);
+    },
+    'Chat completion tracking',
+    {
+      logError: true,
+      rethrow: false, // Don't rethrow to maintain fire-and-forget behavior
+      messagePrefix: 'Chat completion tracking failed: ',
+    },
+    logger
+  );
+}
+
+/**
+ * Embeddings tracking - now a thin wrapper with Azure support
+ */
+export async function sendReveniumEmbeddingsMetrics(
+  response: OpenAIEmbeddingResponse,
+  request: OpenAIEmbeddingRequest,
+  startTime: number,
+  duration: number,
+  providerInfo?: ProviderInfo
+): Promise<void> {
+  await safeAsyncOperation(
+    async () => {
+      const payload = buildPayload('EMBED', response, request, startTime, duration, providerInfo);
+      await sendToRevenium(payload);
+    },
+    'Embeddings tracking',
+    {
+      logError: true,
+      rethrow: false, // Don't rethrow to maintain fire-and-forget behavior
+      messagePrefix: 'Embeddings tracking failed: ',
+    },
+    logger
+  );
+}
+
+/**
+ * Fire-and-forget wrapper for chat completions with Azure support
+ */
+export function trackUsageAsync(trackingData: {
+  requestId: string;
+  model: string;
+  promptTokens: number;
+  completionTokens: number;
+  totalTokens: number;
+  reasoningTokens?: number;
+  cachedTokens?: number;
+  duration: number;
+  finishReason: string | null;
+  usageMetadata?: UsageMetadata;
+  isStreamed?: boolean;
+  timeToFirstToken?: number;
+  providerInfo?: ProviderInfo;
+}): void {
+  const mockResponse = {
+    id: trackingData.requestId,
+    model: trackingData.model,
+    usage: {
+      prompt_tokens: trackingData.promptTokens,
+      completion_tokens: trackingData.completionTokens,
+      total_tokens: trackingData.totalTokens,
+      ...(trackingData.reasoningTokens && { reasoning_tokens: trackingData.reasoningTokens }),
+      ...(trackingData.cachedTokens && { cached_tokens: trackingData.cachedTokens }),
+    },
+    choices: [
+      {
+        finish_reason: trackingData.finishReason,
+      },
+    ],
+  };
+
+  const mockRequest: OpenAIChatRequest = {
+    model: trackingData.model,
+    messages: [], // Mock empty messages array for type compliance
+    usageMetadata: trackingData.usageMetadata,
+    stream: trackingData.isStreamed,
+  };
+
+  const startTime = Date.now() - trackingData.duration;
+
+  sendReveniumMetrics(
+    mockResponse,
+    mockRequest,
+    startTime,
+    trackingData.duration,
+    trackingData.providerInfo
+  )
+    .then(() => {
+      logger.debug('Usage tracking completed successfully', {
+        requestId: trackingData.requestId,
+        model: trackingData.model,
+        totalTokens: trackingData.totalTokens,
+        isStreamed: trackingData.isStreamed,
+      });
+    })
+    .catch(error => {
+      logger.warn('Usage tracking failed', {
+        error: error instanceof Error ? error.message : String(error),
+        requestId: trackingData.requestId,
+        model: trackingData.model,
+      });
+    });
+}
+
+/**
+ * Fire-and-forget wrapper for embeddings with Azure support
+ */
+export function trackEmbeddingsUsageAsync(trackingData: {
+  transactionId: string;
+  model: string;
+  promptTokens: number;
+  totalTokens: number;
+  duration: number;
+  usageMetadata?: UsageMetadata;
+  requestStartTime: number;
+  providerInfo?: ProviderInfo;
+}): void {
+  const mockResponse: OpenAIEmbeddingResponse = {
+    model: trackingData.model,
+    usage: {
+      prompt_tokens: trackingData.promptTokens,
+      total_tokens: trackingData.totalTokens,
+    },
+    data: [], // Mock empty data array for type compliance
+    object: 'list',
+  };
+
+  const mockRequest: OpenAIEmbeddingRequest = {
+    model: trackingData.model,
+    input: '', // Mock empty input for type compliance
+    usageMetadata: trackingData.usageMetadata,
+  };
+
+  sendReveniumEmbeddingsMetrics(
+    mockResponse,
+    mockRequest,
+    trackingData.requestStartTime,
+    trackingData.duration,
+    trackingData.providerInfo
+  )
+    .then(() => {
+      logger.debug('Embeddings tracking completed successfully', {
+        transactionId: trackingData.transactionId,
+      });
+    })
+    .catch(error => {
+      logger.warn('Embeddings tracking failed', {
+        error: error instanceof Error ? error.message : String(error),
+        transactionId: trackingData.transactionId,
+      });
+    });
+}
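The fire-and-forget wrappers are the intended call path for instrumented requests: they return immediately and only log success or failure. A minimal sketch using only fields declared in trackUsageAsync's parameter type above (the import path again assumes a caller under package/src/, and the values are illustrative):

```ts
import { trackUsageAsync } from './core/tracking/usage-tracker.js';

// Values are illustrative; in the real wrapper they come from the OpenAI response.
trackUsageAsync({
  requestId: 'chatcmpl-example',
  model: 'gpt-4o-mini',
  promptTokens: 12,
  completionTokens: 34,
  totalTokens: 46,
  duration: 850,
  finishReason: 'stop',
  isStreamed: false,
});
// Returns void immediately; tracking errors are logged via logger.warn, never thrown.
```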
package/src/core/wrapper/index.ts
@@ -0,0 +1,9 @@
+/**
+ * Wrapper module - Main exports
+ *
+ * This module provides a clean interface for OpenAI client wrapping,
+ * separating concerns into focused sub-modules.
+ */
+
+// Re-export all wrapper functionality
+export { patchOpenAI, patchOpenAIInstance, getProviderInfo } from './instance-patcher.js';
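instance-patcher.ts itself is not included in this excerpt, so the call shapes below are assumptions inferred from the export names and from the ProviderInfo type used elsewhere in the diff; treat this as a sketch, not documented API.

```ts
// Assumed usage only: exact signatures live in instance-patcher.ts (not shown here),
// and whether these names are re-exported from the package root is also an assumption.
import OpenAI from 'openai';
import { patchOpenAIInstance, getProviderInfo } from '@revenium/openai';

const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
patchOpenAIInstance(client); // assumed: instruments this client so its calls are metered

const info = getProviderInfo(client); // assumed: returns the ProviderInfo detected for it
console.log(info.provider, info.isAzure);
```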