@revenium/openai 1.0.10 → 1.0.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +20 -0
- package/CHANGELOG.md +52 -0
- package/LICENSE +21 -21
- package/README.md +682 -1152
- package/dist/cjs/core/config/loader.js +1 -1
- package/dist/cjs/core/config/loader.js.map +1 -1
- package/dist/cjs/core/tracking/api-client.js +1 -1
- package/dist/cjs/core/tracking/api-client.js.map +1 -1
- package/dist/cjs/index.js +4 -4
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/types/openai-augmentation.js +1 -1
- package/dist/cjs/utils/url-builder.js +32 -7
- package/dist/cjs/utils/url-builder.js.map +1 -1
- package/dist/esm/core/config/loader.js +1 -1
- package/dist/esm/core/config/loader.js.map +1 -1
- package/dist/esm/core/tracking/api-client.js +1 -1
- package/dist/esm/core/tracking/api-client.js.map +1 -1
- package/dist/esm/index.js +4 -4
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/types/openai-augmentation.js +1 -1
- package/dist/esm/utils/url-builder.js +32 -7
- package/dist/esm/utils/url-builder.js.map +1 -1
- package/dist/types/index.d.ts +4 -4
- package/dist/types/types/index.d.ts +2 -2
- package/dist/types/types/index.d.ts.map +1 -1
- package/dist/types/types/openai-augmentation.d.ts +1 -1
- package/dist/types/utils/url-builder.d.ts +11 -3
- package/dist/types/utils/url-builder.d.ts.map +1 -1
- package/examples/README.md +357 -0
- package/examples/azure-basic.ts +206 -0
- package/examples/azure-responses-basic.ts +233 -0
- package/examples/azure-responses-streaming.ts +255 -0
- package/examples/azure-streaming.ts +209 -0
- package/examples/getting_started.ts +54 -0
- package/examples/openai-basic.ts +147 -0
- package/examples/openai-function-calling.ts +259 -0
- package/examples/openai-responses-basic.ts +212 -0
- package/examples/openai-responses-streaming.ts +232 -0
- package/examples/openai-streaming.ts +172 -0
- package/examples/openai-vision.ts +289 -0
- package/package.json +81 -84
- package/src/core/config/azure-config.ts +72 -0
- package/src/core/config/index.ts +23 -0
- package/src/core/config/loader.ts +66 -0
- package/src/core/config/manager.ts +94 -0
- package/src/core/config/validator.ts +89 -0
- package/src/core/providers/detector.ts +159 -0
- package/src/core/providers/index.ts +16 -0
- package/src/core/tracking/api-client.ts +78 -0
- package/src/core/tracking/index.ts +21 -0
- package/src/core/tracking/payload-builder.ts +132 -0
- package/src/core/tracking/usage-tracker.ts +189 -0
- package/src/core/wrapper/index.ts +9 -0
- package/src/core/wrapper/instance-patcher.ts +288 -0
- package/src/core/wrapper/request-handler.ts +423 -0
- package/src/core/wrapper/stream-wrapper.ts +100 -0
- package/src/index.ts +336 -0
- package/src/types/function-parameters.ts +251 -0
- package/src/types/index.ts +313 -0
- package/src/types/openai-augmentation.ts +233 -0
- package/src/types/responses-api.ts +308 -0
- package/src/utils/azure-model-resolver.ts +220 -0
- package/src/utils/constants.ts +21 -0
- package/src/utils/error-handler.ts +251 -0
- package/src/utils/metadata-builder.ts +219 -0
- package/src/utils/provider-detection.ts +257 -0
- package/src/utils/request-handler-factory.ts +285 -0
- package/src/utils/stop-reason-mapper.ts +74 -0
- package/src/utils/type-guards.ts +202 -0
- package/src/utils/url-builder.ts +68 -0

package/src/utils/provider-detection.ts
@@ -0,0 +1,257 @@
+/**
+ * Provider Detection Utilities
+ *
+ * Simplifies complex provider detection logic using strategy pattern
+ * instead of nested conditionals.
+ */
+
+import { Provider, ProviderInfo, AzureConfig } from '../types/index.js';
+import { OpenAIClientInstance } from '../types/function-parameters.js';
+import { Logger } from '../types/index.js';
+
+/**
+ * Provider detection strategy interface
+ */
+interface ProviderDetectionStrategy {
+  /** Strategy name for logging */
+  name: string;
+  /** Priority (higher = checked first) */
+  priority: number;
+  /** Detection function */
+  detect: (client: OpenAIClientInstance) => boolean;
+  /** Additional context for logging */
+  getContext?: (client: OpenAIClientInstance) => Record<string, unknown>;
+}
+
+/**
+ * Provider detection strategies in priority order
+ */
+const DETECTION_STRATEGIES: ProviderDetectionStrategy[] = [
+  {
+    name: 'Constructor Name',
+    priority: 100,
+    detect: client => {
+      return client?.constructor?.name?.includes('Azure') || false;
+    },
+    getContext: client => ({
+      constructorName: client?.constructor?.name,
+    }),
+  },
+  {
+    name: 'Base URL',
+    priority: 90,
+    detect: client => {
+      const baseUrl = getBaseUrlString(client);
+      return baseUrl?.toLowerCase().includes('azure') || false;
+    },
+    getContext: client => ({
+      baseURL: getBaseUrlString(client),
+    }),
+  },
+  {
+    name: 'Environment Variables',
+    priority: 80,
+    detect: client => {
+      // Only use env vars if not explicitly OpenAI
+      const baseUrl = getBaseUrlString(client);
+      const isExplicitlyOpenAI =
+        baseUrl?.includes('api.openai.com') ||
+        (client?.constructor?.name?.toLowerCase().includes('openai') &&
+          !client?.constructor?.name?.toLowerCase().includes('azure'));
+
+      return !isExplicitlyOpenAI && !!process.env.AZURE_OPENAI_ENDPOINT;
+    },
+    getContext: () => ({
+      hasAzureEndpoint: !!process.env.AZURE_OPENAI_ENDPOINT,
+      hasAzureDeployment: !!process.env.AZURE_OPENAI_DEPLOYMENT,
+      hasAzureApiKey: !!process.env.AZURE_OPENAI_API_KEY,
+    }),
+  },
+];
+
+/**
+ * Extract base URL as string from client
+ */
+function getBaseUrlString(client: OpenAIClientInstance): string | undefined {
+  if (!client?.baseURL) return undefined;
+
+  return typeof client.baseURL === 'string' ? client.baseURL : client.baseURL.toString();
+}
+
+/**
+ * Detect provider using strategy pattern
+ *
+ * This replaces the complex nested conditional logic with a clean,
+ * testable strategy pattern.
+ *
+ * @param client - OpenAI client instance
+ * @param logger - Logger for debugging
+ * @returns Provider detection result
+ */
+export function detectProviderStrategy(
+  client: OpenAIClientInstance,
+  logger?: Logger
+): { provider: Provider; strategy?: string; context?: Record<string, unknown> } {
+  // Sort strategies by priority (highest first)
+  const sortedStrategies = [...DETECTION_STRATEGIES].sort((a, b) => b.priority - a.priority);
+
+  for (const strategy of sortedStrategies) {
+    try {
+      if (strategy.detect(client)) {
+        const context = strategy.getContext?.(client) || {};
+
+        if (logger) {
+          logger.debug(`Azure provider detected via ${strategy.name}`, context);
+        }
+
+        return {
+          provider: Provider.AZURE_OPENAI,
+          strategy: strategy.name,
+          context,
+        };
+      }
+    } catch (error) {
+      if (logger) {
+        logger.warn(`Provider detection strategy '${strategy.name}' failed`, {
+          error: error instanceof Error ? error.message : String(error),
+        });
+      }
+    }
+  }
+
+  // Default to OpenAI
+  if (logger) {
+    logger.debug('Standard OpenAI provider detected (no Azure indicators found)');
+  }
+
+  return { provider: Provider.OPENAI };
+}
+
+/**
+ * Azure configuration gathering strategies
+ */
+interface AzureConfigStrategy {
+  name: string;
+  gather: (client: OpenAIClientInstance) => Partial<AzureConfig>;
+}
+
+const AZURE_CONFIG_STRATEGIES: AzureConfigStrategy[] = [
+  {
+    name: 'Client BaseURL',
+    gather: client => {
+      const baseUrl = getBaseUrlString(client);
+      return baseUrl ? { endpoint: baseUrl } : {};
+    },
+  },
+  {
+    name: 'Environment Variables',
+    gather: () => ({
+      endpoint: process.env.AZURE_OPENAI_ENDPOINT,
+      deployment: process.env.AZURE_OPENAI_DEPLOYMENT,
+      apiVersion: process.env.AZURE_OPENAI_API_VERSION || '2024-12-01-preview',
+      apiKey: process.env.AZURE_OPENAI_API_KEY,
+      tenantId: process.env.AZURE_OPENAI_TENANT_ID,
+      resourceGroup: process.env.AZURE_OPENAI_RESOURCE_GROUP,
+    }),
+  },
+];
+
+/**
+ * Gather Azure configuration using strategy pattern
+ *
+ * @param client - OpenAI client instance
+ * @param logger - Logger for debugging
+ * @returns Merged Azure configuration
+ */
+export function gatherAzureConfigStrategy(
+  client: OpenAIClientInstance,
+  logger?: Logger
+): AzureConfig {
+  const config: AzureConfig = {};
+  for (const strategy of AZURE_CONFIG_STRATEGIES) {
+    try {
+      const strategyConfig = strategy.gather(client);
+
+      // Merge non-undefined values
+      Object.entries(strategyConfig).forEach(([key, value]) => {
+        if (value !== undefined && !(key in config)) {
+          (config as any)[key] = value;
+        }
+      });
+    } catch (error) {
+      if (logger) {
+        logger.warn(`Azure config strategy '${strategy.name}' failed`, {
+          error: error instanceof Error ? error.message : String(error),
+        });
+      }
+    }
+  }
+
+  if (logger) {
+    logger.debug('Azure configuration gathered', {
+      hasEndpoint: !!config.endpoint,
+      hasDeployment: !!config.deployment,
+      hasApiKey: !!config.apiKey,
+      apiVersion: config.apiVersion,
+    });
+  }
+
+  return config;
+}
+
+/**
+ * Create complete provider info using strategies
+ *
+ * @param client - OpenAI client instance
+ * @param logger - Logger for debugging
+ * @returns Complete provider information
+ */
+export function createProviderInfo(client: OpenAIClientInstance, logger?: Logger): ProviderInfo {
+  const detection = detectProviderStrategy(client, logger);
+
+  if (detection.provider === Provider.AZURE_OPENAI) {
+    const azureConfig = gatherAzureConfigStrategy(client, logger);
+
+    return {
+      provider: detection.provider,
+      isAzure: true,
+      azureConfig,
+    };
+  }
+
+  return {
+    provider: detection.provider,
+    isAzure: false,
+    azureConfig: undefined,
+  };
+}
+
+/**
+ * Validate provider detection result
+ *
+ * @param providerInfo - Provider info to validate
+ * @returns Validation result
+ */
+export function validateProviderInfo(providerInfo: ProviderInfo): {
+  isValid: boolean;
+  warnings: string[];
+} {
+  const warnings: string[] = [];
+  if (providerInfo.isAzure) {
+    if (!providerInfo.azureConfig) {
+      warnings.push('Azure provider detected but no Azure configuration available');
+    } else {
+      if (!providerInfo.azureConfig.endpoint) {
+        warnings.push('Azure configuration missing endpoint');
+      }
+      if (!providerInfo.azureConfig.apiKey) {
+        warnings.push('Azure configuration missing API key');
+      }
+    }
+  }
+
+  return {
+    isValid: warnings.length === 0,
+    warnings,
+  };
+}
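
For orientation, here is a minimal usage sketch of the helpers above. It is not part of the published diff: the import path, the Azure resource name, and the `as any` casts to the package-internal `OpenAIClientInstance` type are illustrative assumptions.

```typescript
// Illustrative only: exercising createProviderInfo/validateProviderInfo from the file above.
import { AzureOpenAI } from 'openai';
import { createProviderInfo, validateProviderInfo } from './provider-detection.js';

// An AzureOpenAI client is matched by the 'Constructor Name' strategy (priority 100)
// before the Base URL and environment-variable strategies are consulted.
const azureClient = new AzureOpenAI({
  endpoint: 'https://my-resource.openai.azure.com', // assumed resource name
  apiKey: process.env.AZURE_OPENAI_API_KEY,
  apiVersion: '2024-12-01-preview',
});

const info = createProviderInfo(azureClient as any); // cast: internal OpenAIClientInstance type
console.log(info.provider, info.isAzure); // AZURE_OPENAI, true

// validateProviderInfo collects warnings (e.g. missing endpoint or API key) instead of throwing.
const { isValid, warnings } = validateProviderInfo(info);
if (!isValid) console.warn('Provider info warnings:', warnings);
```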

package/src/utils/request-handler-factory.ts
@@ -0,0 +1,285 @@
+/**
+ * Request Handler Factory
+ *
+ * Eliminates dynamic require() calls and provides a clean factory pattern
+ * for creating request handlers with proper dependency injection.
+ */
+
+import {
+  OpenAIChatRequest,
+  OpenAIEmbeddingRequest,
+  OpenAIRequestOptions,
+  OpenAIClientInstance,
+  OpenAIOriginalFunction,
+  OpenAIResponsesOriginalFunction,
+} from '../types/function-parameters.js';
+import { UsageMetadata, OpenAIResponsesRequest } from '../types/index.js';
+
+/**
+ * Request handler function signatures
+ */
+export type StreamingRequestHandler = (
+  originalCreate: OpenAIOriginalFunction,
+  params: Omit<OpenAIChatRequest, 'usageMetadata'>,
+  options: OpenAIRequestOptions | undefined,
+  usageMetadata: UsageMetadata | undefined,
+  requestStartTime: number,
+  instance: OpenAIClientInstance
+) => Promise<AsyncIterable<unknown>>;
+
+export type NonStreamingRequestHandler = (
+  originalCreate: OpenAIOriginalFunction,
+  params: Omit<OpenAIChatRequest, 'usageMetadata'> | Omit<OpenAIEmbeddingRequest, 'usageMetadata'>,
+  options: OpenAIRequestOptions | undefined,
+  usageMetadata: UsageMetadata | undefined,
+  requestStartTime: number,
+  instance: OpenAIClientInstance
+) => Promise<unknown>;
+
+export type EmbeddingsRequestHandler = (
+  originalCreate: OpenAIOriginalFunction,
+  params: Omit<OpenAIEmbeddingRequest, 'usageMetadata'>,
+  options: OpenAIRequestOptions | undefined,
+  usageMetadata: UsageMetadata | undefined,
+  requestStartTime: number,
+  instance: OpenAIClientInstance
+) => Promise<unknown>;
+
+export type ResponsesStreamingRequestHandler = (
+  originalCreate: OpenAIResponsesOriginalFunction,
+  params: Omit<OpenAIResponsesRequest, 'usageMetadata'>,
+  options: OpenAIRequestOptions | undefined,
+  usageMetadata: UsageMetadata | undefined,
+  requestStartTime: number,
+  instance: OpenAIClientInstance
+) => Promise<AsyncIterable<unknown>>;
+
+export type ResponsesNonStreamingRequestHandler = (
+  originalCreate: OpenAIResponsesOriginalFunction,
+  params: Omit<OpenAIResponsesRequest, 'usageMetadata'>,
+  options: OpenAIRequestOptions | undefined,
+  usageMetadata: UsageMetadata | undefined,
+  requestStartTime: number,
+  instance: OpenAIClientInstance
+) => Promise<unknown>;
+
+/**
+ * Request handler dependencies
+ */
+export interface RequestHandlerDependencies {
+  handleStreamingRequest: StreamingRequestHandler;
+  handleNonStreamingRequest: NonStreamingRequestHandler;
+  handleEmbeddingsRequest: EmbeddingsRequestHandler;
+  handleResponsesStreamingRequest: ResponsesStreamingRequestHandler;
+  handleResponsesNonStreamingRequest: ResponsesNonStreamingRequestHandler;
+}
+
+/**
+ * Request handler factory class
+ *
+ * This eliminates the need for dynamic require() calls and provides
+ * a clean dependency injection pattern.
+ */
+export class RequestHandlerFactory {
+  private dependencies: RequestHandlerDependencies | null = null;
+
+  /**
+   * Initialize the factory with dependencies
+   */
+  initialize(dependencies: RequestHandlerDependencies): void {
+    this.dependencies = dependencies;
+  }
+
+  /**
+   * Get streaming request handler
+   */
+  getStreamingHandler(): StreamingRequestHandler {
+    if (!this.dependencies) {
+      throw new Error('RequestHandlerFactory not initialized. Call initialize() first.');
+    }
+    return this.dependencies.handleStreamingRequest;
+  }
+
+  /**
+   * Get non-streaming request handler
+   */
+  getNonStreamingHandler(): NonStreamingRequestHandler {
+    if (!this.dependencies) {
+      throw new Error('RequestHandlerFactory not initialized. Call initialize() first.');
+    }
+    return this.dependencies.handleNonStreamingRequest;
+  }
+
+  /**
+   * Get embeddings request handler
+   */
+  getEmbeddingsHandler(): EmbeddingsRequestHandler {
+    if (!this.dependencies) {
+      throw new Error('RequestHandlerFactory not initialized. Call initialize() first.');
+    }
+    return this.dependencies.handleEmbeddingsRequest;
+  }
+
+  /**
+   * Route chat request to appropriate handler
+   */
+  routeChatRequest(
+    originalCreate: OpenAIOriginalFunction,
+    params: Omit<OpenAIChatRequest, 'usageMetadata'>,
+    options: OpenAIRequestOptions | undefined,
+    usageMetadata: UsageMetadata | undefined,
+    requestStartTime: number,
+    instance: OpenAIClientInstance
+  ): Promise<unknown> {
+    if (!this.dependencies) {
+      throw new Error('RequestHandlerFactory not initialized. Call initialize() first.');
+    }
+
+    // Pass clean params (without usageMetadata) to handlers
+    // The handlers will receive usageMetadata separately
+    if (params.stream) {
+      return this.dependencies.handleStreamingRequest(
+        originalCreate,
+        params,
+        options,
+        usageMetadata,
+        requestStartTime,
+        instance
+      );
+    } else {
+      return this.dependencies.handleNonStreamingRequest(
+        originalCreate,
+        params,
+        options,
+        usageMetadata,
+        requestStartTime,
+        instance
+      );
+    }
+  }
+
+  /**
+   * Route embeddings request to appropriate handler
+   */
+  routeEmbeddingsRequest(
+    originalCreate: OpenAIOriginalFunction,
+    params: Omit<OpenAIEmbeddingRequest, 'usageMetadata'>,
+    options: OpenAIRequestOptions | undefined,
+    usageMetadata: UsageMetadata | undefined,
+    requestStartTime: number,
+    instance: OpenAIClientInstance
+  ): Promise<unknown> {
+    if (!this.dependencies) {
+      throw new Error('RequestHandlerFactory not initialized. Call initialize() first.');
+    }
+
+    // Pass clean params (without usageMetadata) to handler
+    // The handler will receive usageMetadata separately
+    return this.dependencies.handleEmbeddingsRequest(
+      originalCreate,
+      params,
+      options,
+      usageMetadata,
+      requestStartTime,
+      instance
+    );
+  }
+
+  /**
+   * Route responses request to appropriate handler (new OpenAI Responses API)
+   */
+  routeResponsesRequest(
+    originalCreate: OpenAIResponsesOriginalFunction,
+    params: Omit<OpenAIResponsesRequest, 'usageMetadata'>,
+    options: OpenAIRequestOptions | undefined,
+    usageMetadata: UsageMetadata | undefined,
+    requestStartTime: number,
+    instance: OpenAIClientInstance
+  ): Promise<unknown> {
+    if (!this.dependencies) {
+      throw new Error('RequestHandlerFactory not initialized. Call initialize() first.');
+    }
+
+    // Route to appropriate handler based on streaming
+    if (params.stream) {
+      return this.dependencies.handleResponsesStreamingRequest(
+        originalCreate,
+        params,
+        options,
+        usageMetadata,
+        requestStartTime,
+        instance
+      );
+    } else {
+      return this.dependencies.handleResponsesNonStreamingRequest(
+        originalCreate,
+        params,
+        options,
+        usageMetadata,
+        requestStartTime,
+        instance
+      );
+    }
+  }
+
+  /**
+   * Check if factory is initialized
+   */
+  isInitialized(): boolean {
+    return this.dependencies !== null;
+  }
+}
+
+/**
+ * Global factory instance
+ */
+export const requestHandlerFactory = new RequestHandlerFactory();
+
+/**
+ * Initialize the global factory (called once during module setup)
+ * Uses dynamic import to avoid circular dependencies
+ */
+export async function initializeRequestHandlerFactory(): Promise<void> {
+  // Use dynamic import instead of require for better TypeScript support
+  const handlerModule = await import('../core/wrapper/request-handler.js');
+
+  requestHandlerFactory.initialize({
+    handleStreamingRequest: handlerModule.handleStreamingRequest,
+    handleNonStreamingRequest: handlerModule.handleNonStreamingRequest,
+    handleEmbeddingsRequest: handlerModule.handleEmbeddingsRequest,
+    handleResponsesStreamingRequest: handlerModule.handleResponsesStreamingRequest,
+    handleResponsesNonStreamingRequest: handlerModule.handleResponsesNonStreamingRequest,
+  });
+}
+
+// Promise cache to prevent race conditions during initialization
+let initializationPromise: Promise<void> | null = null;
+
+/**
+ * Utility function to ensure factory is initialized
+ */
+export async function ensureFactoryInitialized(): Promise<void> {
+  if (!requestHandlerFactory.isInitialized()) {
+    if (!initializationPromise) {
+      initializationPromise = initializeRequestHandlerFactoryAsync();
+    }
+    await initializationPromise;
+  }
+}
+
+/**
+ * Async initialization for ES Module compatibility
+ * Uses dynamic import for ES Module compatibility
+ */
+async function initializeRequestHandlerFactoryAsync(): Promise<void> {
+  // Use dynamic import for ES Module compatibility
+  const handlerModule = await import('../core/wrapper/request-handler.js');
+
+  requestHandlerFactory.initialize({
+    handleStreamingRequest: handlerModule.handleStreamingRequest,
+    handleNonStreamingRequest: handlerModule.handleNonStreamingRequest,
+    handleEmbeddingsRequest: handlerModule.handleEmbeddingsRequest,
+    handleResponsesStreamingRequest: handlerModule.handleResponsesStreamingRequest,
+    handleResponsesNonStreamingRequest: handlerModule.handleResponsesNonStreamingRequest,
+  });
+}
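
A sketch of the calling side, again not part of the diff: roughly how a patched `chat.completions.create` could delegate through the global factory. The wrapper function name, the loose parameter typing, and the casts to the package-internal handler types are assumptions for illustration.

```typescript
// Illustrative only: routing a chat request through the lazily initialized factory.
import OpenAI from 'openai';
import { ensureFactoryInitialized, requestHandlerFactory } from './request-handler-factory.js';

async function patchedChatCreate(client: OpenAI, params: any, options?: any): Promise<unknown> {
  // Loads ../core/wrapper/request-handler.js on first use; later calls await the cached promise.
  await ensureFactoryInitialized();

  // The un-patched SDK method, bound so `this` still points at the completions resource.
  const originalCreate = client.chat.completions.create.bind(client.chat.completions);
  const requestStartTime = Date.now();

  // routeChatRequest dispatches to the streaming handler when params.stream is true,
  // otherwise to the non-streaming handler; usageMetadata travels as a separate argument.
  return requestHandlerFactory.routeChatRequest(
    originalCreate as any, // cast: internal OpenAIOriginalFunction type
    params,
    options,
    undefined, // usageMetadata, forwarded here when the caller supplied any
    requestStartTime,
    client as any // cast: internal OpenAIClientInstance type
  );
}
```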

package/src/utils/stop-reason-mapper.ts
@@ -0,0 +1,74 @@
+/**
+ * Stop Reason Mapper Utilities
+ *
+ * Centralized stop reason mapping logic using lookup tables
+ * instead of nested conditionals. Based on the good example
+ * from the anthropic-node middleware.
+ */
+
+/**
+ * Stop reason mapping configuration
+ * Maps provider-specific stop reasons to Revenium's standardized set
+ */
+const STOP_REASON_MAP: Record<string, string> = {
+  // OpenAI/Azure OpenAI stop reasons
+  stop: 'END',
+  function_call: 'END_SEQUENCE',
+  tool_calls: 'END_SEQUENCE',
+  timeout: 'TIMEOUT',
+  length: 'TOKEN_LIMIT',
+  max_tokens: 'TOKEN_LIMIT',
+  content_filter: 'ERROR',
+  error: 'ERROR',
+
+  // Anthropic stop reasons (for consistency across middleware)
+  end_turn: 'END',
+  stop_sequence: 'END_SEQUENCE',
+  tool_use: 'END_SEQUENCE',
+};
+
+/**
+ * Default stop reason when mapping fails
+ */
+const DEFAULT_STOP_REASON = 'END';
+
+/**
+ * Map provider stop reasons to Revenium stop reasons
+ *
+ * This replaces the nested if/switch logic with a clean lookup table approach.
+ * Based on the good example from the anthropic-node middleware.
+ *
+ * @param providerStopReason - Stop reason from the AI provider
+ * @param logger - Optional logger for warnings about unknown reasons
+ * @returns Standardized Revenium stop reason
+ */
+export function mapStopReason(
+  providerStopReason: string | null | undefined,
+  logger?: { warn: (message: string, ...args: any[]) => void }
+): string {
+  if (!providerStopReason) return DEFAULT_STOP_REASON;
+  const normalizedReason = providerStopReason.toLowerCase();
+  const mappedReason = STOP_REASON_MAP[normalizedReason];
+
+  if (!mappedReason) {
+    // Log warning for unknown stop reasons to help with future mapping
+    logger?.warn(`Unknown stop reason: ${providerStopReason}, mapping to ${DEFAULT_STOP_REASON}`);
+    return DEFAULT_STOP_REASON;
+  }
+
+  return mappedReason;
+}
+
+/**
+ * Get all supported stop reasons for documentation/testing
+ */
+export function getSupportedStopReasons(): string[] {
+  return Object.keys(STOP_REASON_MAP);
+}
+
+/**
+ * Check if a stop reason is supported
+ */
+export function isStopReasonSupported(reason: string): boolean {
+  return reason.toLowerCase() in STOP_REASON_MAP;
+}
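
Finally, a small sketch (not part of the diff) of the lookup-table mapper in use. The `finish_reason` values shown are standard OpenAI chat-completion reasons already present in `STOP_REASON_MAP`; the import path is an assumption.

```typescript
// Illustrative only: mapping provider finish reasons to Revenium stop reasons.
import { mapStopReason, isStopReasonSupported, getSupportedStopReasons } from './stop-reason-mapper.js';

mapStopReason('stop');                // 'END'
mapStopReason('tool_calls');          // 'END_SEQUENCE'
mapStopReason('length');              // 'TOKEN_LIMIT'
mapStopReason(null);                  // 'END' (falls back to DEFAULT_STOP_REASON)
mapStopReason('odd_reason', console); // warns about the unknown reason, returns 'END'

isStopReasonSupported('content_filter');       // true
console.log(getSupportedStopReasons().length); // 11 keys in the lookup table
```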