@revenium/openai 1.0.10 → 1.0.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +20 -0
- package/CHANGELOG.md +52 -0
- package/LICENSE +21 -21
- package/README.md +682 -1152
- package/dist/cjs/core/config/loader.js +1 -1
- package/dist/cjs/core/config/loader.js.map +1 -1
- package/dist/cjs/core/tracking/api-client.js +1 -1
- package/dist/cjs/core/tracking/api-client.js.map +1 -1
- package/dist/cjs/index.js +4 -4
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/types/openai-augmentation.js +1 -1
- package/dist/cjs/utils/url-builder.js +32 -7
- package/dist/cjs/utils/url-builder.js.map +1 -1
- package/dist/esm/core/config/loader.js +1 -1
- package/dist/esm/core/config/loader.js.map +1 -1
- package/dist/esm/core/tracking/api-client.js +1 -1
- package/dist/esm/core/tracking/api-client.js.map +1 -1
- package/dist/esm/index.js +4 -4
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/types/openai-augmentation.js +1 -1
- package/dist/esm/utils/url-builder.js +32 -7
- package/dist/esm/utils/url-builder.js.map +1 -1
- package/dist/types/index.d.ts +4 -4
- package/dist/types/types/index.d.ts +2 -2
- package/dist/types/types/index.d.ts.map +1 -1
- package/dist/types/types/openai-augmentation.d.ts +1 -1
- package/dist/types/utils/url-builder.d.ts +11 -3
- package/dist/types/utils/url-builder.d.ts.map +1 -1
- package/examples/README.md +357 -0
- package/examples/azure-basic.ts +206 -0
- package/examples/azure-responses-basic.ts +233 -0
- package/examples/azure-responses-streaming.ts +255 -0
- package/examples/azure-streaming.ts +209 -0
- package/examples/getting_started.ts +54 -0
- package/examples/openai-basic.ts +147 -0
- package/examples/openai-function-calling.ts +259 -0
- package/examples/openai-responses-basic.ts +212 -0
- package/examples/openai-responses-streaming.ts +232 -0
- package/examples/openai-streaming.ts +172 -0
- package/examples/openai-vision.ts +289 -0
- package/package.json +81 -84
- package/src/core/config/azure-config.ts +72 -0
- package/src/core/config/index.ts +23 -0
- package/src/core/config/loader.ts +66 -0
- package/src/core/config/manager.ts +94 -0
- package/src/core/config/validator.ts +89 -0
- package/src/core/providers/detector.ts +159 -0
- package/src/core/providers/index.ts +16 -0
- package/src/core/tracking/api-client.ts +78 -0
- package/src/core/tracking/index.ts +21 -0
- package/src/core/tracking/payload-builder.ts +132 -0
- package/src/core/tracking/usage-tracker.ts +189 -0
- package/src/core/wrapper/index.ts +9 -0
- package/src/core/wrapper/instance-patcher.ts +288 -0
- package/src/core/wrapper/request-handler.ts +423 -0
- package/src/core/wrapper/stream-wrapper.ts +100 -0
- package/src/index.ts +336 -0
- package/src/types/function-parameters.ts +251 -0
- package/src/types/index.ts +313 -0
- package/src/types/openai-augmentation.ts +233 -0
- package/src/types/responses-api.ts +308 -0
- package/src/utils/azure-model-resolver.ts +220 -0
- package/src/utils/constants.ts +21 -0
- package/src/utils/error-handler.ts +251 -0
- package/src/utils/metadata-builder.ts +219 -0
- package/src/utils/provider-detection.ts +257 -0
- package/src/utils/request-handler-factory.ts +285 -0
- package/src/utils/stop-reason-mapper.ts +74 -0
- package/src/utils/type-guards.ts +202 -0
- package/src/utils/url-builder.ts +68 -0
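Before the per-file hunks below, here is a minimal usage sketch distilled from the new example files (my own distillation for orientation, not the packaged examples/getting_started.ts; the init-result check and API names mirror openai-streaming.ts further down, and the model name and metadata values are placeholders):

import 'dotenv/config';
import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
import OpenAI from 'openai';

async function main() {
  // Read Revenium settings from the environment (see .env.example added in this release)
  const initResult = initializeReveniumFromEnv();
  if (!initResult.success) throw new Error(initResult.message);

  // Patch a client once; subsequent calls are tracked automatically
  const openai = patchOpenAIInstance(new OpenAI({ apiKey: process.env.OPENAI_API_KEY }));

  const completion = await openai.chat.completions.create({
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: 'Say hello.' }],
    usageMetadata: { taskType: 'smoke-test' }, // optional; tracking also works without it
  });
  console.log(completion.choices[0]?.message?.content);
}

main().catch(console.error);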
package/examples/openai-responses-streaming.ts
@@ -0,0 +1,232 @@
+/**
+ * OpenAI Responses API Streaming Examples
+ *
+ * This file demonstrates how to use the new OpenAI Responses API with streaming enabled
+ * using the Revenium middleware. The Responses API supports streaming for real-time
+ * response generation.
+ *
+ * Metadata Options:
+ * - Start with basic usage (no metadata) - tracking works automatically
+ * - Add subscriber info for user tracking
+ * - Include organization/product IDs for business analytics
+ * - Use task type and trace ID for detailed analysis
+ *
+ * For complete metadata field reference, see:
+ * https://revenium.readme.io/reference/meter_ai_completion
+ *
+ * Responses API Reference: https://platform.openai.com/docs/api-reference/responses
+ */
+
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import OpenAI from 'openai';
+
+// Import types for the new Responses API
+import type { ResponsesCreateParams } from '../src/types/responses-api.js';
+
+async function main() {
+  // Initialize Revenium middleware
+  await initializeReveniumFromEnv();
+
+  // Create OpenAI client
+  const openai = new OpenAI({
+    apiKey: process.env.OPENAI_API_KEY,
+  });
+
+  // Patch the OpenAI instance to add Revenium tracking
+  patchOpenAIInstance(openai);
+
+  console.log('OpenAI Responses API Streaming Examples\n');
+
+  // Example 1: Basic Responses API streaming (no metadata)
+  console.log('Example 1: Basic Responses API streaming (no metadata)');
+  try {
+    const responsesAPI = openai as any; // Type assertion for new API
+
+    if (responsesAPI.responses?.create) {
+      const stream = await responsesAPI.responses.create({
+        model: 'gpt-5',
+        input: 'Tell me a short story about a robot learning to paint.',
+        stream: true,
+      } as ResponsesCreateParams);
+
+      console.log('Streaming response:');
+      for await (const event of stream) {
+        if (event.type === 'response.output_text.delta') {
+          process.stdout.write(event.delta);
+        }
+      }
+      console.log('\nStream completed');
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 2: Responses API streaming with rich metadata
+  console.log('Example 2: Responses API streaming with rich metadata');
+  try {
+    const responsesAPI = openai as any;
+
+    if (responsesAPI.responses?.create) {
+      const stream = await responsesAPI.responses.create({
+        model: 'gpt-5',
+        input: [
+          {
+            role: 'user',
+            content: 'Explain the concept of machine learning in a conversational way.',
+          },
+        ],
+        stream: true,
+        max_output_tokens: 200,
+        usageMetadata: {
+          // User identification
+          subscriber: {
+            id: 'streaming-user-123',
+            email: 'streaming@example.com',
+            credential: {
+              name: 'api-key-prod',
+              value: 'key-klm-789',
+            },
+          },
+
+          // Organization & billing
+          organizationId: 'streaming-org-456',
+          subscriptionId: 'plan-streaming-edu-2024',
+
+          // Product & task tracking
+          productId: 'ml-educator',
+          taskType: 'educational-streaming',
+          agent: 'ml-tutor-stream',
+
+          // Session tracking
+          traceId: 'stream-trace-789',
+
+          // Quality metrics
+          responseQualityScore: 0.92,
+        },
+      } as ResponsesCreateParams);
+
+      console.log('Streaming response with metadata:');
+      for await (const event of stream) {
+        if (event.type === 'response.output_text.delta') {
+          process.stdout.write(event.delta);
+        }
+      }
+      console.log('\nStream with metadata completed');
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 3: Basic Responses API streaming with array input (no metadata)
+  console.log('Example 3: Basic Responses API streaming with array input (no metadata)');
+  try {
+    const responsesAPI = openai as any;
+
+    if (responsesAPI.responses?.create) {
+      const stream = await responsesAPI.responses.create({
+        model: 'gpt-5',
+        input: [
+          {
+            role: 'user',
+            content: 'Write a poem about the beauty of code.',
+          },
+        ],
+        stream: true,
+      } as ResponsesCreateParams);
+
+      console.log('Streaming poetry:');
+      for await (const event of stream) {
+        if (event.type === 'response.output_text.delta') {
+          process.stdout.write(event.delta);
+        }
+      }
+      console.log('\nPoetry stream completed');
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 4: Advanced Responses API streaming with comprehensive metadata
+  console.log('Example 4: Advanced Responses API streaming with comprehensive metadata');
+  try {
+    const responsesAPI = openai as any;
+
+    if (responsesAPI.responses?.create) {
+      const stream = await responsesAPI.responses.create({
+        model: 'gpt-5',
+        input: [
+          {
+            role: 'user',
+            content:
+              'Provide a detailed explanation of how streaming APIs work in real-time applications.',
+          },
+        ],
+        stream: true,
+        max_output_tokens: 300,
+        instructions:
+          'You are a technical expert explaining streaming APIs with practical examples.',
+        usageMetadata: {
+          // User identification
+          subscriber: {
+            id: 'advanced-streaming-user-789',
+            email: 'advanced@enterprise.com',
+            credential: {
+              name: 'api-key-prod',
+              value: 'key-nop-012',
+            },
+          },
+
+          // Organization & billing
+          organizationId: 'enterprise-streaming-org-012',
+          subscriptionId: 'plan-enterprise-stream-2024',
+
+          // Product & task tracking
+          productId: 'streaming-api-educator',
+          taskType: 'advanced-technical-streaming',
+          agent: 'streaming-expert',
+
+          // Session tracking
+          traceId: 'advanced-stream-trace-345',
+
+          // Quality metrics
+          responseQualityScore: 0.97,
+        },
+      } as ResponsesCreateParams);
+
+      console.log('Advanced streaming response:');
+      let deltaCount = 0;
+      for await (const event of stream) {
+        if (event.type === 'response.output_text.delta') {
+          process.stdout.write(event.delta);
+          deltaCount++;
+        }
+      }
+      console.log(`\nAdvanced stream completed (${deltaCount} delta events)`);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\nAll Responses API streaming examples completed!');
+}
+
+main().catch(console.error);
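The hunk above reaches the Responses API through an `openai as any` cast plus a runtime `responses?.create` guard, so the example still compiles and degrades gracefully on older openai SDK versions that lack the API. A small hypothetical helper (not part of the package) that centralizes the same guard could look like this:

import OpenAI from 'openai';

// Returns the responses surface if this SDK version exposes it, else null.
// The create signature is deliberately loose, matching the example's cast.
function getResponsesAPI(
  client: OpenAI
): { create: (params: unknown) => Promise<unknown> } | null {
  const candidate = (client as any).responses;
  return candidate && typeof candidate.create === 'function' ? candidate : null;
}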
package/examples/openai-streaming.ts
@@ -0,0 +1,172 @@
+/**
+ * OpenAI Streaming Example
+ *
+ * Shows how to use Revenium middleware with streaming OpenAI responses and batch embeddings.
+ * Demonstrates seamless metadata integration with streaming - all metadata fields are optional!
+ *
+ * Metadata Options:
+ * - Start with basic usage (no metadata) - tracking works automatically
+ * - Add subscriber info for user tracking
+ * - Include organization/product IDs for business analytics
+ * - Use task type and trace ID for detailed analysis
+ *
+ * For complete metadata field reference, see:
+ * https://revenium.readme.io/reference/meter_ai_completion
+ */
+
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import OpenAI from 'openai';
+
+async function openaiStreamingExample() {
+  console.log('OpenAI Streaming with Seamless Metadata Integration\n');
+
+  // Initialize Revenium middleware
+  const initResult = initializeReveniumFromEnv();
+  if (!initResult.success) {
+    console.error('Failed to initialize Revenium:', initResult.message);
+    process.exit(1);
+  }
+
+  // Create and patch OpenAI instance
+  const openai = patchOpenAIInstance(new OpenAI());
+
+  // Example 1: Basic streaming (no metadata)
+  console.log('Example 1: Basic streaming chat (automatic tracking)');
+  console.log('Assistant: ');
+
+  const basicStream = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: [{ role: 'user', content: 'Count from 1 to 5 slowly' }],
+    stream: true,
+    // No usageMetadata - still automatically tracked when stream completes!
+    // No max_tokens - let response complete naturally
+  });
+
+  for await (const chunk of basicStream) {
+    const content = chunk.choices[0]?.delta?.content || '';
+    if (content) {
+      process.stdout.write(content);
+    }
+  }
+
+  console.log('\nStreaming automatically tracked to Revenium without metadata\n');
+
+  // Example 2: Streaming with rich metadata (all optional!)
+  console.log('Example 2: Streaming chat with rich metadata');
+  console.log('Assistant: ');
+
+  const metadataStream = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: [{ role: 'user', content: 'Write a haiku about middleware' }],
+    stream: true,
+
+    // Optional metadata for advanced reporting, lineage tracking, and cost allocation
+    usageMetadata: {
+      // User identification
+      subscriber: {
+        id: 'streaming-user-456',
+        email: 'poet@company.com',
+        credential: {
+          name: 'api-key-prod',
+          value: 'key-ghi-789',
+        },
+      },
+
+      // Organization & billing
+      organizationId: 'creative-company',
+      subscriptionId: 'plan-creative-2024',
+
+      // Product & task tracking
+      productId: 'ai-poet',
+      taskType: 'creative-writing',
+      agent: 'openai-streaming-chat-node',
+
+      // Session tracking
+      traceId: 'stream-' + Date.now(),
+
+      // Quality metrics
+      responseQualityScore: 0.92,
+    },
+  });
+
+  for await (const chunk of metadataStream) {
+    const content = chunk.choices[0]?.delta?.content || '';
+    if (content) {
+      process.stdout.write(content);
+    }
+  }
+
+  console.log('\nStreaming tracked with rich metadata for analytics\n');
+
+  // Example 3: Batch embeddings (no metadata)
+  console.log('Example 3: Batch embeddings (automatic tracking)');
+
+  const batchEmbeddings = await openai.embeddings.create({
+    model: 'text-embedding-3-small',
+    input: [
+      'First document for batch processing',
+      'Second document for batch processing',
+      'Third document for batch processing',
+    ],
+    // No usageMetadata - still automatically tracked!
+  });
+
+  console.log('Model:', batchEmbeddings.model);
+  console.log('Usage:', batchEmbeddings.usage);
+  console.log('Embeddings count:', batchEmbeddings.data.length);
+  console.log('Batch embeddings automatically tracked without metadata\n');
+
+  // Example 4: Embeddings with metadata for batch processing
+  console.log('Example 4: Batch embeddings with metadata');
+
+  const metadataBatchEmbeddings = await openai.embeddings.create({
+    model: 'text-embedding-3-small',
+    input: [
+      'Document 1: Streaming responses provide real-time feedback',
+      'Document 2: Metadata enables rich business analytics',
+      'Document 3: Batch processing improves efficiency',
+    ],
+
+    // All metadata fields are optional - perfect for batch operations!
+    usageMetadata: {
+      // User tracking (optional) - nested subscriber object
+      subscriber: {
+        id: 'batch-processor-123',
+        email: 'batch@data-company.com',
+        credential: {
+          name: 'batch-key',
+          value: 'batch-value-456',
+        },
+      },
+
+      // Business context (optional)
+      organizationId: 'data-company',
+      productId: 'document-search',
+
+      // Task classification (optional)
+      taskType: 'batch-document-embedding',
+      traceId: `batch-${Date.now()}`,
+
+      // Custom fields (optional)
+      agent: 'openai-batch-embeddings-metadata-node',
+    },
+  });
+
+  console.log('Model:', metadataBatchEmbeddings.model);
+  console.log('Usage:', metadataBatchEmbeddings.usage);
+  console.log('Embeddings count:', metadataBatchEmbeddings.data.length);
+  console.log('Batch embeddings tracked with metadata for business insights\n');
+
+  // Summary
+  console.log('Summary:');
+  console.log('Streaming responses work seamlessly with metadata');
+  console.log('Usage tracked automatically when streams complete');
+  console.log('Batch embeddings supported with optional metadata');
+  console.log('All metadata fields are optional');
+  console.log('No type casting required - native TypeScript support');
+  console.log('Real-time streaming + comprehensive analytics');
+}

+// Run the example
+openaiStreamingExample().catch(console.error);
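The comments in the hunk above promise that usage is "still automatically tracked when stream completes", which implies the middleware wraps the async iterable the SDK returns. The package's real implementation is src/core/wrapper/stream-wrapper.ts (+100 lines, not shown in this diff); the following is only a generic sketch of that wrap-and-forward pattern, with a hypothetical reportUsage hook:

// Sketch only: forward every chunk unchanged, remember the last usage
// payload seen, and report once the consumer exhausts (or abandons) the stream.
async function* trackStream<T extends { usage?: unknown }>(
  stream: AsyncIterable<T>,
  reportUsage: (usage: unknown) => void // hypothetical reporting hook
): AsyncGenerator<T> {
  let lastUsage: unknown;
  try {
    for await (const chunk of stream) {
      if (chunk.usage) lastUsage = chunk.usage;
      yield chunk; // the caller sees exactly what the SDK produced
    }
  } finally {
    reportUsage(lastUsage); // finally runs on normal completion and early break alike
  }
}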
package/examples/openai-vision.ts
@@ -0,0 +1,289 @@
+/**
+ * OpenAI Vision Example
+ *
+ * Demonstrates how Revenium middleware seamlessly tracks GPT-4o vision API usage
+ * with multimodal inputs (text + images). Shows automatic tracking of:
+ * - Vision API calls with image URLs
+ * - Token usage including image processing tokens
+ * - Cost calculation for vision features
+ * - Different image detail levels (low, high, auto)
+ *
+ * All metadata fields are optional and work seamlessly with vision API!
+ *
+ * For complete metadata field reference, see:
+ * https://revenium.readme.io/reference/meter_ai_completion
+ *
+ * OpenAI Vision API Reference:
+ * https://platform.openai.com/docs/guides/vision
+ */
+
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import OpenAI from 'openai';
+
+async function openAIVisionExample() {
+  console.log('🖼️ OpenAI Vision API with Revenium Tracking\n');
+
+  // Initialize Revenium middleware
+  const initResult = initializeReveniumFromEnv();
+  if (!initResult.success) {
+    console.error('❌ Failed to initialize Revenium:', initResult.message);
+    process.exit(1);
+  }
+
+  // Create and patch OpenAI instance
+  const openai = patchOpenAIInstance(new OpenAI());
+
+  // Sample image URLs for demonstration
+  const imageUrl = 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg';
+
+  // Example 1: Basic vision request without metadata (automatic tracking)
+  console.log('📸 Example 1: Basic vision analysis (automatic tracking)\n');
+
+  const basicVisionResponse = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: [
+      {
+        role: 'user',
+        content: [
+          {
+            type: 'text',
+            text: 'What is in this image? Describe it in one sentence.',
+          },
+          {
+            type: 'image_url',
+            image_url: {
+              url: imageUrl,
+            },
+          },
+        ],
+      },
+    ],
+    max_tokens: 300,
+    // No usageMetadata - still automatically tracked!
+  });
+
+  console.log('AI Response:', basicVisionResponse.choices[0]?.message?.content);
+  console.log('Usage:', basicVisionResponse.usage);
+  console.log('✅ Vision API automatically tracked without metadata\n');
+
+  // Example 2: Vision with detailed image analysis and metadata
+  console.log('🔍 Example 2: High-detail vision analysis with comprehensive metadata\n');
+
+  const detailedVisionResponse = await openai.chat.completions.create({
+    model: 'gpt-4o',
+    messages: [
+      {
+        role: 'user',
+        content: [
+          {
+            type: 'text',
+            text: 'Please analyze this image in detail. Describe the scene, colors, composition, and any notable features.',
+          },
+          {
+            type: 'image_url',
+            image_url: {
+              url: imageUrl,
+              detail: 'high', // High detail for better analysis (costs more tokens)
+            },
+          },
+        ],
+      },
+    ],
+    max_tokens: 500,
+
+    // ✨ All metadata fields are optional - perfect for tracking vision AI!
+    usageMetadata: {
+      subscriber: {
+        id: 'vision-user-123',
+        email: 'vision@company.com',
+        credential: {
+          name: 'api-key',
+          value: 'vision-key-456',
+        },
+      },
+      organizationId: 'image-analysis-corp',
+      productId: 'ai-vision-platform',
+      subscriptionId: 'sub-vision-pro-789',
+      taskType: 'image-analysis-detailed',
+      traceId: `vision-${Date.now()}`,
+      agent: 'vision-analyzer-node',
+      responseQualityScore: 0.96,
+    },
+  });
+
+  console.log('AI Detailed Analysis:', detailedVisionResponse.choices[0]?.message?.content);
+  console.log('Usage:', detailedVisionResponse.usage);
+  console.log('✅ Vision API tracked with rich metadata for image analytics\n');
+
+  // Example 3: Multiple images in one request
+  console.log('🖼️ 🖼️ Example 3: Multiple images with low-detail mode\n');
+
+  const multiImageResponse = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: [
+      {
+        role: 'user',
+        content: [
+          {
+            type: 'text',
+            text: 'Compare these two images. What do they have in common?',
+          },
+          {
+            type: 'image_url',
+            image_url: {
+              url: imageUrl,
+              detail: 'low', // Low detail for cost-effective processing
+            },
+          },
+          {
+            type: 'image_url',
+            image_url: {
+              url: 'https://upload.wikimedia.org/wikipedia/commons/thumb/3/3f/Placeholder_view_vector.svg/310px-Placeholder_view_vector.svg.png',
+              detail: 'low',
+            },
+          },
+        ],
+      },
+    ],
+    max_tokens: 300,
+
+    // ✨ Tracking multi-image requests
+    usageMetadata: {
+      subscriber: {
+        id: 'multi-vision-user-789',
+        email: 'multi@company.com',
+        credential: {
+          name: 'api-key',
+          value: 'multi-key-999',
+        },
+      },
+      organizationId: 'comparison-ai-corp',
+      productId: 'image-comparison-tool',
+      subscriptionId: 'sub-comparison-basic-456',
+      taskType: 'multi-image-comparison',
+      traceId: `multi-vision-${Date.now()}`,
+      agent: 'comparison-analyzer-node',
+      responseQualityScore: 0.88,
+    },
+  });
+
+  console.log('AI Comparison:', multiImageResponse.choices[0]?.message?.content);
+  console.log('Usage:', multiImageResponse.usage);
+  console.log('✅ Multiple images tracked with metadata for comparison analytics\n');
+
+  // Example 4: Vision with conversation context
+  console.log('💬 Example 4: Vision with multi-turn conversation\n');
+
+  const conversationMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
+    {
+      role: 'user',
+      content: [
+        {
+          type: 'text',
+          text: 'What colors are prominent in this image?',
+        },
+        {
+          type: 'image_url',
+          image_url: {
+            url: imageUrl,
+            detail: 'auto', // Auto detail - OpenAI decides optimal level
+          },
+        },
+      ],
+    },
+  ];
+
+  const firstTurnResponse = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: conversationMessages,
+    max_tokens: 200,
+
+    usageMetadata: {
+      subscriber: {
+        id: 'conversation-user-456',
+        email: 'conversation@company.com',
+        credential: {
+          name: 'api-key',
+          value: 'conv-key-123',
+        },
+      },
+      organizationId: 'chat-vision-corp',
+      productId: 'interactive-vision-assistant',
+      subscriptionId: 'sub-interactive-premium-321',
+      taskType: 'conversational-vision',
+      traceId: `conv-vision-${Date.now()}`,
+      agent: 'conversation-vision-node',
+    },
+  });
+
+  console.log('First Turn - AI:', firstTurnResponse.choices[0]?.message?.content);
+  console.log('Usage:', firstTurnResponse.usage);
+
+  // Add AI response to conversation
+  conversationMessages.push({
+    role: 'assistant',
+    content: firstTurnResponse.choices[0]?.message?.content || '',
+  });
+
+  // Follow-up question without image
+  conversationMessages.push({
+    role: 'user',
+    content: 'Based on those colors, what mood does the image convey?',
+  });
+
+  const secondTurnResponse = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: conversationMessages,
+    max_tokens: 200,
+
+    usageMetadata: {
+      subscriber: {
+        id: 'conversation-user-456',
+        email: 'conversation@company.com',
+        credential: {
+          name: 'api-key',
+          value: 'conv-key-123',
+        },
+      },
+      organizationId: 'chat-vision-corp',
+      productId: 'interactive-vision-assistant',
+      subscriptionId: 'sub-interactive-premium-321',
+      taskType: 'conversational-vision-followup',
+      traceId: `conv-vision-${Date.now()}`,
+      agent: 'conversation-vision-node',
+    },
+  });
+
+  console.log('Second Turn - AI:', secondTurnResponse.choices[0]?.message?.content);
+  console.log('Usage:', secondTurnResponse.usage);
+  console.log('✅ Multi-turn vision conversation fully tracked\n');
+
+  // Summary
+  console.log('📈 Vision API Summary:');
+  console.log('✅ Image analysis with URLs fully supported');
+  console.log('✅ Token usage tracked including image processing tokens');
+  console.log('✅ Multiple images in one request work seamlessly');
+  console.log('✅ Detail levels (low, high, auto) all supported');
+  console.log('✅ Multi-turn conversations with image context tracked');
+  console.log('✅ All metadata fields optional and work perfectly');
+  console.log('✅ Cost calculation includes vision-specific tokens');
+  console.log('✅ No type casting required - native TypeScript support\n');
+
+  console.log('💡 Use Cases:');
+  console.log(' - Image content moderation and analysis');
+  console.log(' - Product catalog image descriptions');
+  console.log(' - Document and diagram understanding');
+  console.log(' - Visual question answering systems');
+  console.log(' - Accessibility tools (image descriptions)');
+  console.log(' - Quality control and inspection automation\n');
+
+  console.log('💰 Cost Optimization Tips:');
+  console.log(' - Use "low" detail for simple images to save tokens');
+  console.log(' - Use "high" detail only when fine details matter');
+  console.log(' - Use "auto" to let OpenAI optimize automatically');
+  console.log(' - Revenium tracks all token usage for accurate cost analytics');
+}
+
+// Run the example
+openAIVisionExample().catch(console.error);