@revenium/openai 1.0.10 → 1.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/.env.example +20 -0
  2. package/CHANGELOG.md +52 -0
  3. package/LICENSE +21 -21
  4. package/README.md +682 -1152
  5. package/dist/cjs/core/config/loader.js +1 -1
  6. package/dist/cjs/core/config/loader.js.map +1 -1
  7. package/dist/cjs/core/tracking/api-client.js +1 -1
  8. package/dist/cjs/core/tracking/api-client.js.map +1 -1
  9. package/dist/cjs/index.js +4 -4
  10. package/dist/cjs/index.js.map +1 -1
  11. package/dist/cjs/types/openai-augmentation.js +1 -1
  12. package/dist/cjs/utils/url-builder.js +32 -7
  13. package/dist/cjs/utils/url-builder.js.map +1 -1
  14. package/dist/esm/core/config/loader.js +1 -1
  15. package/dist/esm/core/config/loader.js.map +1 -1
  16. package/dist/esm/core/tracking/api-client.js +1 -1
  17. package/dist/esm/core/tracking/api-client.js.map +1 -1
  18. package/dist/esm/index.js +4 -4
  19. package/dist/esm/index.js.map +1 -1
  20. package/dist/esm/types/openai-augmentation.js +1 -1
  21. package/dist/esm/utils/url-builder.js +32 -7
  22. package/dist/esm/utils/url-builder.js.map +1 -1
  23. package/dist/types/index.d.ts +4 -4
  24. package/dist/types/types/index.d.ts +2 -2
  25. package/dist/types/types/index.d.ts.map +1 -1
  26. package/dist/types/types/openai-augmentation.d.ts +1 -1
  27. package/dist/types/utils/url-builder.d.ts +11 -3
  28. package/dist/types/utils/url-builder.d.ts.map +1 -1
  29. package/examples/README.md +357 -0
  30. package/examples/azure-basic.ts +206 -0
  31. package/examples/azure-responses-basic.ts +233 -0
  32. package/examples/azure-responses-streaming.ts +255 -0
  33. package/examples/azure-streaming.ts +209 -0
  34. package/examples/getting_started.ts +54 -0
  35. package/examples/openai-basic.ts +147 -0
  36. package/examples/openai-function-calling.ts +259 -0
  37. package/examples/openai-responses-basic.ts +212 -0
  38. package/examples/openai-responses-streaming.ts +232 -0
  39. package/examples/openai-streaming.ts +172 -0
  40. package/examples/openai-vision.ts +289 -0
  41. package/package.json +81 -84
  42. package/src/core/config/azure-config.ts +72 -0
  43. package/src/core/config/index.ts +23 -0
  44. package/src/core/config/loader.ts +66 -0
  45. package/src/core/config/manager.ts +94 -0
  46. package/src/core/config/validator.ts +89 -0
  47. package/src/core/providers/detector.ts +159 -0
  48. package/src/core/providers/index.ts +16 -0
  49. package/src/core/tracking/api-client.ts +78 -0
  50. package/src/core/tracking/index.ts +21 -0
  51. package/src/core/tracking/payload-builder.ts +132 -0
  52. package/src/core/tracking/usage-tracker.ts +189 -0
  53. package/src/core/wrapper/index.ts +9 -0
  54. package/src/core/wrapper/instance-patcher.ts +288 -0
  55. package/src/core/wrapper/request-handler.ts +423 -0
  56. package/src/core/wrapper/stream-wrapper.ts +100 -0
  57. package/src/index.ts +336 -0
  58. package/src/types/function-parameters.ts +251 -0
  59. package/src/types/index.ts +313 -0
  60. package/src/types/openai-augmentation.ts +233 -0
  61. package/src/types/responses-api.ts +308 -0
  62. package/src/utils/azure-model-resolver.ts +220 -0
  63. package/src/utils/constants.ts +21 -0
  64. package/src/utils/error-handler.ts +251 -0
  65. package/src/utils/metadata-builder.ts +219 -0
  66. package/src/utils/provider-detection.ts +257 -0
  67. package/src/utils/request-handler-factory.ts +285 -0
  68. package/src/utils/stop-reason-mapper.ts +74 -0
  69. package/src/utils/type-guards.ts +202 -0
  70. package/src/utils/url-builder.ts +68 -0
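Before the raw hunks, a quick orientation: every example file added in this release follows the same pattern — initialize the middleware from environment variables with `initializeReveniumFromEnv`, wrap the client with `patchOpenAIInstance`, and optionally attach a `usageMetadata` object to individual calls. The following is a minimal sketch condensed from the example files shown in the hunks below; it is not itself part of the package, the helper name `quickStart` and the prompt text are illustrative, and it assumes the same AZURE_OPENAI_* environment variables the examples use.

```ts
import 'dotenv/config';
import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
import { AzureOpenAI } from 'openai';

async function quickStart() {
  // Configure Revenium tracking from environment variables
  await initializeReveniumFromEnv();

  // Patch the client instance so completions and embeddings are metered automatically
  const azure = patchOpenAIInstance(
    new AzureOpenAI({
      endpoint: process.env.AZURE_OPENAI_ENDPOINT,
      apiKey: process.env.AZURE_OPENAI_API_KEY,
      apiVersion: process.env.AZURE_OPENAI_API_VERSION || '2024-12-01-preview',
    })
  );

  // usageMetadata is optional; every field shown in the examples below can be omitted
  const completion = await azure.chat.completions.create({
    model: process.env.AZURE_OPENAI_DEPLOYMENT || 'gpt-4o',
    messages: [{ role: 'user', content: 'Say hello from the quick-start sketch.' }],
    usageMetadata: { taskType: 'quick-start-demo' }, // illustrative value
  });

  console.log(completion.choices[0]?.message?.content);
}

quickStart().catch(console.error);
```

Calls without `usageMetadata` are still tracked automatically, as the basic examples in the hunks below demonstrate.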
package/examples/azure-responses-basic.ts
@@ -0,0 +1,233 @@
+/**
+ * Azure OpenAI Responses API Basic Examples
+ *
+ * This file demonstrates how to use the new Azure OpenAI Responses API with the Revenium middleware.
+ * The Responses API is a new stateful API that brings together capabilities from chat completions
+ * and assistants API in one unified experience.
+ *
+ * Metadata Options:
+ * - Start with basic usage (no metadata) - tracking works automatically
+ * - Add subscriber info for user tracking
+ * - Include organization/product IDs for business analytics
+ * - Use task type and trace ID for detailed analysis
+ *
+ * For complete metadata field reference, see:
+ * https://revenium.readme.io/reference/meter_ai_completion
+ *
+ * Responses API Reference: https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/responses
+ */
+
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import OpenAI from 'openai';
+
+// Import types for the new Responses API
+import type { ResponsesCreateParams, ResponsesResponse } from '../src/types/responses-api.js';
+
+async function main() {
+  // Initialize Revenium middleware
+  await initializeReveniumFromEnv();
+
+  // Check for Azure configuration
+  if (
+    !process.env.AZURE_OPENAI_API_KEY ||
+    !process.env.AZURE_OPENAI_ENDPOINT ||
+    !process.env.AZURE_OPENAI_DEPLOYMENT_NAME
+  ) {
+    console.log(
+      'Azure OpenAI configuration missing. Please set AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and AZURE_OPENAI_DEPLOYMENT_NAME'
+    );
+    return;
+  }
+
+  // Create Azure OpenAI client
+  const azure = new OpenAI({
+    apiKey: process.env.AZURE_OPENAI_API_KEY,
+    baseURL: `${process.env.AZURE_OPENAI_ENDPOINT}/openai/deployments/${process.env.AZURE_OPENAI_DEPLOYMENT_NAME}`,
+    defaultQuery: { 'api-version': '2024-10-21' },
+    defaultHeaders: {
+      'api-key': process.env.AZURE_OPENAI_API_KEY,
+    },
+  });
+
+  // Patch the Azure OpenAI instance to add Revenium tracking
+  patchOpenAIInstance(azure);
+
+  console.log('Azure OpenAI Responses API Basic Examples\n');
+
+  // Example 1: Basic Azure Responses API call (no metadata)
+  console.log('Example 1: Basic Azure Responses API call (no metadata)');
+  try {
+    const responsesAPI = azure as any; // Type assertion for new API
+
+    if (responsesAPI.responses?.create) {
+      const response: ResponsesResponse = await responsesAPI.responses.create({
+        model: process.env.AZURE_OPENAI_DEPLOYMENT_NAME || 'gpt-4o',
+        input: 'What are the benefits of using Azure OpenAI Service?',
+      } as ResponsesCreateParams);
+
+      console.log('Response ID:', response.id);
+      console.log('Model:', response.model);
+      console.log('Status:', response.status);
+      console.log('Output Text:', response.output_text);
+      console.log('Usage:', response.usage);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Azure Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 2: Azure Responses API with rich enterprise metadata
+  console.log('Example 2: Azure Responses API with rich enterprise metadata');
+  try {
+    const responsesAPI = azure as any;
+
+    if (responsesAPI.responses?.create) {
+      const response: ResponsesResponse = await responsesAPI.responses.create({
+        model: process.env.AZURE_OPENAI_DEPLOYMENT_NAME || 'gpt-4o',
+        input: [
+          {
+            role: 'user',
+            content: 'Explain Azure AI services integration patterns for enterprise applications.',
+          },
+        ],
+        temperature: 0.6,
+        max_output_tokens: 200,
+        usageMetadata: {
+          // User identification
+          subscriber: {
+            id: 'azure-enterprise-user-123',
+            email: 'enterprise@azurecorp.com',
+            credential: {
+              name: 'api-key-prod',
+              value: 'key-stu-901',
+            },
+          },
+
+          // Organization & billing
+          organizationId: 'azure-enterprise-org-456',
+          subscriptionId: 'plan-azure-responses-2024',
+
+          // Product & task tracking
+          productId: 'azure-ai-integration-assistant',
+          taskType: 'enterprise-architecture-guidance',
+          agent: 'azure-ai-architect',
+
+          // Session tracking
+          traceId: 'azure-trace-789',
+
+          // Quality metrics
+          responseQualityScore: 0.96,
+        },
+      } as ResponsesCreateParams);
+
+      console.log('Response ID:', response.id);
+      console.log('Model:', response.model);
+      console.log('Status:', response.status);
+      console.log('Output Text:', response.output_text?.substring(0, 100) + '...');
+      console.log('Usage:', response.usage);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Azure Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 3: Basic Azure Responses API with string input (no metadata)
+  console.log('Example 3: Basic Azure Responses API with string input (no metadata)');
+  try {
+    const responsesAPI = azure as any;
+
+    if (responsesAPI.responses?.create) {
+      const response: ResponsesResponse = await responsesAPI.responses.create({
+        model: process.env.AZURE_OPENAI_DEPLOYMENT_NAME || 'gpt-4o',
+        input: 'Write a brief summary of Azure OpenAI capabilities.',
+      } as ResponsesCreateParams);
+
+      console.log('Response ID:', response.id);
+      console.log('Model:', response.model);
+      console.log('Status:', response.status);
+      console.log('Output Text:', response.output_text);
+      console.log('Usage:', response.usage);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Azure Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 4: Advanced Azure Responses API with comprehensive enterprise metadata
+  console.log('Example 4: Advanced Azure Responses API with comprehensive enterprise metadata');
+  try {
+    const responsesAPI = azure as any;
+
+    if (responsesAPI.responses?.create) {
+      const response: ResponsesResponse = await responsesAPI.responses.create({
+        model: process.env.AZURE_OPENAI_DEPLOYMENT_NAME || 'gpt-4o',
+        input: [
+          {
+            role: 'user',
+            content:
+              'Provide a comprehensive guide for implementing Azure OpenAI in a multi-tenant SaaS application.',
+          },
+        ],
+        temperature: 0.3,
+        max_output_tokens: 250,
+        instructions:
+          'You are an Azure AI solutions architect providing detailed technical guidance.',
+        usageMetadata: {
+          // User identification
+          subscriber: {
+            id: 'azure-saas-architect-789',
+            email: 'architect@azuresaas.com',
+            credential: {
+              name: 'api-key-prod',
+              value: 'key-vwx-234',
+            },
+          },
+
+          // Organization & billing
+          organizationId: 'azure-saas-enterprise-012',
+          subscriptionId: 'plan-azure-saas-2024',
+
+          // Product & task tracking
+          productId: 'azure-saas-ai-architect',
+          taskType: 'multi-tenant-architecture-design',
+          agent: 'azure-saas-solutions-architect',
+
+          // Session tracking
+          traceId: 'azure-saas-trace-345',
+
+          // Quality metrics
+          responseQualityScore: 0.99,
+        },
+      } as ResponsesCreateParams);
+
+      console.log('Response ID:', response.id);
+      console.log('Model:', response.model);
+      console.log('Status:', response.status);
+      console.log('Output Text:', response.output_text?.substring(0, 150) + '...');
+      console.log('Usage:', response.usage);
+      console.log('Output Array Length:', response.output?.length);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Azure Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\nAll Azure Responses API examples completed!');
+}
+
+main().catch(console.error);
package/examples/azure-responses-streaming.ts
@@ -0,0 +1,255 @@
+/**
+ * Azure OpenAI Responses API Streaming Examples
+ *
+ * This file demonstrates how to use the new Azure OpenAI Responses API with streaming enabled
+ * using the Revenium middleware. The Responses API supports streaming for real-time
+ * response generation in Azure environments.
+ *
+ * Metadata Options:
+ * - Start with basic usage (no metadata) - tracking works automatically
+ * - Add subscriber info for user tracking
+ * - Include organization/product IDs for business analytics
+ * - Use task type and trace ID for detailed analysis
+ *
+ * For complete metadata field reference, see:
+ * https://revenium.readme.io/reference/meter_ai_completion
+ *
+ * Responses API Reference: https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/responses
+ */
+
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import OpenAI from 'openai';
+
+// Import types for the new Responses API
+import type { ResponsesCreateParams } from '../src/types/responses-api.js';
+
+async function main() {
+  // Initialize Revenium middleware
+  await initializeReveniumFromEnv();
+
+  // Check for Azure configuration
+  if (
+    !process.env.AZURE_OPENAI_API_KEY ||
+    !process.env.AZURE_OPENAI_ENDPOINT ||
+    !process.env.AZURE_OPENAI_DEPLOYMENT_NAME
+  ) {
+    console.log(
+      'Azure OpenAI configuration missing. Please set AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and AZURE_OPENAI_DEPLOYMENT_NAME'
+    );
+    return;
+  }
+
+  // Create Azure OpenAI client
+  const azure = new OpenAI({
+    apiKey: process.env.AZURE_OPENAI_API_KEY,
+    baseURL: `${process.env.AZURE_OPENAI_ENDPOINT}/openai/deployments/${process.env.AZURE_OPENAI_DEPLOYMENT_NAME}`,
+    defaultQuery: { 'api-version': '2024-10-21' },
+    defaultHeaders: {
+      'api-key': process.env.AZURE_OPENAI_API_KEY,
+    },
+  });
+
+  // Patch the Azure OpenAI instance to add Revenium tracking
+  patchOpenAIInstance(azure);
+
+  console.log('Azure OpenAI Responses API Streaming Examples\n');
+
+  // Example 1: Basic Azure Responses API streaming (no metadata)
+  console.log('Example 1: Basic Azure Responses API streaming (no metadata)');
+  try {
+    const responsesAPI = azure as any; // Type assertion for new API
+
+    if (responsesAPI.responses?.create) {
+      const stream = await responsesAPI.responses.create({
+        model: process.env.AZURE_OPENAI_DEPLOYMENT_NAME || 'gpt-4o',
+        input: 'Tell me about the advantages of using Azure cloud services for AI workloads.',
+        stream: true,
+      } as ResponsesCreateParams);
+
+      console.log('Streaming Azure response:');
+      for await (const event of stream) {
+        if (event.type === 'response.output_text.delta') {
+          process.stdout.write(event.delta);
+        }
+      }
+      console.log('\nAzure stream completed');
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Azure Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 2: Azure Responses API streaming with rich enterprise metadata
+  console.log('Example 2: Azure Responses API streaming with rich enterprise metadata');
+  try {
+    const responsesAPI = azure as any;
+
+    if (responsesAPI.responses?.create) {
+      const stream = await responsesAPI.responses.create({
+        model: process.env.AZURE_OPENAI_DEPLOYMENT_NAME || 'gpt-4o',
+        input: [
+          {
+            role: 'user',
+            content:
+              'Explain how to implement secure AI solutions using Azure OpenAI with enterprise-grade security.',
+          },
+        ],
+        stream: true,
+        temperature: 0.7,
+        max_output_tokens: 250,
+        usageMetadata: {
+          // User identification
+          subscriber: {
+            id: 'azure-security-expert-123',
+            email: 'security@azureenterprise.com',
+            credential: {
+              name: 'api-key-prod',
+              value: 'key-yza-567',
+            },
+          },
+
+          // Organization & billing
+          organizationId: 'azure-security-org-456',
+          subscriptionId: 'plan-azure-security-2024',
+
+          // Product & task tracking
+          productId: 'azure-ai-security-advisor',
+          taskType: 'enterprise-security-streaming',
+          agent: 'azure-security-architect',
+
+          // Session tracking
+          traceId: 'azure-security-trace-789',
+
+          // Quality metrics
+          responseQualityScore: 0.94,
+        },
+      } as ResponsesCreateParams);
+
+      console.log('Streaming Azure security guidance:');
+      for await (const event of stream) {
+        if (event.type === 'response.output_text.delta') {
+          process.stdout.write(event.delta);
+        }
+      }
+      console.log('\nAzure security stream completed');
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Azure Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 3: Basic Azure Responses API streaming with array input (no metadata)
+  console.log('Example 3: Basic Azure Responses API streaming with array input (no metadata)');
+  try {
+    const responsesAPI = azure as any;
+
+    if (responsesAPI.responses?.create) {
+      const stream = await responsesAPI.responses.create({
+        model: process.env.AZURE_OPENAI_DEPLOYMENT_NAME || 'gpt-4o',
+        input: [
+          {
+            role: 'user',
+            content:
+              'Write a technical overview of Azure AI services and their integration capabilities.',
+          },
+        ],
+        stream: true,
+      } as ResponsesCreateParams);
+
+      console.log('Streaming Azure AI overview:');
+      for await (const event of stream) {
+        if (event.type === 'response.output_text.delta') {
+          process.stdout.write(event.delta);
+        }
+      }
+      console.log('\nAzure AI overview stream completed');
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Azure Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 4: Advanced Azure Responses API streaming with comprehensive enterprise metadata
+  console.log(
+    'Example 4: Advanced Azure Responses API streaming with comprehensive enterprise metadata'
+  );
+  try {
+    const responsesAPI = azure as any;
+
+    if (responsesAPI.responses?.create) {
+      const stream = await responsesAPI.responses.create({
+        model: process.env.AZURE_OPENAI_DEPLOYMENT_NAME || 'gpt-4o',
+        input: [
+          {
+            role: 'user',
+            content:
+              'Provide a detailed implementation guide for building scalable AI applications on Azure with proper monitoring and governance.',
+          },
+        ],
+        stream: true,
+        temperature: 0.4,
+        max_output_tokens: 350,
+        instructions:
+          'You are an Azure solutions architect specializing in scalable AI implementations with enterprise governance.',
+        usageMetadata: {
+          // User identification
+          subscriber: {
+            id: 'azure-enterprise-architect-789',
+            email: 'architect@azureenterprise.com',
+            credential: {
+              name: 'api-key-prod',
+              value: 'key-bcd-890',
+            },
+          },
+
+          // Organization & billing
+          organizationId: 'azure-enterprise-streaming-012',
+          subscriptionId: 'plan-azure-scalable-2024',
+
+          // Product & task tracking
+          productId: 'azure-scalable-ai-architect',
+          taskType: 'enterprise-scalable-ai-streaming',
+          agent: 'azure-enterprise-solutions-architect',
+
+          // Session tracking
+          traceId: 'azure-scalable-trace-345',
+
+          // Quality metrics
+          responseQualityScore: 0.98,
+        },
+      } as ResponsesCreateParams);
+
+      console.log('Advanced Azure enterprise streaming:');
+      let deltaCount = 0;
+      for await (const event of stream) {
+        if (event.type === 'response.output_text.delta') {
+          process.stdout.write(event.delta);
+          deltaCount++;
+        }
+      }
+      console.log(`\nAdvanced Azure enterprise stream completed (${deltaCount} delta events)`);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('Azure Responses API not yet available in this OpenAI SDK version');
+    console.log('Error:', (error as Error).message);
+  }
+
+  console.log('\nAll Azure Responses API streaming examples completed!');
+}
+
+main().catch(console.error);
package/examples/azure-streaming.ts
@@ -0,0 +1,209 @@
+/**
+ * Azure OpenAI Streaming Example
+ *
+ * Shows how to use Revenium middleware with Azure OpenAI streaming responses.
+ * Demonstrates seamless metadata integration with Azure streaming - all metadata fields are optional!
+ *
+ * Metadata Options:
+ * - Start with basic usage (no metadata) - tracking works automatically
+ * - Add subscriber info for user tracking
+ * - Include organization/product IDs for business analytics
+ * - Use task type and trace ID for detailed analysis
+ *
+ * For complete metadata field reference, see:
+ * https://revenium.readme.io/reference/meter_ai_completion
+ */
+
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import { AzureOpenAI } from 'openai';
+
+async function azureStreamingExample() {
+  console.log('Azure OpenAI Streaming with Seamless Metadata Integration\n');
+
+  // Initialize Revenium middleware
+  const initResult = initializeReveniumFromEnv();
+  if (!initResult.success) {
+    console.error('Failed to initialize Revenium:', initResult.message);
+    process.exit(1);
+  }
+
+  // Create Azure OpenAI instance and patch it
+  const azure = patchOpenAIInstance(
+    new AzureOpenAI({
+      endpoint: process.env.AZURE_OPENAI_ENDPOINT,
+      apiKey: process.env.AZURE_OPENAI_API_KEY,
+      apiVersion: process.env.AZURE_OPENAI_API_VERSION || '2024-12-01-preview',
+    })
+  );
+
+  console.log('Azure OpenAI client configured and patched');
+  console.log('Endpoint:', process.env.AZURE_OPENAI_ENDPOINT);
+  console.log('API Version:', process.env.AZURE_OPENAI_API_VERSION || '2024-12-01-preview');
+  console.log();
+
+  // Check if we have a chat model configured
+  const deployment = process.env.AZURE_OPENAI_DEPLOYMENT;
+  const isChatModel = deployment && !deployment.includes('embedding');
+
+  if (!isChatModel) {
+    console.log('Note: Current Azure deployment appears to be for embeddings.');
+    console.log('To test streaming chat, update .env to use a chat model:');
+    console.log('- Comment out the embeddings section');
+    console.log('- Uncomment the chat testing section');
+    console.log('- Set AZURE_OPENAI_DEPLOYMENT=gpt-4o');
+    console.log('\nTesting embeddings instead (no streaming for embeddings)...\n');
+  } else {
+    // Example 1: Basic Azure streaming (no metadata)
+    console.log('Example 1: Basic Azure streaming chat (automatic tracking)');
+    console.log('Assistant: ');
+
+    const basicStream = await azure.chat.completions.create({
+      model: deployment,
+      messages: [
+        { role: 'user', content: 'List 3 advantages of Azure OpenAI over standard OpenAI' },
+      ],
+      stream: true,
+      // No usageMetadata - still automatically tracked with Azure provider info when stream completes!
+      // No max_tokens - let response complete naturally
+    });
+
+    for await (const chunk of basicStream) {
+      const content = chunk.choices[0]?.delta?.content || '';
+      if (content) {
+        process.stdout.write(content);
+      }
+    }
+
+    console.log('\nAzure streaming automatically tracked to Revenium without metadata\n');
+  }
+
+  // Example 2: Azure streaming with rich metadata (all optional!)
+  console.log('Example 2: Azure streaming chat with rich metadata');
+  console.log('Assistant: ');
+
+  const metadataStream = await azure.chat.completions.create({
+    model: deployment || 'gpt-4o',
+    messages: [
+      {
+        role: 'user',
+        content: 'Write a professional summary about Azure OpenAI benefits for enterprises',
+      },
+    ],
+    stream: true,
+
+    // Optional metadata for advanced reporting, lineage tracking, and cost allocation
+    usageMetadata: {
+      // User identification
+      subscriber: {
+        id: 'azure-stream-user-789',
+        email: 'enterprise@company.com',
+        credential: {
+          name: 'api-key-prod',
+          value: 'key-mno-345',
+        },
+      },

+      // Organization & billing
+      organizationId: 'enterprise-corp',
+      subscriptionId: 'plan-azure-stream-2024',
+
+      // Product & task tracking
+      productId: 'azure-ai-consultant',
+      taskType: 'enterprise-consultation',
+      agent: 'azure-streaming-chat-node',
+
+      // Session tracking
+      traceId: 'azure-stream-' + Date.now(),
+
+      // Quality metrics
+      responseQualityScore: 0.95,
+    },
+  });
+
+  for await (const chunk of metadataStream) {
+    const content = chunk.choices[0]?.delta?.content || '';
+    if (content) {
+      process.stdout.write(content);
+    }
+  }
+
+  console.log('\nAzure streaming tracked with rich metadata for enterprise analytics\n');
+
+  // Example 3: Azure batch embeddings (no metadata)
+  console.log('Example 3: Azure batch embeddings (automatic tracking)');
+
+  const batchEmbeddings = await azure.embeddings.create({
+    model: process.env.AZURE_OPENAI_DEPLOYMENT || 'text-embedding-3-large',
+    input: [
+      'Azure OpenAI provides enterprise security and compliance',
+      'Private network access ensures data protection',
+      'Managed identity integration simplifies authentication',
+    ],
+    // No usageMetadata - still automatically tracked with Azure provider info!
+  });
+
+  console.log('Model:', batchEmbeddings.model);
+  console.log('Usage:', batchEmbeddings.usage);
+  console.log('Embeddings count:', batchEmbeddings.data.length);
+  console.log('Azure batch embeddings automatically tracked without metadata\n');
+
+  // Example 4: Azure embeddings with enterprise metadata
+  console.log('Example 4: Azure batch embeddings with enterprise metadata');
+
+  const enterpriseEmbeddings = await azure.embeddings.create({
+    model: process.env.AZURE_OPENAI_DEPLOYMENT || 'text-embedding-3-large',
+    input: [
+      'Enterprise document: Azure OpenAI compliance framework',
+      'Enterprise document: Data residency and sovereignty requirements',
+      'Enterprise document: Integration with Azure Active Directory',
+    ],
+
+    // Optional metadata for advanced reporting, lineage tracking, and cost allocation
+    usageMetadata: {
+      // User identification
+      subscriber: {
+        id: 'azure-enterprise-processor',
+        email: 'processor@enterprise-corp.com',
+        credential: {
+          name: 'api-key-prod',
+          value: 'key-pqr-678',
+        },
+      },
+
+      // Organization & billing
+      organizationId: 'enterprise-corp',
+      subscriptionId: 'plan-azure-enterprise-2024',
+
+      // Product & task tracking
+      productId: 'azure-document-intelligence',
+      taskType: 'enterprise-document-processing',
+      agent: 'azure-batch-embeddings-node',
+
+      // Session tracking
+      traceId: 'azure-enterprise-' + Date.now(),
+
+      // Quality metrics
+      responseQualityScore: 0.96,
+    },
+  });
+
+  console.log('Model:', enterpriseEmbeddings.model);
+  console.log('Usage:', enterpriseEmbeddings.usage);
+  console.log('Embeddings count:', enterpriseEmbeddings.data.length);
+  console.log('Azure enterprise embeddings tracked with comprehensive metadata\n');
+
+  // Summary
+  console.log('Azure OpenAI Streaming Summary:');
+  console.log('Azure streaming responses work seamlessly with metadata');
+  console.log('Usage tracked automatically when Azure streams complete');
+  console.log('Azure batch embeddings supported with optional metadata');
+  console.log('Enterprise-grade tracking with Azure provider metadata');
+  console.log('Model name resolution for accurate Azure pricing');
+  console.log('All metadata fields are optional');
+  console.log('No type casting required - native TypeScript support');
+  console.log('Real-time Azure streaming + comprehensive enterprise analytics');
+}
+
+// Run the example
+azureStreamingExample().catch(console.error);