@revenium/openai 1.0.13 → 1.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (240) hide show
  1. package/.env.example +10 -15
  2. package/CHANGELOG.md +65 -11
  3. package/CODE_OF_CONDUCT.md +57 -0
  4. package/CONTRIBUTING.md +38 -0
  5. package/README.md +104 -216
  6. package/SECURITY.md +34 -0
  7. package/dist/cjs/core/client/index.js +14 -0
  8. package/dist/cjs/core/client/index.js.map +1 -0
  9. package/dist/cjs/core/client/manager.js +109 -0
  10. package/dist/cjs/core/client/manager.js.map +1 -0
  11. package/dist/cjs/core/config/azure-config.js +5 -17
  12. package/dist/cjs/core/config/azure-config.js.map +1 -1
  13. package/dist/cjs/core/config/index.js +2 -2
  14. package/dist/cjs/core/config/index.js.map +1 -1
  15. package/dist/cjs/core/config/loader.js +34 -14
  16. package/dist/cjs/core/config/loader.js.map +1 -1
  17. package/dist/cjs/core/config/manager.js +11 -5
  18. package/dist/cjs/core/config/manager.js.map +1 -1
  19. package/dist/cjs/core/config/validator.js +3 -45
  20. package/dist/cjs/core/config/validator.js.map +1 -1
  21. package/dist/cjs/core/middleware/index.js +21 -0
  22. package/dist/cjs/core/middleware/index.js.map +1 -0
  23. package/dist/cjs/core/middleware/interfaces.js +454 -0
  24. package/dist/cjs/core/middleware/interfaces.js.map +1 -0
  25. package/dist/cjs/core/middleware/revenium-client.js +152 -0
  26. package/dist/cjs/core/middleware/revenium-client.js.map +1 -0
  27. package/dist/cjs/core/providers/detector.js +45 -23
  28. package/dist/cjs/core/providers/detector.js.map +1 -1
  29. package/dist/cjs/core/providers/index.js +2 -1
  30. package/dist/cjs/core/providers/index.js.map +1 -1
  31. package/dist/cjs/core/tracking/api-client.js +21 -14
  32. package/dist/cjs/core/tracking/api-client.js.map +1 -1
  33. package/dist/cjs/core/tracking/index.js +5 -1
  34. package/dist/cjs/core/tracking/index.js.map +1 -1
  35. package/dist/cjs/core/tracking/payload-builder.js +143 -25
  36. package/dist/cjs/core/tracking/payload-builder.js.map +1 -1
  37. package/dist/cjs/core/tracking/usage-tracker.js +111 -18
  38. package/dist/cjs/core/tracking/usage-tracker.js.map +1 -1
  39. package/dist/cjs/index.js +39 -202
  40. package/dist/cjs/index.js.map +1 -1
  41. package/dist/cjs/types/index.js +0 -8
  42. package/dist/cjs/types/index.js.map +1 -1
  43. package/dist/cjs/types/openai-augmentation.js +0 -49
  44. package/dist/cjs/types/openai-augmentation.js.map +1 -1
  45. package/dist/cjs/utils/constants.js +17 -20
  46. package/dist/cjs/utils/constants.js.map +1 -1
  47. package/dist/cjs/utils/error-handler.js +18 -14
  48. package/dist/cjs/utils/error-handler.js.map +1 -1
  49. package/dist/cjs/utils/metadata-builder.js +17 -16
  50. package/dist/cjs/utils/metadata-builder.js.map +1 -1
  51. package/dist/cjs/utils/provider-detection.js +25 -28
  52. package/dist/cjs/utils/provider-detection.js.map +1 -1
  53. package/dist/cjs/utils/trace-fields.js +115 -0
  54. package/dist/cjs/utils/trace-fields.js.map +1 -0
  55. package/dist/esm/core/client/index.js +6 -0
  56. package/dist/esm/core/client/index.js.map +1 -0
  57. package/dist/esm/core/client/manager.js +102 -0
  58. package/dist/esm/core/client/manager.js.map +1 -0
  59. package/dist/esm/core/config/azure-config.js +6 -18
  60. package/dist/esm/core/config/azure-config.js.map +1 -1
  61. package/dist/esm/core/config/index.js +5 -4
  62. package/dist/esm/core/config/index.js.map +1 -1
  63. package/dist/esm/core/config/loader.js +33 -13
  64. package/dist/esm/core/config/loader.js.map +1 -1
  65. package/dist/esm/core/config/manager.js +13 -7
  66. package/dist/esm/core/config/manager.js.map +1 -1
  67. package/dist/esm/core/config/validator.js +3 -44
  68. package/dist/esm/core/config/validator.js.map +1 -1
  69. package/dist/esm/core/middleware/index.js +8 -0
  70. package/dist/esm/core/middleware/index.js.map +1 -0
  71. package/dist/esm/core/middleware/interfaces.js +442 -0
  72. package/dist/esm/core/middleware/interfaces.js.map +1 -0
  73. package/dist/esm/core/middleware/revenium-client.js +115 -0
  74. package/dist/esm/core/middleware/revenium-client.js.map +1 -0
  75. package/dist/esm/core/providers/detector.js +43 -22
  76. package/dist/esm/core/providers/detector.js.map +1 -1
  77. package/dist/esm/core/providers/index.js +2 -2
  78. package/dist/esm/core/providers/index.js.map +1 -1
  79. package/dist/esm/core/tracking/api-client.js +20 -13
  80. package/dist/esm/core/tracking/api-client.js.map +1 -1
  81. package/dist/esm/core/tracking/index.js +4 -4
  82. package/dist/esm/core/tracking/index.js.map +1 -1
  83. package/dist/esm/core/tracking/payload-builder.js +142 -26
  84. package/dist/esm/core/tracking/payload-builder.js.map +1 -1
  85. package/dist/esm/core/tracking/usage-tracker.js +78 -20
  86. package/dist/esm/core/tracking/usage-tracker.js.map +1 -1
  87. package/dist/esm/index.js +9 -177
  88. package/dist/esm/index.js.map +1 -1
  89. package/dist/esm/types/index.js +2 -10
  90. package/dist/esm/types/index.js.map +1 -1
  91. package/dist/esm/types/openai-augmentation.js +0 -49
  92. package/dist/esm/types/openai-augmentation.js.map +1 -1
  93. package/dist/esm/utils/constants.js +16 -19
  94. package/dist/esm/utils/constants.js.map +1 -1
  95. package/dist/esm/utils/error-handler.js +19 -15
  96. package/dist/esm/utils/error-handler.js.map +1 -1
  97. package/dist/esm/utils/metadata-builder.js +17 -16
  98. package/dist/esm/utils/metadata-builder.js.map +1 -1
  99. package/dist/esm/utils/provider-detection.js +26 -29
  100. package/dist/esm/utils/provider-detection.js.map +1 -1
  101. package/dist/esm/utils/trace-fields.js +100 -0
  102. package/dist/esm/utils/trace-fields.js.map +1 -0
  103. package/dist/types/core/client/index.d.ts +6 -0
  104. package/dist/types/core/client/index.d.ts.map +1 -0
  105. package/dist/types/core/client/manager.d.ts +32 -0
  106. package/dist/types/core/client/manager.d.ts.map +1 -0
  107. package/dist/types/core/config/azure-config.d.ts +2 -2
  108. package/dist/types/core/config/azure-config.d.ts.map +1 -1
  109. package/dist/types/core/config/index.d.ts +4 -4
  110. package/dist/types/core/config/index.d.ts.map +1 -1
  111. package/dist/types/core/config/loader.d.ts +3 -1
  112. package/dist/types/core/config/loader.d.ts.map +1 -1
  113. package/dist/types/core/config/manager.d.ts +1 -1
  114. package/dist/types/core/config/manager.d.ts.map +1 -1
  115. package/dist/types/core/config/validator.d.ts +1 -12
  116. package/dist/types/core/config/validator.d.ts.map +1 -1
  117. package/dist/types/core/middleware/index.d.ts +8 -0
  118. package/dist/types/core/middleware/index.d.ts.map +1 -0
  119. package/dist/types/core/middleware/interfaces.d.ts +104 -0
  120. package/dist/types/core/middleware/interfaces.d.ts.map +1 -0
  121. package/dist/types/core/middleware/revenium-client.d.ts +64 -0
  122. package/dist/types/core/middleware/revenium-client.d.ts.map +1 -0
  123. package/dist/types/core/providers/detector.d.ts +9 -2
  124. package/dist/types/core/providers/detector.d.ts.map +1 -1
  125. package/dist/types/core/providers/index.d.ts +2 -2
  126. package/dist/types/core/providers/index.d.ts.map +1 -1
  127. package/dist/types/core/tracking/api-client.d.ts +1 -1
  128. package/dist/types/core/tracking/api-client.d.ts.map +1 -1
  129. package/dist/types/core/tracking/index.d.ts +4 -4
  130. package/dist/types/core/tracking/index.d.ts.map +1 -1
  131. package/dist/types/core/tracking/payload-builder.d.ts +5 -3
  132. package/dist/types/core/tracking/payload-builder.d.ts.map +1 -1
  133. package/dist/types/core/tracking/usage-tracker.d.ts +4 -2
  134. package/dist/types/core/tracking/usage-tracker.d.ts.map +1 -1
  135. package/dist/types/index.d.ts +11 -135
  136. package/dist/types/index.d.ts.map +1 -1
  137. package/dist/types/types/function-parameters.d.ts +91 -23
  138. package/dist/types/types/function-parameters.d.ts.map +1 -1
  139. package/dist/types/types/index.d.ts +53 -108
  140. package/dist/types/types/index.d.ts.map +1 -1
  141. package/dist/types/types/openai-augmentation.d.ts +4 -138
  142. package/dist/types/types/openai-augmentation.d.ts.map +1 -1
  143. package/dist/types/utils/constants.d.ts +7 -1
  144. package/dist/types/utils/constants.d.ts.map +1 -1
  145. package/dist/types/utils/error-handler.d.ts +2 -2
  146. package/dist/types/utils/error-handler.d.ts.map +1 -1
  147. package/dist/types/utils/metadata-builder.d.ts +2 -2
  148. package/dist/types/utils/metadata-builder.d.ts.map +1 -1
  149. package/dist/types/utils/provider-detection.d.ts +3 -3
  150. package/dist/types/utils/provider-detection.d.ts.map +1 -1
  151. package/dist/types/utils/trace-fields.d.ts +11 -0
  152. package/dist/types/utils/trace-fields.d.ts.map +1 -0
  153. package/examples/README.md +282 -198
  154. package/examples/azure/basic.ts +62 -0
  155. package/examples/azure/responses-basic.ts +45 -0
  156. package/examples/azure/responses-stream.ts +61 -0
  157. package/examples/azure/stream.ts +56 -0
  158. package/examples/getting_started.ts +31 -43
  159. package/examples/openai/basic.ts +45 -0
  160. package/examples/openai/metadata.ts +67 -0
  161. package/examples/openai/responses-basic.ts +44 -0
  162. package/examples/openai/responses-embed.ts +34 -0
  163. package/examples/openai/responses-streaming.ts +63 -0
  164. package/examples/openai/streaming.ts +59 -0
  165. package/package.json +23 -13
  166. package/dist/cjs/core/wrapper/index.js +0 -15
  167. package/dist/cjs/core/wrapper/index.js.map +0 -1
  168. package/dist/cjs/core/wrapper/instance-patcher.js +0 -202
  169. package/dist/cjs/core/wrapper/instance-patcher.js.map +0 -1
  170. package/dist/cjs/core/wrapper/request-handler.js +0 -317
  171. package/dist/cjs/core/wrapper/request-handler.js.map +0 -1
  172. package/dist/cjs/core/wrapper/stream-wrapper.js +0 -82
  173. package/dist/cjs/core/wrapper/stream-wrapper.js.map +0 -1
  174. package/dist/cjs/utils/azure-model-resolver.js +0 -211
  175. package/dist/cjs/utils/azure-model-resolver.js.map +0 -1
  176. package/dist/cjs/utils/request-handler-factory.js +0 -185
  177. package/dist/cjs/utils/request-handler-factory.js.map +0 -1
  178. package/dist/esm/core/wrapper/index.js +0 -9
  179. package/dist/esm/core/wrapper/index.js.map +0 -1
  180. package/dist/esm/core/wrapper/instance-patcher.js +0 -199
  181. package/dist/esm/core/wrapper/instance-patcher.js.map +0 -1
  182. package/dist/esm/core/wrapper/request-handler.js +0 -310
  183. package/dist/esm/core/wrapper/request-handler.js.map +0 -1
  184. package/dist/esm/core/wrapper/stream-wrapper.js +0 -79
  185. package/dist/esm/core/wrapper/stream-wrapper.js.map +0 -1
  186. package/dist/esm/utils/azure-model-resolver.js +0 -204
  187. package/dist/esm/utils/azure-model-resolver.js.map +0 -1
  188. package/dist/esm/utils/request-handler-factory.js +0 -146
  189. package/dist/esm/utils/request-handler-factory.js.map +0 -1
  190. package/dist/types/core/wrapper/index.d.ts +0 -8
  191. package/dist/types/core/wrapper/index.d.ts.map +0 -1
  192. package/dist/types/core/wrapper/instance-patcher.d.ts +0 -33
  193. package/dist/types/core/wrapper/instance-patcher.d.ts.map +0 -1
  194. package/dist/types/core/wrapper/request-handler.d.ts +0 -29
  195. package/dist/types/core/wrapper/request-handler.d.ts.map +0 -1
  196. package/dist/types/core/wrapper/stream-wrapper.d.ts +0 -13
  197. package/dist/types/core/wrapper/stream-wrapper.d.ts.map +0 -1
  198. package/dist/types/utils/azure-model-resolver.d.ts +0 -41
  199. package/dist/types/utils/azure-model-resolver.d.ts.map +0 -1
  200. package/dist/types/utils/request-handler-factory.d.ts +0 -81
  201. package/dist/types/utils/request-handler-factory.d.ts.map +0 -1
  202. package/examples/azure-basic.ts +0 -206
  203. package/examples/azure-responses-basic.ts +0 -233
  204. package/examples/azure-responses-streaming.ts +0 -255
  205. package/examples/azure-streaming.ts +0 -209
  206. package/examples/openai-basic.ts +0 -147
  207. package/examples/openai-function-calling.ts +0 -259
  208. package/examples/openai-responses-basic.ts +0 -212
  209. package/examples/openai-responses-streaming.ts +0 -232
  210. package/examples/openai-streaming.ts +0 -172
  211. package/examples/openai-vision.ts +0 -289
  212. package/src/core/config/azure-config.ts +0 -72
  213. package/src/core/config/index.ts +0 -23
  214. package/src/core/config/loader.ts +0 -66
  215. package/src/core/config/manager.ts +0 -95
  216. package/src/core/config/validator.ts +0 -89
  217. package/src/core/providers/detector.ts +0 -159
  218. package/src/core/providers/index.ts +0 -16
  219. package/src/core/tracking/api-client.ts +0 -78
  220. package/src/core/tracking/index.ts +0 -21
  221. package/src/core/tracking/payload-builder.ts +0 -137
  222. package/src/core/tracking/usage-tracker.ts +0 -189
  223. package/src/core/wrapper/index.ts +0 -9
  224. package/src/core/wrapper/instance-patcher.ts +0 -288
  225. package/src/core/wrapper/request-handler.ts +0 -423
  226. package/src/core/wrapper/stream-wrapper.ts +0 -100
  227. package/src/index.ts +0 -360
  228. package/src/types/function-parameters.ts +0 -251
  229. package/src/types/index.ts +0 -310
  230. package/src/types/openai-augmentation.ts +0 -232
  231. package/src/types/responses-api.ts +0 -308
  232. package/src/utils/azure-model-resolver.ts +0 -220
  233. package/src/utils/constants.ts +0 -21
  234. package/src/utils/error-handler.ts +0 -251
  235. package/src/utils/metadata-builder.ts +0 -228
  236. package/src/utils/provider-detection.ts +0 -257
  237. package/src/utils/request-handler-factory.ts +0 -285
  238. package/src/utils/stop-reason-mapper.ts +0 -78
  239. package/src/utils/type-guards.ts +0 -202
  240. package/src/utils/url-builder.ts +0 -68
@@ -1,232 +0,0 @@
1
- /**
2
- * OpenAI Responses API Streaming Examples
3
- *
4
- * This file demonstrates how to use the new OpenAI Responses API with streaming enabled
5
- * using the Revenium middleware. The Responses API supports streaming for real-time
6
- * response generation.
7
- *
8
- * Metadata Options:
9
- * - Start with basic usage (no metadata) - tracking works automatically
10
- * - Add subscriber info for user tracking
11
- * - Include organization/product IDs for business analytics
12
- * - Use task type and trace ID for detailed analysis
13
- *
14
- * For complete metadata field reference, see:
15
- * https://revenium.readme.io/reference/meter_ai_completion
16
- *
17
- * Responses API Reference: https://platform.openai.com/docs/api-reference/responses
18
- */
19
-
20
- import 'dotenv/config';
21
- import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
22
- import OpenAI from 'openai';
23
-
24
- // Import types for the new Responses API
25
- import type { ResponsesCreateParams } from '../src/types/responses-api.js';
26
-
27
- async function main() {
28
- // Initialize Revenium middleware
29
- await initializeReveniumFromEnv();
30
-
31
- // Create OpenAI client
32
- const openai = new OpenAI({
33
- apiKey: process.env.OPENAI_API_KEY,
34
- });
35
-
36
- // Patch the OpenAI instance to add Revenium tracking
37
- patchOpenAIInstance(openai);
38
-
39
- console.log(' OpenAI Responses API Streaming Examples\n');
40
-
41
- // Example 1: Basic Responses API streaming (no metadata)
42
- console.log(' Example 1: Basic Responses API streaming (no metadata)');
43
- try {
44
- const responsesAPI = openai as any; // Type assertion for new API
45
-
46
- if (responsesAPI.responses?.create) {
47
- const stream = await responsesAPI.responses.create({
48
- model: 'gpt-5',
49
- input: 'Tell me a short story about a robot learning to paint.',
50
- stream: true,
51
- } as ResponsesCreateParams);
52
-
53
- console.log('Streaming response:');
54
- for await (const event of stream) {
55
- if (event.type === 'response.output_text.delta') {
56
- process.stdout.write(event.delta);
57
- }
58
- }
59
- console.log('\n Stream completed');
60
- } else {
61
- throw new Error('Responses API not available');
62
- }
63
- } catch (error) {
64
- console.log('️ Responses API not yet available in this OpenAI SDK version');
65
- console.log(' Error:', (error as Error).message);
66
- }
67
-
68
- console.log('\n' + '='.repeat(50) + '\n');
69
-
70
- // Example 2: Responses API streaming with rich metadata
71
- console.log(' Example 2: Responses API streaming with rich metadata');
72
- try {
73
- const responsesAPI = openai as any;
74
-
75
- if (responsesAPI.responses?.create) {
76
- const stream = await responsesAPI.responses.create({
77
- model: 'gpt-5',
78
- input: [
79
- {
80
- role: 'user',
81
- content: 'Explain the concept of machine learning in a conversational way.',
82
- },
83
- ],
84
- stream: true,
85
- max_output_tokens: 200,
86
- usageMetadata: {
87
- // User identification
88
- subscriber: {
89
- id: 'streaming-user-123',
90
- email: 'streaming@example.com',
91
- credential: {
92
- name: 'api-key-prod',
93
- value: 'key-klm-789',
94
- },
95
- },
96
-
97
- // Organization & billing
98
- organizationId: 'streaming-org-456',
99
- subscriptionId: 'plan-streaming-edu-2024',
100
-
101
- // Product & task tracking
102
- productId: 'ml-educator',
103
- taskType: 'educational-streaming',
104
- agent: 'ml-tutor-stream',
105
-
106
- // Session tracking
107
- traceId: 'stream-trace-789',
108
-
109
- // Quality metrics
110
- responseQualityScore: 0.92, // 0.0-1.0 scale
111
- },
112
- } as ResponsesCreateParams);
113
-
114
- console.log('Streaming response with metadata:');
115
- for await (const event of stream) {
116
- if (event.type === 'response.output_text.delta') {
117
- process.stdout.write(event.delta);
118
- }
119
- }
120
- console.log('\n Stream with metadata completed');
121
- } else {
122
- throw new Error('Responses API not available');
123
- }
124
- } catch (error) {
125
- console.log('️ Responses API not yet available in this OpenAI SDK version');
126
- console.log(' Error:', (error as Error).message);
127
- }
128
-
129
- console.log('\n' + '='.repeat(50) + '\n');
130
-
131
- // Example 3: Basic Responses API streaming with array input (no metadata)
132
- console.log(' Example 3: Basic Responses API streaming with array input (no metadata)');
133
- try {
134
- const responsesAPI = openai as any;
135
-
136
- if (responsesAPI.responses?.create) {
137
- const stream = await responsesAPI.responses.create({
138
- model: 'gpt-5',
139
- input: [
140
- {
141
- role: 'user',
142
- content: 'Write a poem about the beauty of code.',
143
- },
144
- ],
145
- stream: true,
146
- } as ResponsesCreateParams);
147
-
148
- console.log('Streaming poetry:');
149
- for await (const event of stream) {
150
- if (event.type === 'response.output_text.delta') {
151
- process.stdout.write(event.delta);
152
- }
153
- }
154
- console.log('\n Poetry stream completed');
155
- } else {
156
- throw new Error('Responses API not available');
157
- }
158
- } catch (error) {
159
- console.log('️ Responses API not yet available in this OpenAI SDK version');
160
- console.log(' Error:', (error as Error).message);
161
- }
162
-
163
- console.log('\n' + '='.repeat(50) + '\n');
164
-
165
- // Example 4: Advanced Responses API streaming with comprehensive metadata
166
- console.log(' Example 4: Advanced Responses API streaming with comprehensive metadata');
167
- try {
168
- const responsesAPI = openai as any;
169
-
170
- if (responsesAPI.responses?.create) {
171
- const stream = await responsesAPI.responses.create({
172
- model: 'gpt-5',
173
- input: [
174
- {
175
- role: 'user',
176
- content:
177
- 'Provide a detailed explanation of how streaming APIs work in real-time applications.',
178
- },
179
- ],
180
- stream: true,
181
- max_output_tokens: 300,
182
- instructions:
183
- 'You are a technical expert explaining streaming APIs with practical examples.',
184
- usageMetadata: {
185
- // User identification
186
- subscriber: {
187
- id: 'advanced-streaming-user-789',
188
- email: 'advanced@enterprise.com',
189
- credential: {
190
- name: 'api-key-prod',
191
- value: 'key-nop-012',
192
- },
193
- },
194
-
195
- // Organization & billing
196
- organizationId: 'enterprise-streaming-org-012',
197
- subscriptionId: 'plan-enterprise-stream-2024',
198
-
199
- // Product & task tracking
200
- productId: 'streaming-api-educator',
201
- taskType: 'advanced-technical-streaming',
202
- agent: 'streaming-expert',
203
-
204
- // Session tracking
205
- traceId: 'advanced-stream-trace-345',
206
-
207
- // Quality metrics
208
- responseQualityScore: 0.97, // 0.0-1.0 scale
209
- },
210
- } as ResponsesCreateParams);
211
-
212
- console.log('Advanced streaming response:');
213
- let deltaCount = 0;
214
- for await (const event of stream) {
215
- if (event.type === 'response.output_text.delta') {
216
- process.stdout.write(event.delta);
217
- deltaCount++;
218
- }
219
- }
220
- console.log(`\n Advanced stream completed (${deltaCount} delta events)`);
221
- } else {
222
- throw new Error('Responses API not available');
223
- }
224
- } catch (error) {
225
- console.log('️ Responses API not yet available in this OpenAI SDK version');
226
- console.log(' Error:', (error as Error).message);
227
- }
228
-
229
- console.log('\n All Responses API streaming examples completed!');
230
- }
231
-
232
- main().catch(console.error);
@@ -1,172 +0,0 @@
1
- /**
2
- * OpenAI Streaming Example
3
- *
4
- * Shows how to use Revenium middleware with streaming OpenAI responses and batch embeddings.
5
- * Demonstrates seamless metadata integration with streaming - all metadata fields are optional!
6
- *
7
- * Metadata Options:
8
- * - Start with basic usage (no metadata) - tracking works automatically
9
- * - Add subscriber info for user tracking
10
- * - Include organization/product IDs for business analytics
11
- * - Use task type and trace ID for detailed analysis
12
- *
13
- * For complete metadata field reference, see:
14
- * https://revenium.readme.io/reference/meter_ai_completion
15
- */
16
-
17
- import 'dotenv/config';
18
- import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
19
- import OpenAI from 'openai';
20
-
21
- async function openaiStreamingExample() {
22
- console.log(' OpenAI Streaming with Seamless Metadata Integration\n');
23
-
24
- // Initialize Revenium middleware
25
- const initResult = initializeReveniumFromEnv();
26
- if (!initResult.success) {
27
- console.error(' Failed to initialize Revenium:', initResult.message);
28
- process.exit(1);
29
- }
30
-
31
- // Create and patch OpenAI instance
32
- const openai = patchOpenAIInstance(new OpenAI());
33
-
34
- // Example 1: Basic streaming (no metadata)
35
- console.log(' Example 1: Basic streaming chat (automatic tracking)');
36
- console.log(' Assistant: ');
37
-
38
- const basicStream = await openai.chat.completions.create({
39
- model: 'gpt-4o-mini',
40
- messages: [{ role: 'user', content: 'Count from 1 to 5 slowly' }],
41
- stream: true,
42
- // No usageMetadata - still automatically tracked when stream completes!
43
- // No max_tokens - let response complete naturally
44
- });
45
-
46
- for await (const chunk of basicStream) {
47
- const content = chunk.choices[0]?.delta?.content || '';
48
- if (content) {
49
- process.stdout.write(content);
50
- }
51
- }
52
-
53
- console.log('\n Streaming automatically tracked to Revenium without metadata\n');
54
-
55
- // Example 2: Streaming with rich metadata (all optional!)
56
- console.log(' Example 2: Streaming chat with rich metadata');
57
- console.log(' Assistant: ');
58
-
59
- const metadataStream = await openai.chat.completions.create({
60
- model: 'gpt-4o-mini',
61
- messages: [{ role: 'user', content: 'Write a haiku about middleware' }],
62
- stream: true,
63
-
64
- // Optional metadata for advanced reporting, lineage tracking, and cost allocation
65
- usageMetadata: {
66
- // User identification
67
- subscriber: {
68
- id: 'streaming-user-456',
69
- email: 'poet@company.com',
70
- credential: {
71
- name: 'api-key-prod',
72
- value: 'key-ghi-789',
73
- },
74
- },
75
-
76
- // Organization & billing
77
- organizationId: 'creative-company',
78
- subscriptionId: 'plan-creative-2024',
79
-
80
- // Product & task tracking
81
- productId: 'ai-poet',
82
- taskType: 'creative-writing',
83
- agent: 'openai-streaming-chat-node',
84
-
85
- // Session tracking
86
- traceId: 'stream-' + Date.now(),
87
-
88
- // Quality metrics
89
- responseQualityScore: 0.92, // 0.0-1.0 scale
90
- },
91
- });
92
-
93
- for await (const chunk of metadataStream) {
94
- const content = chunk.choices[0]?.delta?.content || '';
95
- if (content) {
96
- process.stdout.write(content);
97
- }
98
- }
99
-
100
- console.log('\n Streaming tracked with rich metadata for analytics\n');
101
-
102
- // Example 3: Batch embeddings (no metadata)
103
- console.log(' Example 3: Batch embeddings (automatic tracking)');
104
-
105
- const batchEmbeddings = await openai.embeddings.create({
106
- model: 'text-embedding-3-small',
107
- input: [
108
- 'First document for batch processing',
109
- 'Second document for batch processing',
110
- 'Third document for batch processing',
111
- ],
112
- // No usageMetadata - still automatically tracked!
113
- });
114
-
115
- console.log(' Model:', batchEmbeddings.model);
116
- console.log(' Usage:', batchEmbeddings.usage);
117
- console.log(' Embeddings count:', batchEmbeddings.data.length);
118
- console.log(' Batch embeddings automatically tracked without metadata\n');
119
-
120
- // Example 4: Embeddings with metadata for batch processing
121
- console.log(' Example 4: Batch embeddings with metadata');
122
-
123
- const metadataBatchEmbeddings = await openai.embeddings.create({
124
- model: 'text-embedding-3-small',
125
- input: [
126
- 'Document 1: Streaming responses provide real-time feedback',
127
- 'Document 2: Metadata enables rich business analytics',
128
- 'Document 3: Batch processing improves efficiency',
129
- ],
130
-
131
- // All metadata fields are optional - perfect for batch operations!
132
- usageMetadata: {
133
- // User tracking (optional) - nested subscriber object
134
- subscriber: {
135
- id: 'batch-processor-123',
136
- email: 'batch@data-company.com',
137
- credential: {
138
- name: 'batch-key',
139
- value: 'batch-value-456',
140
- },
141
- },
142
-
143
- // Business context (optional)
144
- organizationId: 'data-company',
145
- productId: 'document-search',
146
-
147
- // Task classification (optional)
148
- taskType: 'batch-document-embedding',
149
- traceId: `batch-${Date.now()}`,
150
-
151
- // Custom fields (optional)
152
- agent: 'openai-batch-embeddings-metadata-node',
153
- },
154
- });
155
-
156
- console.log(' Model:', metadataBatchEmbeddings.model);
157
- console.log(' Usage:', metadataBatchEmbeddings.usage);
158
- console.log(' Embeddings count:', metadataBatchEmbeddings.data.length);
159
- console.log(' Batch embeddings tracked with metadata for business insights\n');
160
-
161
- // Summary
162
- console.log(' Summary:');
163
- console.log(' Streaming responses work seamlessly with metadata');
164
- console.log(' Usage tracked automatically when streams complete');
165
- console.log(' Batch embeddings supported with optional metadata');
166
- console.log(' All metadata fields are optional');
167
- console.log(' No type casting required - native TypeScript support');
168
- console.log(' Real-time streaming + comprehensive analytics');
169
- }
170
-
171
- // Run the example
172
- openaiStreamingExample().catch(console.error);
@@ -1,289 +0,0 @@
1
- /**
2
- * OpenAI Vision Example
3
- *
4
- * Demonstrates how Revenium middleware seamlessly tracks GPT-4o vision API usage
5
- * with multimodal inputs (text + images). Shows automatic tracking of:
6
- * - Vision API calls with image URLs
7
- * - Token usage including image processing tokens
8
- * - Cost calculation for vision features
9
- * - Different image detail levels (low, high, auto)
10
- *
11
- * All metadata fields are optional and work seamlessly with vision API!
12
- *
13
- * For complete metadata field reference, see:
14
- * https://revenium.readme.io/reference/meter_ai_completion
15
- *
16
- * OpenAI Vision API Reference:
17
- * https://platform.openai.com/docs/guides/vision
18
- */
19
-
20
- import 'dotenv/config';
21
- import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
22
- import OpenAI from 'openai';
23
-
24
- async function openAIVisionExample() {
25
- console.log('🖼️ OpenAI Vision API with Revenium Tracking\n');
26
-
27
- // Initialize Revenium middleware
28
- const initResult = initializeReveniumFromEnv();
29
- if (!initResult.success) {
30
- console.error('❌ Failed to initialize Revenium:', initResult.message);
31
- process.exit(1);
32
- }
33
-
34
- // Create and patch OpenAI instance
35
- const openai = patchOpenAIInstance(new OpenAI());
36
-
37
- // Sample image URLs for demonstration
38
- const imageUrl = 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg';
39
-
40
- // Example 1: Basic vision request without metadata (automatic tracking)
41
- console.log('📸 Example 1: Basic vision analysis (automatic tracking)\n');
42
-
43
- const basicVisionResponse = await openai.chat.completions.create({
44
- model: 'gpt-4o-mini',
45
- messages: [
46
- {
47
- role: 'user',
48
- content: [
49
- {
50
- type: 'text',
51
- text: 'What is in this image? Describe it in one sentence.',
52
- },
53
- {
54
- type: 'image_url',
55
- image_url: {
56
- url: imageUrl,
57
- },
58
- },
59
- ],
60
- },
61
- ],
62
- max_tokens: 300,
63
- // No usageMetadata - still automatically tracked!
64
- });
65
-
66
- console.log('AI Response:', basicVisionResponse.choices[0]?.message?.content);
67
- console.log('Usage:', basicVisionResponse.usage);
68
- console.log('✅ Vision API automatically tracked without metadata\n');
69
-
70
- // Example 2: Vision with detailed image analysis and metadata
71
- console.log('🔍 Example 2: High-detail vision analysis with comprehensive metadata\n');
72
-
73
- const detailedVisionResponse = await openai.chat.completions.create({
74
- model: 'gpt-4o',
75
- messages: [
76
- {
77
- role: 'user',
78
- content: [
79
- {
80
- type: 'text',
81
- text: 'Please analyze this image in detail. Describe the scene, colors, composition, and any notable features.',
82
- },
83
- {
84
- type: 'image_url',
85
- image_url: {
86
- url: imageUrl,
87
- detail: 'high', // High detail for better analysis (costs more tokens)
88
- },
89
- },
90
- ],
91
- },
92
- ],
93
- max_tokens: 500,
94
-
95
- // ✨ All metadata fields are optional - perfect for tracking vision AI!
96
- usageMetadata: {
97
- subscriber: {
98
- id: 'vision-user-123',
99
- email: 'vision@company.com',
100
- credential: {
101
- name: 'api-key',
102
- value: 'vision-key-456',
103
- },
104
- },
105
- organizationId: 'image-analysis-corp',
106
- productId: 'ai-vision-platform',
107
- subscriptionId: 'sub-vision-pro-789',
108
- taskType: 'image-analysis-detailed',
109
- traceId: `vision-${Date.now()}`,
110
- agent: 'vision-analyzer-node',
111
- responseQualityScore: 0.96, // 0.0-1.0 scale
112
- },
113
- });
114
-
115
- console.log('AI Detailed Analysis:', detailedVisionResponse.choices[0]?.message?.content);
116
- console.log('Usage:', detailedVisionResponse.usage);
117
- console.log('✅ Vision API tracked with rich metadata for image analytics\n');
118
-
119
- // Example 3: Multiple images in one request
120
- console.log('🖼️ 🖼️ Example 3: Multiple images with low-detail mode\n');
121
-
122
- const multiImageResponse = await openai.chat.completions.create({
123
- model: 'gpt-4o-mini',
124
- messages: [
125
- {
126
- role: 'user',
127
- content: [
128
- {
129
- type: 'text',
130
- text: 'Compare these two images. What do they have in common?',
131
- },
132
- {
133
- type: 'image_url',
134
- image_url: {
135
- url: imageUrl,
136
- detail: 'low', // Low detail for cost-effective processing
137
- },
138
- },
139
- {
140
- type: 'image_url',
141
- image_url: {
142
- url: 'https://upload.wikimedia.org/wikipedia/commons/thumb/3/3f/Placeholder_view_vector.svg/310px-Placeholder_view_vector.svg.png',
143
- detail: 'low',
144
- },
145
- },
146
- ],
147
- },
148
- ],
149
- max_tokens: 300,
150
-
151
- // ✨ Tracking multi-image requests
152
- usageMetadata: {
153
- subscriber: {
154
- id: 'multi-vision-user-789',
155
- email: 'multi@company.com',
156
- credential: {
157
- name: 'api-key',
158
- value: 'multi-key-999',
159
- },
160
- },
161
- organizationId: 'comparison-ai-corp',
162
- productId: 'image-comparison-tool',
163
- subscriptionId: 'sub-comparison-basic-456',
164
- taskType: 'multi-image-comparison',
165
- traceId: `multi-vision-${Date.now()}`,
166
- agent: 'comparison-analyzer-node',
167
- responseQualityScore: 0.88, // 0.0-1.0 scale
168
- },
169
- });
170
-
171
- console.log('AI Comparison:', multiImageResponse.choices[0]?.message?.content);
172
- console.log('Usage:', multiImageResponse.usage);
173
- console.log('✅ Multiple images tracked with metadata for comparison analytics\n');
174
-
175
- // Example 4: Vision with conversation context
176
- console.log('💬 Example 4: Vision with multi-turn conversation\n');
177
-
178
- const conversationMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
179
- {
180
- role: 'user',
181
- content: [
182
- {
183
- type: 'text',
184
- text: 'What colors are prominent in this image?',
185
- },
186
- {
187
- type: 'image_url',
188
- image_url: {
189
- url: imageUrl,
190
- detail: 'auto', // Auto detail - OpenAI decides optimal level
191
- },
192
- },
193
- ],
194
- },
195
- ];
196
-
197
- const firstTurnResponse = await openai.chat.completions.create({
198
- model: 'gpt-4o-mini',
199
- messages: conversationMessages,
200
- max_tokens: 200,
201
-
202
- usageMetadata: {
203
- subscriber: {
204
- id: 'conversation-user-456',
205
- email: 'conversation@company.com',
206
- credential: {
207
- name: 'api-key',
208
- value: 'conv-key-123',
209
- },
210
- },
211
- organizationId: 'chat-vision-corp',
212
- productId: 'interactive-vision-assistant',
213
- subscriptionId: 'sub-interactive-premium-321',
214
- taskType: 'conversational-vision',
215
- traceId: `conv-vision-${Date.now()}`,
216
- agent: 'conversation-vision-node',
217
- },
218
- });
219
-
220
- console.log('First Turn - AI:', firstTurnResponse.choices[0]?.message?.content);
221
- console.log('Usage:', firstTurnResponse.usage);
222
-
223
- // Add AI response to conversation
224
- conversationMessages.push({
225
- role: 'assistant',
226
- content: firstTurnResponse.choices[0]?.message?.content || '',
227
- });
228
-
229
- // Follow-up question without image
230
- conversationMessages.push({
231
- role: 'user',
232
- content: 'Based on those colors, what mood does the image convey?',
233
- });
234
-
235
- const secondTurnResponse = await openai.chat.completions.create({
236
- model: 'gpt-4o-mini',
237
- messages: conversationMessages,
238
- max_tokens: 200,
239
-
240
- usageMetadata: {
241
- subscriber: {
242
- id: 'conversation-user-456',
243
- email: 'conversation@company.com',
244
- credential: {
245
- name: 'api-key',
246
- value: 'conv-key-123',
247
- },
248
- },
249
- organizationId: 'chat-vision-corp',
250
- productId: 'interactive-vision-assistant',
251
- subscriptionId: 'sub-interactive-premium-321',
252
- taskType: 'conversational-vision-followup',
253
- traceId: `conv-vision-${Date.now()}`,
254
- agent: 'conversation-vision-node',
255
- },
256
- });
257
-
258
- console.log('Second Turn - AI:', secondTurnResponse.choices[0]?.message?.content);
259
- console.log('Usage:', secondTurnResponse.usage);
260
- console.log('✅ Multi-turn vision conversation fully tracked\n');
261
-
262
- // Summary
263
- console.log('📈 Vision API Summary:');
264
- console.log('✅ Image analysis with URLs fully supported');
265
- console.log('✅ Token usage tracked including image processing tokens');
266
- console.log('✅ Multiple images in one request work seamlessly');
267
- console.log('✅ Detail levels (low, high, auto) all supported');
268
- console.log('✅ Multi-turn conversations with image context tracked');
269
- console.log('✅ All metadata fields optional and work perfectly');
270
- console.log('✅ Cost calculation includes vision-specific tokens');
271
- console.log('✅ No type casting required - native TypeScript support\n');
272
-
273
- console.log('💡 Use Cases:');
274
- console.log(' - Image content moderation and analysis');
275
- console.log(' - Product catalog image descriptions');
276
- console.log(' - Document and diagram understanding');
277
- console.log(' - Visual question answering systems');
278
- console.log(' - Accessibility tools (image descriptions)');
279
- console.log(' - Quality control and inspection automation\n');
280
-
281
- console.log('💰 Cost Optimization Tips:');
282
- console.log(' - Use "low" detail for simple images to save tokens');
283
- console.log(' - Use "high" detail only when fine details matter');
284
- console.log(' - Use "auto" to let OpenAI optimize automatically');
285
- console.log(' - Revenium tracks all token usage for accurate cost analytics');
286
- }
287
-
288
// Run the example; `void` marks the intentionally floating promise, and any
// rejection is reported rather than becoming an unhandled rejection.
void openAIVisionExample().catch((error) => console.error(error));