@revenium/openai 1.0.10 → 1.0.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +20 -0
- package/CHANGELOG.md +52 -0
- package/LICENSE +21 -21
- package/README.md +682 -1152
- package/dist/cjs/core/config/loader.js +1 -1
- package/dist/cjs/core/config/loader.js.map +1 -1
- package/dist/cjs/core/tracking/api-client.js +1 -1
- package/dist/cjs/core/tracking/api-client.js.map +1 -1
- package/dist/cjs/index.js +4 -4
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/types/openai-augmentation.js +1 -1
- package/dist/cjs/utils/url-builder.js +32 -7
- package/dist/cjs/utils/url-builder.js.map +1 -1
- package/dist/esm/core/config/loader.js +1 -1
- package/dist/esm/core/config/loader.js.map +1 -1
- package/dist/esm/core/tracking/api-client.js +1 -1
- package/dist/esm/core/tracking/api-client.js.map +1 -1
- package/dist/esm/index.js +4 -4
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/types/openai-augmentation.js +1 -1
- package/dist/esm/utils/url-builder.js +32 -7
- package/dist/esm/utils/url-builder.js.map +1 -1
- package/dist/types/index.d.ts +4 -4
- package/dist/types/types/index.d.ts +2 -2
- package/dist/types/types/index.d.ts.map +1 -1
- package/dist/types/types/openai-augmentation.d.ts +1 -1
- package/dist/types/utils/url-builder.d.ts +11 -3
- package/dist/types/utils/url-builder.d.ts.map +1 -1
- package/examples/README.md +357 -0
- package/examples/azure-basic.ts +206 -0
- package/examples/azure-responses-basic.ts +233 -0
- package/examples/azure-responses-streaming.ts +255 -0
- package/examples/azure-streaming.ts +209 -0
- package/examples/getting_started.ts +54 -0
- package/examples/openai-basic.ts +147 -0
- package/examples/openai-function-calling.ts +259 -0
- package/examples/openai-responses-basic.ts +212 -0
- package/examples/openai-responses-streaming.ts +232 -0
- package/examples/openai-streaming.ts +172 -0
- package/examples/openai-vision.ts +289 -0
- package/package.json +81 -84
- package/src/core/config/azure-config.ts +72 -0
- package/src/core/config/index.ts +23 -0
- package/src/core/config/loader.ts +66 -0
- package/src/core/config/manager.ts +94 -0
- package/src/core/config/validator.ts +89 -0
- package/src/core/providers/detector.ts +159 -0
- package/src/core/providers/index.ts +16 -0
- package/src/core/tracking/api-client.ts +78 -0
- package/src/core/tracking/index.ts +21 -0
- package/src/core/tracking/payload-builder.ts +132 -0
- package/src/core/tracking/usage-tracker.ts +189 -0
- package/src/core/wrapper/index.ts +9 -0
- package/src/core/wrapper/instance-patcher.ts +288 -0
- package/src/core/wrapper/request-handler.ts +423 -0
- package/src/core/wrapper/stream-wrapper.ts +100 -0
- package/src/index.ts +336 -0
- package/src/types/function-parameters.ts +251 -0
- package/src/types/index.ts +313 -0
- package/src/types/openai-augmentation.ts +233 -0
- package/src/types/responses-api.ts +308 -0
- package/src/utils/azure-model-resolver.ts +220 -0
- package/src/utils/constants.ts +21 -0
- package/src/utils/error-handler.ts +251 -0
- package/src/utils/metadata-builder.ts +219 -0
- package/src/utils/provider-detection.ts +257 -0
- package/src/utils/request-handler-factory.ts +285 -0
- package/src/utils/stop-reason-mapper.ts +74 -0
- package/src/utils/type-guards.ts +202 -0
- package/src/utils/url-builder.ts +68 -0
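Every example below loads its configuration via `import 'dotenv/config'` and `initializeReveniumFromEnv()`, so a local `.env` file is expected; the new package/.env.example documents the exact keys. A minimal sketch of that file — only OPENAI_API_KEY is confirmed by the example code (openai-responses-basic.ts reads process.env.OPENAI_API_KEY), and the Revenium variable name shown here is an assumed placeholder:

    # .env — sketch only; see package/.env.example for the actual variable names
    OPENAI_API_KEY=sk-...              # read by the OpenAI client in the examples
    REVENIUM_METERING_API_KEY=...      # assumed name for the Revenium metering key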
package/examples/getting_started.ts
@@ -0,0 +1,54 @@
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import OpenAI from 'openai';
+
+async function main() {
+  const initResult = initializeReveniumFromEnv();
+  if (!initResult.success) {
+    console.error('Failed to initialize Revenium:', initResult.message);
+    process.exit(1);
+  }
+
+  const openai = patchOpenAIInstance(new OpenAI());
+
+  const response = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: [
+      { role: 'system', content: 'You are a helpful assistant.' },
+      { role: 'user', content: 'Please verify you are ready to assist me.' }
+    ],
+
+    /* Optional metadata for advanced reporting, lineage tracking, and cost allocation
+    usageMetadata: {
+      // User identification
+      subscriber: {
+        id: 'user-123',
+        email: 'user@example.com',
+        credential: {
+          name: 'api-key-prod',
+          value: 'key-abc-123'
+        }
+      },
+
+      // Organization & billing
+      organizationId: 'my-customers-name',
+      subscriptionId: 'plan-enterprise-2024',
+
+      // Product & task tracking
+      productId: 'my-product',
+      taskType: 'doc-summary',
+      agent: 'customer-support',
+
+      // Session tracking
+      traceId: 'session-' + Date.now(),
+
+      // Quality metrics
+      responseQualityScore: 0.95
+    }
+    */
+  });
+
+  console.log('Response:', response.choices[0]?.message?.content);
+}
+
+main().catch(console.error);
package/examples/openai-basic.ts
@@ -0,0 +1,147 @@
+/**
+ * OpenAI Basic Example
+ *
+ * Shows how to use Revenium middleware with OpenAI chat completions and embeddings.
+ * Demonstrates seamless metadata integration - all metadata fields are optional!
+ *
+ * Metadata Options:
+ * - Start with basic usage (no metadata) - tracking works automatically
+ * - Add subscriber info for user tracking
+ * - Include organization/product IDs for business analytics
+ * - Use task type and trace ID for detailed analysis
+ *
+ * For complete metadata field reference, see:
+ * https://revenium.readme.io/reference/meter_ai_completion
+ */
+
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import OpenAI from 'openai';
+
+async function openaiBasicExample() {
+  console.log(' OpenAI Basic Usage with Seamless Metadata Integration\n');
+
+  // Initialize Revenium middleware
+  const initResult = initializeReveniumFromEnv();
+  if (!initResult.success) {
+    console.error(' Failed to initialize Revenium:', initResult.message);
+    process.exit(1);
+  }
+
+  // Create and patch OpenAI instance
+  const openai = patchOpenAIInstance(new OpenAI());
+
+  // Example 1: Basic chat completion (no metadata)
+  console.log('Example 1: Basic chat completion (automatic tracking)');
+
+  const basicResponse = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: [{ role: 'user', content: 'What is TypeScript in one sentence?' }],
+    // No usageMetadata - still automatically tracked!
+    // No max_tokens - let response complete naturally
+  });
+
+  console.log('Response:', basicResponse.choices[0]?.message?.content);
+  console.log('Usage:', basicResponse.usage);
+  console.log(' Automatically tracked to Revenium without metadata\n');
+
+  // Example 2: Chat completion with rich metadata (all optional!)
+  console.log('Example 2: Chat completion with rich metadata');
+
+  const metadataResponse = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: [
+      { role: 'user', content: 'Explain the benefits of using middleware in 2 sentences.' },
+    ],
+
+    // Optional metadata for advanced reporting, lineage tracking, and cost allocation
+    usageMetadata: {
+      // User identification
+      subscriber: {
+        id: 'user-12345',
+        email: 'developer@company.com',
+        credential: {
+          name: 'api-key-prod',
+          value: 'key-abc-123',
+        },
+      },
+
+      // Organization & billing
+      organizationId: 'my-customer',
+      subscriptionId: 'plan-premium-2024',
+
+      // Product & task tracking
+      productId: 'ai-assistant',
+      taskType: 'explanation-request',
+      agent: 'openai-basic-chat-node',
+
+      // Session tracking
+      traceId: 'session-' + Date.now(),
+
+      // Quality metrics
+      responseQualityScore: 0.95,
+    },
+  });
+
+  console.log('Response:', metadataResponse.choices[0]?.message?.content);
+  console.log('Usage:', metadataResponse.usage);
+  console.log(' Tracked with rich metadata for analytics\n');
+
+  // Example 3: Basic embeddings (no metadata)
+  console.log('Example 3: Basic embeddings (automatic tracking)');
+
+  const basicEmbedding = await openai.embeddings.create({
+    model: 'text-embedding-3-small',
+    input: 'Revenium middleware automatically tracks OpenAI usage',
+    // No usageMetadata - still automatically tracked!
+  });
+
+  console.log('Model:', basicEmbedding.model);
+  console.log('Usage:', basicEmbedding.usage);
+  console.log('Embedding dimensions:', basicEmbedding.data[0]?.embedding.length);
+  console.log('Embeddings automatically tracked without metadata\n');
+
+  // Example 4: Embeddings with metadata (all optional!)
+  console.log(' Example 4: Embeddings with rich metadata');
+
+  const metadataEmbedding = await openai.embeddings.create({
+    model: 'text-embedding-3-small',
+    input: 'Advanced text embedding with comprehensive tracking metadata',
+
+    // Optional metadata for advanced reporting, lineage tracking, and cost allocation
+    usageMetadata: {
+      // User identification
+      subscriber: {
+        id: 'embedding-user-789',
+        email: 'embeddings@company.com',
+        credential: {
+          name: 'api-key-prod',
+          value: 'key-def-456',
+        },
+      },
+
+      // Organization & billing
+      organizationId: 'my-company',
+      subscriptionId: 'plan-enterprise-2024',
+
+      // Product & task tracking
+      productId: 'search-engine',
+      taskType: 'document-embedding',
+      agent: 'openai-basic-embeddings-node',
+
+      // Session tracking
+      traceId: 'embed-' + Date.now(),
+
+      // Quality metrics
+      responseQualityScore: 0.98,
+    },
+  });
+
+  console.log('Model:', metadataEmbedding.model);
+  console.log('Usage:', metadataEmbedding.usage);
+  console.log('Embedding dimensions:', metadataEmbedding.data[0]?.embedding.length);
+  console.log(' Embeddings tracked with metadata for business analytics\n');
+}
+
+// Run the example
+openaiBasicExample().catch(console.error);
package/examples/openai-function-calling.ts
@@ -0,0 +1,259 @@
+/**
+ * OpenAI Function Calling Example
+ *
+ * Demonstrates how Revenium middleware seamlessly tracks function/tool calling usage
+ * with OpenAI chat completions. Shows automatic tracking of:
+ * - Function definitions and calls
+ * - Token usage including function call overhead
+ * - Multi-turn conversations with function execution
+ * - Cost calculation for function calling features
+ *
+ * All metadata fields are optional and work seamlessly with function calling!
+ *
+ * For complete metadata field reference, see:
+ * https://revenium.readme.io/reference/meter_ai_completion
+ *
+ * OpenAI Function Calling Reference:
+ * https://platform.openai.com/docs/guides/function-calling
+ */
+
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import OpenAI from 'openai';
+
+// Simulated weather API function
+function getCurrentWeather(location: string, unit: 'celsius' | 'fahrenheit' = 'celsius'): string {
+  // Simulate weather data
+  const temperature = unit === 'celsius' ? 22 : 72;
+  const conditions = 'sunny';
+
+  return JSON.stringify({
+    location,
+    temperature,
+    unit,
+    conditions,
+    forecast: 'Clear skies throughout the day',
+  });
+}
+
+async function openAIFunctionCallingExample() {
+  console.log('🔧 OpenAI Function Calling with Revenium Tracking\n');
+
+  // Initialize Revenium middleware
+  const initResult = initializeReveniumFromEnv();
+  if (!initResult.success) {
+    console.error('❌ Failed to initialize Revenium:', initResult.message);
+    process.exit(1);
+  }
+
+  // Create and patch OpenAI instance
+  const openai = patchOpenAIInstance(new OpenAI());
+
+  // Define function schema for OpenAI
+  const tools: OpenAI.Chat.Completions.ChatCompletionTool[] = [
+    {
+      type: 'function',
+      function: {
+        name: 'get_current_weather',
+        description: 'Get the current weather in a given location',
+        parameters: {
+          type: 'object',
+          properties: {
+            location: {
+              type: 'string',
+              description: 'The city and state, e.g. San Francisco, CA',
+            },
+            unit: {
+              type: 'string',
+              enum: ['celsius', 'fahrenheit'],
+              description: 'The temperature unit to use',
+            },
+          },
+          required: ['location'],
+        },
+      },
+    },
+  ];
+
+  // Example 1: Function calling without metadata (automatic tracking)
+  console.log('📍 Example 1: Basic function calling (automatic tracking)\n');
+
+  const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
+    {
+      role: 'user',
+      content: "What's the weather like in San Francisco?",
+    },
+  ];
+
+  // First call - AI decides to use the function
+  const firstResponse = await openai.chat.completions.create({
+    model: 'gpt-4o-mini',
+    messages: messages,
+    tools: tools,
+    tool_choice: 'auto',
+    // No usageMetadata - still automatically tracked!
+  });
+
+  console.log('AI Response (First call):');
+  console.log('Usage:', firstResponse.usage);
+
+  const firstMessage = firstResponse.choices[0]?.message;
+  console.log('Finish reason:', firstResponse.choices[0]?.finish_reason);
+
+  // Check if AI wants to call a function
+  if (firstMessage?.tool_calls) {
+    console.log('\n🔧 AI decided to call function:', firstMessage.tool_calls[0]?.function.name);
+    console.log('Function arguments:', firstMessage.tool_calls[0]?.function.arguments);
+
+    // Execute the function
+    const functionCall = firstMessage.tool_calls[0];
+    const functionArgs = JSON.parse(functionCall?.function.arguments || '{}');
+    const functionResponse = getCurrentWeather(
+      functionArgs.location,
+      functionArgs.unit
+    );
+
+    console.log('Function result:', functionResponse);
+
+    // Add function call and result to conversation
+    messages.push(firstMessage);
+    messages.push({
+      role: 'tool',
+      tool_call_id: functionCall.id,
+      content: functionResponse,
+    });
+
+    // Second call - AI uses function result to answer
+    const secondResponse = await openai.chat.completions.create({
+      model: 'gpt-4o-mini',
+      messages: messages,
+      tools: tools,
+      // No usageMetadata - still automatically tracked!
+    });
+
+    console.log('\n💬 AI Final Response:');
+    console.log('Content:', secondResponse.choices[0]?.message?.content);
+    console.log('Usage:', secondResponse.usage);
+    console.log('✅ Function calling automatically tracked without metadata\n');
+  }
+
+  // Example 2: Function calling with rich metadata
+  console.log('📊 Example 2: Function calling with comprehensive metadata\n');
+
+  const metadataMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
+    {
+      role: 'user',
+      content: "I'm planning a trip. What's the weather in Boston and should I bring a jacket?",
+    },
+  ];
+
+  // First call with metadata
+  const metadataFirstResponse = await openai.chat.completions.create({
+    model: 'gpt-4o',
+    messages: metadataMessages,
+    tools: tools,
+    tool_choice: 'auto',
+
+    // ✨ All metadata fields are optional - perfect for tracking AI agents!
+    usageMetadata: {
+      subscriber: {
+        id: 'travel-user-456',
+        email: 'traveler@company.com',
+        credential: {
+          name: 'api-key',
+          value: 'travel-key-789',
+        },
+      },
+      organizationId: 'travel-agency-corp',
+      productId: 'ai-travel-assistant',
+      subscriptionId: 'sub-pro-travel-123',
+      taskType: 'function-calling-weather',
+      traceId: `func-call-${Date.now()}`,
+      agent: 'weather-assistant-node',
+      responseQualityScore: 0.95,
+    },
+  });
+
+  console.log('AI Response with Metadata (First call):');
+  console.log('Usage:', metadataFirstResponse.usage);
+
+  const metadataFirstMessage = metadataFirstResponse.choices[0]?.message;
+  console.log('Finish reason:', metadataFirstResponse.choices[0]?.finish_reason);
+
+  // Process function calls if any
+  if (metadataFirstMessage?.tool_calls) {
+    console.log('\n🔧 Function calls requested:', metadataFirstMessage.tool_calls.length);
+
+    // Execute each function call
+    for (const toolCall of metadataFirstMessage.tool_calls) {
+      console.log(' - Function:', toolCall.function.name);
+      console.log(' Arguments:', toolCall.function.arguments);
+
+      const functionArgs = JSON.parse(toolCall.function.arguments);
+      const functionResponse = getCurrentWeather(
+        functionArgs.location,
+        functionArgs.unit
+      );
+
+      console.log(' Result:', functionResponse);
+
+      // Add function result to conversation
+      metadataMessages.push(metadataFirstMessage);
+      metadataMessages.push({
+        role: 'tool',
+        tool_call_id: toolCall.id,
+        content: functionResponse,
+      });
+    }
+
+    // Second call with metadata - AI generates final response
+    const metadataSecondResponse = await openai.chat.completions.create({
+      model: 'gpt-4o',
+      messages: metadataMessages,
+      tools: tools,
+
+      // ✨ Same metadata carried through the conversation
+      usageMetadata: {
+        subscriber: {
+          id: 'travel-user-456',
+          email: 'traveler@company.com',
+          credential: {
+            name: 'api-key',
+            value: 'travel-key-789',
+          },
+        },
+        organizationId: 'travel-agency-corp',
+        productId: 'ai-travel-assistant',
+        subscriptionId: 'sub-pro-travel-123',
+        taskType: 'function-calling-weather',
+        traceId: `func-call-${Date.now()}`,
+        agent: 'weather-assistant-node',
+        responseQualityScore: 0.98,
+      },
+    });
+
+    console.log('\n💬 AI Final Response with Metadata:');
+    console.log('Content:', metadataSecondResponse.choices[0]?.message?.content);
+    console.log('Usage:', metadataSecondResponse.usage);
+    console.log('✅ Function calling tracked with rich metadata for AI agent analytics\n');
+  }
+
+  // Summary
+  console.log('📈 Function Calling Summary:');
+  console.log('✅ Function definitions work seamlessly with middleware');
+  console.log('✅ Token usage tracked including function call overhead');
+  console.log('✅ Multi-turn conversations fully supported');
+  console.log('✅ All metadata fields optional and work perfectly');
+  console.log('✅ Cost calculation includes function calling tokens');
+  console.log('✅ No type casting required - native TypeScript support');
+  console.log('✅ Perfect for tracking AI agents and tool usage\n');
+
+  console.log('💡 Use Cases:');
+  console.log(' - AI agents with tool access');
+  console.log(' - Customer support bots with API integrations');
+  console.log(' - Data analysis assistants with function execution');
+  console.log(' - Multi-step workflows with external tools');
+}
+
+// Run the example
+openAIFunctionCallingExample().catch(console.error);
package/examples/openai-responses-basic.ts
@@ -0,0 +1,212 @@
+/**
+ * OpenAI Responses API Basic Examples
+ *
+ * This file demonstrates how to use the new OpenAI Responses API with the Revenium middleware.
+ * The Responses API is a new stateful API that brings together capabilities from chat completions
+ * and assistants API in one unified experience.
+ *
+ * Metadata Options:
+ * - Start with basic usage (no metadata) - tracking works automatically
+ * - Add subscriber info for user tracking
+ * - Include organization/product IDs for business analytics
+ * - Use task type and trace ID for detailed analysis
+ *
+ * For complete metadata field reference, see:
+ * https://revenium.readme.io/reference/meter_ai_completion
+ *
+ * Responses API Reference: https://platform.openai.com/docs/api-reference/responses
+ */
+
+import 'dotenv/config';
+import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
+import OpenAI from 'openai';
+
+// Import types for the new Responses API
+import type { ResponsesCreateParams, ResponsesResponse } from '../src/types/responses-api.js';
+
+async function main() {
+  // Initialize Revenium middleware
+  await initializeReveniumFromEnv();
+
+  // Create OpenAI client
+  const openai = new OpenAI({
+    apiKey: process.env.OPENAI_API_KEY,
+  });
+
+  // Patch the OpenAI instance to add Revenium tracking
+  patchOpenAIInstance(openai);
+
+  console.log(' OpenAI Responses API Basic Examples\n');
+
+  // Example 1: Basic Responses API call (no metadata)
+  console.log(' Example 1: Basic Responses API call (no metadata)');
+  try {
+    const responsesAPI = openai as any; // Type assertion for new API
+
+    if (responsesAPI.responses?.create) {
+      const response: ResponsesResponse = await responsesAPI.responses.create({
+        model: 'gpt-5',
+        input: 'What is the capital of France?',
+      } as ResponsesCreateParams);
+
+      console.log('Response ID:', response.id);
+      console.log('Model:', response.model);
+      console.log('Status:', response.status);
+      console.log('Output Text:', response.output_text);
+      console.log('Usage:', response.usage);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('️ Responses API not yet available in this OpenAI SDK version');
+    console.log(' Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 2: Responses API with rich metadata
+  console.log(' Example 2: Responses API with rich metadata');
+  try {
+    const responsesAPI = openai as any;
+
+    if (responsesAPI.responses?.create) {
+      const response: ResponsesResponse = await responsesAPI.responses.create({
+        model: 'gpt-5',
+        input: [
+          {
+            role: 'user',
+            content: 'Explain quantum computing in simple terms.',
+          },
+        ],
+        max_output_tokens: 150,
+        usageMetadata: {
+          // User identification
+          subscriber: {
+            id: 'user-123',
+            email: 'user@example.com',
+            credential: {
+              name: 'api-key-prod',
+              value: 'key-efg-123',
+            },
+          },
+
+          // Organization & billing
+          organizationId: 'org-456',
+          subscriptionId: 'plan-responses-2024',
+
+          // Product & task tracking
+          productId: 'quantum-explainer',
+          taskType: 'educational-content',
+          agent: 'quantum-tutor',
+
+          // Session tracking
+          traceId: 'trace-789',
+
+          // Quality metrics
+          responseQualityScore: 0.95,
+        },
+      } as ResponsesCreateParams);
+
+      console.log('Response ID:', response.id);
+      console.log('Model:', response.model);
+      console.log('Status:', response.status);
+      console.log('Output Text:', response.output_text?.substring(0, 100) + '...');
+      console.log('Usage:', response.usage);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('️ Responses API not yet available in this OpenAI SDK version');
+    console.log(' Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 3: Basic Responses API with string input (no metadata)
+  console.log(' Example 3: Basic Responses API with string input (no metadata)');
+  try {
+    const responsesAPI = openai as any;
+
+    if (responsesAPI.responses?.create) {
+      const response: ResponsesResponse = await responsesAPI.responses.create({
+        model: 'gpt-5',
+        input: 'Write a haiku about programming.',
+      } as ResponsesCreateParams);
+
+      console.log('Response ID:', response.id);
+      console.log('Model:', response.model);
+      console.log('Status:', response.status);
+      console.log('Output Text:', response.output_text);
+      console.log('Usage:', response.usage);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('️ Responses API not yet available in this OpenAI SDK version');
+    console.log(' Error:', (error as Error).message);
+  }
+
+  console.log('\n' + '='.repeat(50) + '\n');
+
+  // Example 4: Advanced Responses API with comprehensive metadata
+  console.log(' Example 4: Advanced Responses API with comprehensive metadata');
+  try {
+    const responsesAPI = openai as any;
+
+    if (responsesAPI.responses?.create) {
+      const response: ResponsesResponse = await responsesAPI.responses.create({
+        model: 'gpt-5',
+        input: [
+          {
+            role: 'user',
+            content: 'Provide a comprehensive overview of the Responses API capabilities.',
+          },
+        ],
+        max_output_tokens: 200,
+        instructions: 'You are a helpful AI assistant specializing in API documentation.',
+        usageMetadata: {
+          // User identification
+          subscriber: {
+            id: 'enterprise-user-456',
+            email: 'enterprise@company.com',
+            credential: {
+              name: 'api-key-prod',
+              value: 'key-hij-456',
+            },
+          },
+
+          // Organization & billing
+          organizationId: 'enterprise-org-789',
+          subscriptionId: 'plan-enterprise-docs-2024',
+
+          // Product & task tracking
+          productId: 'api-documentation-assistant',
+          taskType: 'technical-documentation',
+          agent: 'documentation-expert',
+
+          // Session tracking
+          traceId: 'enterprise-trace-012',
+
+          // Quality metrics
+          responseQualityScore: 0.98,
+        },
+      } as ResponsesCreateParams);
+
+      console.log('Response ID:', response.id);
+      console.log('Model:', response.model);
+      console.log('Status:', response.status);
+      console.log('Output Text:', response.output_text?.substring(0, 150) + '...');
+      console.log('Usage:', response.usage);
+      console.log('Output Array Length:', response.output?.length);
+    } else {
+      throw new Error('Responses API not available');
+    }
+  } catch (error) {
+    console.log('️ Responses API not yet available in this OpenAI SDK version');
+    console.log(' Error:', (error as Error).message);
+  }
+
+  console.log('\n All Responses API examples completed!');
+}
+
+main().catch(console.error);
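The Responses API examples above go through `openai as any` so they fail gracefully on OpenAI SDK versions that predate `client.responses`. On an SDK version that ships the Responses API natively, the same call can be written without the cast. A minimal sketch under that assumption (the model is swapped to 'gpt-4o-mini' purely for illustration; the examples above use 'gpt-5'):

    import 'dotenv/config';
    import { initializeReveniumFromEnv, patchOpenAIInstance } from '@revenium/openai';
    import OpenAI from 'openai';

    async function responsesWithoutCast() {
      initializeReveniumFromEnv();

      const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
      patchOpenAIInstance(openai); // usage tracking still applies, as in the examples above

      // No `as any` cast needed once the installed SDK exposes client.responses.
      const response = await openai.responses.create({
        model: 'gpt-4o-mini',
        input: 'What is the capital of France?',
      });

      console.log('Output Text:', response.output_text);
    }

    responsesWithoutCast().catch(console.error);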