@revenium/litellm 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/LICENSE +21 -0
  2. package/README.md +630 -0
  3. package/dist/client.d.ts +17 -0
  4. package/dist/client.d.ts.map +1 -0
  5. package/dist/client.js +713 -0
  6. package/dist/client.js.map +1 -0
  7. package/dist/config.d.ts +42 -0
  8. package/dist/config.d.ts.map +1 -0
  9. package/dist/config.js +332 -0
  10. package/dist/config.js.map +1 -0
  11. package/dist/constants.d.ts +15 -0
  12. package/dist/constants.d.ts.map +1 -0
  13. package/dist/constants.js +101 -0
  14. package/dist/constants.js.map +1 -0
  15. package/dist/index.d.ts +42 -0
  16. package/dist/index.d.ts.map +1 -0
  17. package/dist/index.js +189 -0
  18. package/dist/index.js.map +1 -0
  19. package/dist/prompt-extraction.d.ts +11 -0
  20. package/dist/prompt-extraction.d.ts.map +1 -0
  21. package/dist/prompt-extraction.js +201 -0
  22. package/dist/prompt-extraction.js.map +1 -0
  23. package/dist/tracking.d.ts +47 -0
  24. package/dist/tracking.d.ts.map +1 -0
  25. package/dist/tracking.js +299 -0
  26. package/dist/tracking.js.map +1 -0
  27. package/dist/types.d.ts +348 -0
  28. package/dist/types.d.ts.map +1 -0
  29. package/dist/types.js +3 -0
  30. package/dist/types.js.map +1 -0
  31. package/dist/utils/circuit-breaker.d.ts +114 -0
  32. package/dist/utils/circuit-breaker.d.ts.map +1 -0
  33. package/dist/utils/circuit-breaker.js +216 -0
  34. package/dist/utils/circuit-breaker.js.map +1 -0
  35. package/dist/utils/error-handling.d.ts +166 -0
  36. package/dist/utils/error-handling.d.ts.map +1 -0
  37. package/dist/utils/error-handling.js +306 -0
  38. package/dist/utils/error-handling.js.map +1 -0
  39. package/dist/utils/logger-types.d.ts +171 -0
  40. package/dist/utils/logger-types.d.ts.map +1 -0
  41. package/dist/utils/logger-types.js +210 -0
  42. package/dist/utils/logger-types.js.map +1 -0
  43. package/dist/utils/provider-detection.d.ts +43 -0
  44. package/dist/utils/provider-detection.d.ts.map +1 -0
  45. package/dist/utils/provider-detection.js +103 -0
  46. package/dist/utils/provider-detection.js.map +1 -0
  47. package/dist/utils/stop-reason.d.ts +58 -0
  48. package/dist/utils/stop-reason.d.ts.map +1 -0
  49. package/dist/utils/stop-reason.js +136 -0
  50. package/dist/utils/stop-reason.js.map +1 -0
  51. package/dist/utils/summary-printer.d.ts +23 -0
  52. package/dist/utils/summary-printer.d.ts.map +1 -0
  53. package/dist/utils/summary-printer.js +234 -0
  54. package/dist/utils/summary-printer.js.map +1 -0
  55. package/dist/utils/trace-fields.d.ts +10 -0
  56. package/dist/utils/trace-fields.d.ts.map +1 -0
  57. package/dist/utils/trace-fields.js +117 -0
  58. package/dist/utils/trace-fields.js.map +1 -0
  59. package/dist/utils/validation.d.ts +121 -0
  60. package/dist/utils/validation.d.ts.map +1 -0
  61. package/dist/utils/validation.js +451 -0
  62. package/dist/utils/validation.js.map +1 -0
  63. package/examples/README.md +321 -0
  64. package/examples/litellm-basic.ts +240 -0
  65. package/examples/litellm-streaming.ts +309 -0
  66. package/examples/prompt-capture.ts +128 -0
  67. package/package.json +85 -0
@@ -0,0 +1,321 @@ package/examples/README.md

# Revenium LiteLLM Middleware Examples

Clear, focused examples that demonstrate seamless HTTP interception with LiteLLM Proxy across all providers.

## Quick Start

1. **Set up environment variables** (create `.env` in the project root):

   ```bash
   # Required for all examples
   REVENIUM_METERING_API_KEY=hak_your_api_key
   REVENIUM_METERING_BASE_URL=https://api.revenium.ai
   LITELLM_PROXY_URL=https://your-litellm-proxy.com
   LITELLM_API_KEY=your_litellm_api_key

   # Optional: enable debug logging
   REVENIUM_DEBUG=true
   ```

2. **Install the package**:

   ```bash
   npm install @revenium/litellm dotenv
   npm install --save-dev typescript tsx @types/node
   ```

3. **Run any example**:

   ```bash
   REVENIUM_DEBUG=true npx tsx examples/litellm-basic.ts      # Basic LiteLLM Proxy usage with metadata
   REVENIUM_DEBUG=true npx tsx examples/litellm-streaming.ts  # Streaming, multi-provider, and advanced features
   ```

## Getting Started - Step by Step

This guide walks you through creating a complete project from scratch. If you cloned this repository from GitHub, you can run the included examples directly; if you installed from npm, copy the examples from `node_modules/@revenium/litellm/examples/` into your project directory.

### Step 1: Create Your First Test

#### TypeScript Test

Create `test-litellm.ts`:

```typescript
// test-litellm.ts
import "dotenv/config";
import "@revenium/litellm";

async function testLiteLLM() {
  const proxyUrl = process.env.LITELLM_PROXY_URL;
  const apiKey = process.env.LITELLM_API_KEY;

  if (!proxyUrl || !apiKey) {
    throw new Error("Set LITELLM_PROXY_URL and LITELLM_API_KEY in your .env file");
  }

  try {
    const response = await fetch(`${proxyUrl}/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${apiKey}`,
        // Optional: add metadata for tracking
        "x-revenium-subscriber-id": "test-user",
        "x-revenium-subscriber-email": "test@example.com",
        "x-revenium-organization-id": "test-org",
      },
      body: JSON.stringify({
        model: "gpt-4o-mini",
        messages: [{ role: "user", content: "Hello!" }],
      }),
    });

    const data = await response.json();
    console.log("Response:", data.choices[0].message.content);
    console.log("Usage tracked automatically by Revenium middleware!");
  } catch (error) {
    console.error("Error:", error);
  }
}

testLiteLLM();
```

#### JavaScript Test

Create `test-litellm.js`:

```javascript
// test-litellm.js
require("dotenv/config");
require("@revenium/litellm");

async function testLiteLLM() {
  const proxyUrl = process.env.LITELLM_PROXY_URL;
  const apiKey = process.env.LITELLM_API_KEY;

  if (!proxyUrl || !apiKey) {
    throw new Error("Set LITELLM_PROXY_URL and LITELLM_API_KEY in your .env file");
  }

  try {
    const response = await fetch(`${proxyUrl}/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${apiKey}`,
        "x-revenium-subscriber-id": "test-user",
        "x-revenium-subscriber-email": "test@example.com",
      },
      body: JSON.stringify({
        model: "gpt-4o-mini",
        messages: [{ role: "user", content: "Hello!" }],
      }),
    });

    const data = await response.json();
    console.log("Response:", data.choices[0].message.content);
  } catch (error) {
    console.error("Error:", error);
  }
}

testLiteLLM();
```

### Step 2: Update package.json

Add test scripts to your `package.json`:

```json
{
  "scripts": {
    "test:litellm": "tsx test-litellm.ts",
    "test:litellm:js": "node test-litellm.js"
  }
}
```

### Step 3: Run Your Tests

```bash
# TypeScript version
npm run test:litellm

# JavaScript version
npm run test:litellm:js

# Or run directly
npx tsx test-litellm.ts
node test-litellm.js
```

### Step 4: Explore Advanced Examples

Once your basic test works, explore the included examples (GitHub users can run them directly; npm users should copy them from `node_modules/@revenium/litellm/examples/`):

```bash
# Basic usage with metadata
npx tsx examples/litellm-basic.ts

# Streaming and multi-provider
npx tsx examples/litellm-streaming.ts
```

### Step 5: Project Structure

A typical project structure:

```
your-project/
├── .env              # API keys (never commit!)
├── .gitignore        # Protect your .env file
├── package.json
├── test-litellm.ts   # Your first test
└── src/
    └── index.ts      # Your application code
```
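
The `.gitignore` entry matters: committing `.env` leaks your Revenium and provider keys. A minimal way to set it up from the shell (assuming a fresh project with no existing `.gitignore`):

```bash
# Keep secrets and dependencies out of version control
echo ".env" >> .gitignore
echo "node_modules/" >> .gitignore
```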

## Examples

### litellm-basic.ts

**Basic LiteLLM Proxy usage** with seamless metadata:

- Chat completions with and without metadata
- Embeddings with metadata tracking
- Shows metadata usage patterns
- Multiple examples in one file

```typescript
// Chat with metadata - no complex setup needed!
const response = await fetch(`${proxyUrl}/chat/completions`, {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${apiKey}`,
    // Subscriber metadata for enhanced tracking:
    "x-revenium-subscriber-id": "user-123",
    "x-revenium-subscriber-email": "user@my-company.com",
    "x-revenium-subscriber-credential-name": "api-key",
    "x-revenium-subscriber-credential": "credential-value",
    "x-revenium-organization-id": "my-company",
  },
  body: JSON.stringify({
    model: "openai/gpt-4o-mini",
    messages: [{ role: "user", content: "Hello!" }],
  }),
});
```
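
If you attach the same subscriber metadata to many calls, a small helper keeps the headers consistent. This is a hypothetical convenience wrapper, not part of the package's API; only the `x-revenium-*` header names are taken from these examples:

```typescript
// Hypothetical helper - builds the metadata headers used throughout these examples.
interface ReveniumMetadata {
  subscriberId?: string;
  subscriberEmail?: string;
  organizationId?: string;
  taskType?: string;
  productId?: string;
}

function reveniumHeaders(meta: ReveniumMetadata): Record<string, string> {
  const headers: Record<string, string> = {};
  if (meta.subscriberId) headers["x-revenium-subscriber-id"] = meta.subscriberId;
  if (meta.subscriberEmail) headers["x-revenium-subscriber-email"] = meta.subscriberEmail;
  if (meta.organizationId) headers["x-revenium-organization-id"] = meta.organizationId;
  if (meta.taskType) headers["x-revenium-task-type"] = meta.taskType;
  if (meta.productId) headers["x-revenium-product-id"] = meta.productId;
  return headers;
}

// Usage: spread into your normal fetch headers
const headers = {
  "Content-Type": "application/json",
  Authorization: `Bearer ${apiKey}`,
  ...reveniumHeaders({ subscriberId: "user-123", organizationId: "my-company" }),
};
```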

### litellm-streaming.ts

**Streaming responses and multi-provider support** with seamless metadata:

- Streaming chat completions with metadata (see the sketch below)
- Multi-provider examples (OpenAI, Anthropic, etc.)
- Advanced embeddings with comprehensive metadata
- Usage tracked automatically when streams complete
- Real-time responses + comprehensive analytics
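
Streaming through the proxy is plain `fetch` with `stream: true`; the middleware records usage once the stream finishes. A minimal sketch of consuming the response (assuming an OpenAI-compatible `/chat/completions` endpoint and Node 18+; the SSE parsing is simplified and assumes each chunk contains whole `data:` lines):

```typescript
const response = await fetch(`${proxyUrl}/chat/completions`, {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${apiKey}`,
    "x-revenium-subscriber-id": "user-123",
  },
  body: JSON.stringify({
    model: "openai/gpt-4o-mini",
    stream: true,
    messages: [{ role: "user", content: "Tell me a short story." }],
  }),
});

// Read the server-sent-event chunks as they arrive
const reader = response.body!.getReader();
const decoder = new TextDecoder();
while (true) {
  const { done, value } = await reader.read();
  if (done) break; // usage is tracked once the stream completes
  for (const line of decoder.decode(value).split("\n")) {
    if (!line.startsWith("data: ") || line.includes("[DONE]")) continue;
    const delta = JSON.parse(line.slice(6)).choices?.[0]?.delta?.content;
    if (delta) process.stdout.write(delta);
  }
}
```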

### Key Features Demonstrated

- **Seamless HTTP Interception**: Automatic tracking of all LiteLLM Proxy requests
- **Flexible Metadata**: Add metadata headers as needed for enhanced tracking
- **Multi-Provider Support**: Works with OpenAI, Anthropic, Google, Azure, and more
- **Chat & Embeddings**: Full support for both operation types
- **Streaming Support**: Real-time tracking when streams complete
- **LiteLLM Proxy Integration**: Purpose-built for LiteLLM's proxy architecture

## Running Examples

All examples require:

- Node.js 18+
- A valid Revenium API key
- A running LiteLLM Proxy server

**Individual examples:**

```bash
REVENIUM_DEBUG=true npx tsx examples/litellm-basic.ts      # Basic chat completions and embeddings
REVENIUM_DEBUG=true npx tsx examples/litellm-streaming.ts  # Streaming and multi-provider features
```

## LiteLLM Proxy Setup

For local testing, you can run LiteLLM Proxy on your own machine:

### Option 1: Simple OpenAI Setup

```bash
# Install LiteLLM
pip install litellm

# Start with OpenAI (replace with your API key)
export OPENAI_API_KEY=sk_your_openai_key
litellm --model gpt-3.5-turbo --port 4000

# Update your .env for local testing
LITELLM_PROXY_URL=http://localhost:4000
LITELLM_API_KEY=sk-1234
```

### Option 2: Multi-Provider Setup

```bash
# Create a config file for multiple providers
cat > litellm_config.yaml << EOF
model_list:
  - model_name: gpt-4o-mini
    litellm_params:
      model: openai/gpt-4o-mini
      api_key: \${OPENAI_API_KEY}
  - model_name: claude-3-haiku
    litellm_params:
      model: anthropic/claude-3-haiku-20240307
      api_key: \${ANTHROPIC_API_KEY}
EOF

# Start with config
litellm --config litellm_config.yaml --port 4000
```

## Understanding the Magic

The middleware works as follows (a conceptual sketch follows the list):

1. **Import**: Import the middleware before making fetch requests
2. **HTTP Interception**: The middleware patches the global `fetch` function
3. **Request Detection**: It identifies LiteLLM Proxy requests by URL pattern
4. **Seamless Integration**: Use fetch normally, adding metadata headers as needed
5. **Data Extraction**: It captures tokens, timing, model info, and metadata
6. **Background Tracking**: It sends data to Revenium without blocking your app
7. **Transparent Response**: It returns the original LiteLLM response unchanged

**The result**: your existing LiteLLM Proxy code works exactly the same, but you now get automatic usage tracking and rich analytics.
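
Conceptually, the interception step looks like the sketch below. This is not the package's actual implementation, only an illustration of the patch-global-`fetch` technique the list describes; the URL check and the tracking call are stand-ins:

```typescript
// Illustrative only - how a fetch-patching middleware works in principle.
const originalFetch = globalThis.fetch;

globalThis.fetch = async (input, init) => {
  const url =
    typeof input === "string" ? input : input instanceof URL ? input.href : input.url;
  const started = Date.now();

  // Pass the request through untouched
  const response = await originalFetch(input, init);

  // If it looks like a LiteLLM Proxy call, record usage in the background
  if (url.includes("/chat/completions") || url.includes("/embeddings")) {
    response
      .clone() // clone so the caller can still consume the body
      .json()
      .then((body) => {
        // stand-in for the real tracking call
        console.log("track", { url, ms: Date.now() - started, usage: body.usage });
      })
      .catch(() => {}); // never let tracking break the application
  }

  return response; // the caller sees the original response
};
```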

## Troubleshooting

**Environment variable errors:**

- Ensure the `.env` file is in the project root
- Check that variable names match exactly (note: `REVENIUM_METERING_API_KEY`)
- Verify your API keys are valid

**LiteLLM Proxy setup issues:**

- Ensure LiteLLM Proxy is running and accessible
- Check that `LITELLM_PROXY_URL` points to the correct server
- Verify your LiteLLM Proxy has the required provider API keys

**Middleware not working:**

- Ensure the middleware is imported before making fetch requests (see the snippet below)
- Verify the package is properly installed with `npm install @revenium/litellm`
- If using TypeScript, check that compilation succeeds
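
Import order is the most common pitfall: the side-effect import has to run before any code captures a reference to the unpatched `fetch`. A minimal correct entry point (the `./app` module and `runApp` function are hypothetical):

```typescript
// index.ts - import the middleware first, before anything that uses fetch
import "dotenv/config";
import "@revenium/litellm";

// ...only then import or run code that calls fetch
import { runApp } from "./app"; // hypothetical application module

runApp();
```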

**Debug mode:**

```bash
export REVENIUM_DEBUG=true
npx tsx examples/litellm-basic.ts
```

Look for log messages like:

- `[Revenium] LiteLLM request intercepted`
- `[Revenium] Usage metadata extracted`
- `[Revenium] Revenium tracking successful`

@@ -0,0 +1,240 @@ package/examples/litellm-basic.ts

/**
 * LiteLLM Basic Example
 *
 * This example demonstrates basic LiteLLM Proxy usage with optional metadata tracking.
 * Shows both chat completions and embeddings, with and without metadata.
 */

// Load environment variables from .env file
import "dotenv/config";

// Step 1: Import the middleware (this enables automatic tracking)
import "@revenium/litellm";

async function basicExample() {
  console.log("Starting basic Revenium LiteLLM middleware example...\n");

  // Check environment variables
  const requiredVars = ["REVENIUM_METERING_API_KEY", "LITELLM_PROXY_URL"];
  const missing = requiredVars.filter((key) => !process.env[key]);

  if (missing.length > 0) {
    console.error("❌ Missing required environment variables:");
    missing.forEach((key) => console.error(`   ${key}`));
    console.error("\nPlease set them in a .env file in the project root:");
    console.error("  REVENIUM_METERING_API_KEY=hak_your_api_key");
    console.error("  REVENIUM_METERING_BASE_URL=https://api.revenium.ai");
    console.error("  LITELLM_PROXY_URL=https://your-proxy.com");
    console.error("  LITELLM_API_KEY=your_litellm_key  # Optional");
    process.exit(1);
  }

  const proxyUrl = process.env.LITELLM_PROXY_URL!;
  const apiKey = process.env.LITELLM_API_KEY || "sk-1234";

  // Handle proxy URL - strip the endpoint if it is already included
  const baseProxyUrl = proxyUrl.replace(
    /\/(chat\/completions|embeddings)$/,
    ""
  );

  // Debug: show loaded configuration (partially obfuscated)
  console.log("Configuration loaded:");
  console.log(
    `  Revenium API Key: ${process.env.REVENIUM_METERING_API_KEY?.substring(
      0,
      8
    )}...${process.env.REVENIUM_METERING_API_KEY?.slice(-4)}`
  );
  console.log(
    `  Revenium Base URL: ${process.env.REVENIUM_METERING_BASE_URL}`
  );
  console.log(`  LiteLLM Proxy URL: ${proxyUrl}`);
  console.log(`  Base Proxy URL: ${baseProxyUrl}`);
  console.log(
    `  LiteLLM API Key: ${apiKey.substring(0, 8)}...${apiKey.slice(-4)}\n`
  );

  let successCount = 0;
  const totalRequests = 3;

  try {
    // Example 1: Basic chat completion without metadata
    console.log("Example 1: Basic chat completion without metadata...");
    const chatUrl = `${baseProxyUrl}/chat/completions`;
    console.log(`  Calling: ${chatUrl}`);

    const basicResponse = await fetch(chatUrl, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${apiKey}`,
        // No metadata - still tracked automatically!
      },
      body: JSON.stringify({
        model: "openai/gpt-4o-mini",
        max_tokens: 50,
        messages: [
          {
            role: "user",
            content: "What is the capital of France? Please be concise.",
          },
        ],
      }),
    });

    if (basicResponse.ok) {
      const basicData = await basicResponse.json();
      console.log(
        "Basic response:",
        basicData.choices[0]?.message?.content || "No response"
      );
      console.log(
        `  Tokens: ${basicData.usage?.prompt_tokens} input + ${basicData.usage?.completion_tokens} output\n`
      );
      successCount++;
    } else {
      console.log(
        "❌ Basic request failed:",
        basicResponse.status,
        basicResponse.statusText
      );
      const errorText = await basicResponse.text();
      console.log("  Error details:", errorText.substring(0, 200));
    }

    // Example 2: Chat completion with custom metadata
    console.log("Example 2: Chat completion with custom metadata...");

    const metadataResponse = await fetch(`${baseProxyUrl}/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${apiKey}`,
        // Add custom metadata for enhanced tracking and analytics
        "x-revenium-subscriber-id": "demo-user-123",
        "x-revenium-subscriber-email": "demo-user@acme.com",
        "x-revenium-subscriber-credential-name": "api-key",
        "x-revenium-subscriber-credential": "demo-credential-value",
        "x-revenium-organization-id": "my-customer-name",
        "x-revenium-task-type": "litellm-node-basic",
        "x-revenium-product-id": "litellm-middleware-demo",
        "x-revenium-agent": "litellm-node-basic",
      },
      body: JSON.stringify({
        model: "openai/gpt-4o-mini",
        max_tokens: 100,
        messages: [
          {
            role: "user",
            content: "Explain quantum computing in one sentence.",
          },
        ],
      }),
    });

    if (metadataResponse.ok) {
      const metadataData = await metadataResponse.json();
      console.log(
        "Metadata response:",
        metadataData.choices[0]?.message?.content || "No response"
      );
      console.log(
        `  Tokens: ${metadataData.usage?.prompt_tokens} input + ${metadataData.usage?.completion_tokens} output\n`
      );
      successCount++;
    } else {
      console.log(
        "❌ Metadata request failed:",
        metadataResponse.status,
        metadataResponse.statusText
      );
      const errorText = await metadataResponse.text();
      console.log("  Error details:", errorText.substring(0, 200));
    }

    // Example 3: Embeddings with metadata
    console.log("Example 3: Embeddings with metadata...");

    const embeddingResponse = await fetch(`${baseProxyUrl}/embeddings`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${apiKey}`,
        // Metadata works with embeddings too!
        "x-revenium-subscriber-id": "demo-user-123",
        "x-revenium-subscriber-email": "demo-user@acme.com",
        "x-revenium-subscriber-credential-name": "embedding-key",
        "x-revenium-subscriber-credential": "demo-embedding-credential",
        "x-revenium-organization-id": "my-customer-name",
        "x-revenium-task-type": "text-embedding",
        "x-revenium-product-id": "semantic-search-demo",
      },
      body: JSON.stringify({
        model: "text-embedding-ada-002",
        input: "This is a sample text for embedding generation.",
      }),
    });

    if (embeddingResponse.ok) {
      const embeddingData = await embeddingResponse.json();
      console.log("Embedding response: Vector generated successfully");
      console.log(
        `  Dimensions: ${
          embeddingData.data[0]?.embedding?.length || "Unknown"
        }`
      );
      console.log(`  Tokens: ${embeddingData.usage?.prompt_tokens} input\n`);
      successCount++;
    } else {
      console.log(
        "❌ Embedding request failed:",
        embeddingResponse.status,
        embeddingResponse.statusText
      );
      const errorText = await embeddingResponse.text();
      console.log("  Error details:", errorText.substring(0, 200));
    }

    // Report results
    console.log(
      `\nResults: ${successCount}/${totalRequests} requests successful`
    );

    if (successCount === totalRequests) {
      console.log(
        "✅ All requests successful and automatically tracked to Revenium!"
      );
      console.log("Check your Revenium dashboard to see the tracked usage.");
    } else if (successCount > 0) {
      console.log("⚠️ Some requests succeeded and were tracked to Revenium.");
      console.log("Check your Revenium dashboard to see the tracked usage.");
    } else {
      console.log(
        "❌ No requests were successful. Check your LiteLLM Proxy configuration."
      );
      console.log(
        "Ensure your LiteLLM Proxy is running and accessible at:",
        baseProxyUrl
      );
    }
  } catch (error) {
    console.error("❌ Error:", error);
    throw error;
  }
}

// Run the example
if (require.main === module) {
  basicExample()
    .then(() => {
      console.log("\nBasic example completed!");
      console.log(
        "Enable REVENIUM_DEBUG=true to see detailed request tracking logs"
      );
    })
    .catch((error) => {
      console.error("\nExample failed:", error);
      process.exit(1);
    });
}