@agentgazer/proxy 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1175 @@
1
+ "use strict";
2
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
3
+ if (k2 === undefined) k2 = k;
4
+ var desc = Object.getOwnPropertyDescriptor(m, k);
5
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
6
+ desc = { enumerable: true, get: function() { return m[k]; } };
7
+ }
8
+ Object.defineProperty(o, k2, desc);
9
+ }) : (function(o, m, k, k2) {
10
+ if (k2 === undefined) k2 = k;
11
+ o[k2] = m[k];
12
+ }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
18
+ var __importStar = (this && this.__importStar) || (function () {
19
+ var ownKeys = function(o) {
20
+ ownKeys = Object.getOwnPropertyNames || function (o) {
21
+ var ar = [];
22
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
23
+ return ar;
24
+ };
25
+ return ownKeys(o);
26
+ };
27
+ return function (mod) {
28
+ if (mod && mod.__esModule) return mod;
29
+ var result = {};
30
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
31
+ __setModuleDefault(result, mod);
32
+ return result;
33
+ };
34
+ })();
35
+ Object.defineProperty(exports, "__esModule", { value: true });
36
+ const vitest_1 = require("vitest");
37
+ const http = __importStar(require("node:http"));
38
+ const proxy_server_js_1 = require("../proxy-server.js");
39
/**
 * Issues a single HTTP request and buffers the entire response.
 *
 * When a body is supplied, Content-Length is set explicitly so that Node's
 * http module does not add Transfer-Encoding: chunked (which the proxy
 * would forward and cause fetch to reject with "invalid transfer-encoding").
 *
 * @param {{hostname: string, port: number, path: string, method?: string,
 *          headers?: Object, body?: string}} options request description
 * @returns {Promise<{status: number, headers: Object, body: string}>}
 *          resolves with the buffered response; rejects on socket errors.
 */
function httpRequest(options) {
    const { hostname, port, path, body } = options;
    const method = options.method ?? "GET";
    const outHeaders = { ...options.headers };
    if (body !== undefined) {
        // Explicit Content-Length keeps the request un-chunked.
        outHeaders["Content-Length"] = String(Buffer.byteLength(body));
    }
    return new Promise((resolve, reject) => {
        const req = http.request({ hostname, port, path, method, headers: outHeaders }, (res) => {
            const pieces = [];
            res.on("data", (piece) => pieces.push(piece));
            res.on("end", () => {
                resolve({
                    status: res.statusCode ?? 0,
                    headers: res.headers,
                    body: Buffer.concat(pieces).toString("utf-8"),
                });
            });
        });
        req.on("error", reject);
        // req.end(undefined) is equivalent to req.end(), so one call suffices.
        req.end(body);
    });
}
78
/**
 * Creates a mock "provider" HTTP server that answers every request with an
 * OpenAI-style chat-completion JSON payload.
 *
 * Each incoming request (method, url, headers, body) is recorded in
 * `receivedRequests` for assertions, and `responseOverride` is exposed so a
 * test can mutate the status code / body before issuing a request.
 *
 * @returns {Promise<{server: import("node:http").Server, port: number,
 *          url: string, receivedRequests: Array, responseOverride: Object}>}
 */
function createMockProviderServer() {
    const receivedRequests = [];
    // Mutable response template; tests may overwrite statusCode/body.
    const responseOverride = {
        statusCode: 200,
        body: {
            id: "chatcmpl-abc123",
            object: "chat.completion",
            model: "gpt-4o",
            usage: { prompt_tokens: 100, completion_tokens: 50, total_tokens: 150 },
            choices: [
                { message: { role: "assistant", content: "Hello!" }, finish_reason: "stop" },
            ],
        },
    };
    const server = http.createServer((req, res) => {
        const bodyChunks = [];
        req.on("data", (part) => bodyChunks.push(part));
        req.on("end", () => {
            receivedRequests.push({
                method: req.method ?? "GET",
                url: req.url ?? "/",
                headers: req.headers,
                body: Buffer.concat(bodyChunks).toString("utf-8"),
            });
            // responseOverride is re-read per request so later mutations apply.
            const payload = JSON.stringify(responseOverride.body);
            res.writeHead(responseOverride.statusCode, {
                "Content-Type": "application/json",
                "Content-Length": String(Buffer.byteLength(payload)),
            });
            res.end(payload);
        });
    });
    return new Promise((resolve) => {
        server.listen(0, "127.0.0.1", () => {
            const { port } = server.address();
            resolve({
                server,
                port,
                url: `http://127.0.0.1:${port}`,
                receivedRequests,
                responseOverride,
            });
        });
    });
}
134
/**
 * Creates a mock ingest server (the event-buffer flush endpoint).
 *
 * Every JSON body POSTed to it is parsed and appended to `receivedBatches`;
 * bodies that fail to parse are silently dropped. Always answers
 * 200 {"ok":true}.
 *
 * @returns {Promise<{server: import("node:http").Server, port: number,
 *          url: string, receivedBatches: Array}>}
 */
function createMockIngestServer() {
    const receivedBatches = [];
    const server = http.createServer((req, res) => {
        const parts = [];
        req.on("data", (part) => parts.push(part));
        req.on("end", () => {
            const raw = Buffer.concat(parts).toString("utf-8");
            try {
                receivedBatches.push(JSON.parse(raw));
            }
            catch {
                // ignore parse errors — tests only care about valid batches
            }
            res.writeHead(200, { "Content-Type": "application/json" });
            res.end(JSON.stringify({ ok: true }));
        });
    });
    return new Promise((resolve) => {
        server.listen(0, "127.0.0.1", () => {
            const { port } = server.address();
            resolve({
                server,
                port,
                url: `http://127.0.0.1:${port}/v1/events`,
                receivedBatches,
            });
        });
    });
}
168
/**
 * Closes an HTTP server, resolving once close() completes.
 *
 * @param {import("node:http").Server} server the server to shut down
 * @returns {Promise<void>} rejects with the error close() reports, if any.
 */
function closeServer(server) {
    return new Promise((resolve, reject) => {
        server.close((err) => {
            if (err) {
                reject(err);
            }
            else {
                resolve();
            }
        });
    });
}
173
/**
 * Polls GET /health on 127.0.0.1:<port> until the server accepts a
 * connection, retrying every 50 ms.
 *
 * Any response — regardless of status code — counts as "ready"; only
 * socket-level errors trigger a retry.
 *
 * @param {number} port port the proxy is listening on
 * @param {number} [maxAttempts=20] attempts before giving up
 * @returns {Promise<void>} rejects after maxAttempts failed connections.
 */
function waitForServer(port, maxAttempts = 20) {
    return new Promise((resolve, reject) => {
        let tries = 0;
        const probe = () => {
            tries += 1;
            const req = http.request({ hostname: "127.0.0.1", port, path: "/health", method: "GET" }, (res) => {
                res.resume(); // drain the body so the socket is released
                resolve();
            });
            req.on("error", () => {
                if (tries >= maxAttempts) {
                    reject(new Error(`Server not ready after ${maxAttempts} attempts`));
                }
                else {
                    setTimeout(probe, 50);
                }
            });
            req.end();
        };
        probe();
    });
}
198
+ (0, vitest_1.describe)("Proxy Server Integration", () => {
199
+ let proxy = null;
200
+ let providerServer = null;
201
+ let ingestServer = null;
202
    // Tear down whatever the test started, in dependency order: the proxy
    // first (stops forwarding/flushing), then the mock provider and ingest
    // servers. Nulling each handle keeps tests isolated.
    (0, vitest_1.afterEach)(async () => {
        if (proxy) {
            await proxy.shutdown();
            proxy = null;
        }
        if (providerServer) {
            await closeServer(providerServer.server);
            providerServer = null;
        }
        if (ingestServer) {
            await closeServer(ingestServer.server);
            ingestServer = null;
        }
    });
216
    // /health is the liveness endpoint: it must echo the configured agent id
    // and a non-negative uptime.
    (0, vitest_1.it)("GET /health returns status ok with agent_id and uptime_ms", async () => {
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-health-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 100,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/health",
            method: "GET",
        });
        (0, vitest_1.expect)(res.status).toBe(200);
        const body = JSON.parse(res.body);
        (0, vitest_1.expect)(body.status).toBe("ok");
        (0, vitest_1.expect)(body.agent_id).toBe("agent-health-test");
        (0, vitest_1.expect)(typeof body.uptime_ms).toBe("number");
        (0, vitest_1.expect)(body.uptime_ms).toBeGreaterThanOrEqual(0);
    });
241
    // NOTE(review): this test may hit the real api.openai.com when network
    // access exists; the [401, 502] expectation covers both online and
    // offline outcomes.
    (0, vitest_1.it)("auto-detects provider from path when x-target-url is missing", async () => {
        providerServer = await createMockProviderServer();
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-autodetect-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 100,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        // Without x-target-url, path /v1/chat/completions should auto-detect
        // OpenAI via path patterns. In this test environment there's no real
        // OpenAI to reach, so we verify the proxy attempts to call the detected
        // provider (it will fail with 502, proving auto-detection worked).
        const consoleSpy = vitest_1.vi.spyOn(console, "error").mockImplementation(() => { });
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/chat/completions",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
            },
            body: JSON.stringify({ model: "gpt-4o", messages: [] }),
        });
        // The proxy should have auto-detected OpenAI and tried to reach it.
        // We expect either a 401 (OpenAI rejects unauthenticated requests)
        // or a 502 (if the host is unreachable). Either proves auto-detection worked.
        (0, vitest_1.expect)([401, 502]).toContain(res.status);
        consoleSpy.mockRestore();
    });
275
    // No x-target-url + unrecognizable path => the proxy cannot pick an
    // upstream and must answer 400 with an explanatory error.
    (0, vitest_1.it)("returns 400 when provider cannot be detected and x-target-url is missing", async () => {
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-400-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 100,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        // Use a path that doesn't match any known provider
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/api/custom-endpoint",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
            },
            body: JSON.stringify({}),
        });
        (0, vitest_1.expect)(res.status).toBe(400);
        const body = JSON.parse(res.body);
        (0, vitest_1.expect)(body.error).toContain("Could not determine upstream provider");
    });
302
    // Happy path: request goes through the proxy to the mock provider and the
    // provider's JSON response comes back unchanged; proxy-control headers are
    // stripped while auth/custom headers pass through.
    (0, vitest_1.it)("forwards request to target URL and returns provider response", async () => {
        providerServer = await createMockProviderServer();
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-forward-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 100,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        const requestBody = JSON.stringify({
            model: "gpt-4o",
            messages: [{ role: "user", content: "Hello" }],
        });
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/chat/completions",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": providerServer.url,
                Authorization: "Bearer sk-test-key",
            },
            body: requestBody,
        });
        // The proxy should forward the provider's response
        (0, vitest_1.expect)(res.status).toBe(200);
        const body = JSON.parse(res.body);
        (0, vitest_1.expect)(body.model).toBe("gpt-4o");
        (0, vitest_1.expect)(body.usage.prompt_tokens).toBe(100);
        (0, vitest_1.expect)(body.usage.completion_tokens).toBe(50);
        // Verify the mock provider received the forwarded request
        (0, vitest_1.expect)(providerServer.receivedRequests).toHaveLength(1);
        const forwarded = providerServer.receivedRequests[0];
        (0, vitest_1.expect)(forwarded.method).toBe("POST");
        (0, vitest_1.expect)(forwarded.url).toBe("/v1/chat/completions");
        // Verify proxy-specific headers are stripped
        (0, vitest_1.expect)(forwarded.headers["x-target-url"]).toBeUndefined();
        // Authorization header should be forwarded to the provider
        (0, vitest_1.expect)(forwarded.headers["authorization"]).toBe("Bearer sk-test-key");
    });
347
    // The upstream URL is x-target-url (base) + the incoming request path.
    (0, vitest_1.it)("forwards request path correctly - x-target-url base + request path", async () => {
        providerServer = await createMockProviderServer();
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-path-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 100,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/chat/completions",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": providerServer.url,
            },
            body: JSON.stringify({ model: "gpt-4o", messages: [] }),
        });
        (0, vitest_1.expect)(providerServer.receivedRequests).toHaveLength(1);
        (0, vitest_1.expect)(providerServer.receivedRequests[0].url).toBe("/v1/chat/completions");
    });
374
    // Proxy-control headers must not leak upstream; everything else
    // (custom headers, Content-Type) is forwarded verbatim.
    (0, vitest_1.it)("strips host, connection, and x-target-url headers before forwarding", async () => {
        providerServer = await createMockProviderServer();
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-headers-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 100,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/chat/completions",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": providerServer.url,
                "x-custom-header": "should-be-forwarded",
            },
            body: JSON.stringify({}),
        });
        (0, vitest_1.expect)(providerServer.receivedRequests).toHaveLength(1);
        const fwdHeaders = providerServer.receivedRequests[0].headers;
        // x-target-url should be stripped
        (0, vitest_1.expect)(fwdHeaders["x-target-url"]).toBeUndefined();
        // Custom headers should be forwarded
        (0, vitest_1.expect)(fwdHeaders["x-custom-header"]).toBe("should-be-forwarded");
        // Content-Type should be forwarded
        (0, vitest_1.expect)(fwdHeaders["content-type"]).toBe("application/json");
    });
408
    // End-to-end metrics path: OpenAI-shaped response => an llm_call event
    // with token counts and computed cost lands on the ingest endpoint.
    (0, vitest_1.it)("extracts metrics from OpenAI-style response and queues event", async () => {
        providerServer = await createMockProviderServer();
        ingestServer = await createMockIngestServer();
        // detectProvider checks the full targetUrl via regex. To make it detect
        // "openai", the URL must contain "api.openai.com" somewhere. We embed it
        // in a query parameter so the regex matches while the actual request still
        // reaches our local mock server.
        const fakeOpenAIUrl = `${providerServer.url}?host=api.openai.com`;
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-metrics-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 1, // flush immediately on first event
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/chat/completions",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": fakeOpenAIUrl,
            },
            body: JSON.stringify({
                model: "gpt-4o",
                messages: [{ role: "user", content: "Hi" }],
            }),
        });
        // Wait for the event buffer to flush (maxBufferSize=1 triggers auto-flush)
        await vitest_1.vi.waitFor(() => {
            (0, vitest_1.expect)(ingestServer.receivedBatches.length).toBeGreaterThanOrEqual(1);
        }, { timeout: 3000, interval: 50 });
        const events = ingestServer.receivedBatches[0].events;
        (0, vitest_1.expect)(events).toHaveLength(1);
        const event = events[0];
        (0, vitest_1.expect)(event.agent_id).toBe("agent-metrics-test");
        (0, vitest_1.expect)(event.event_type).toBe("llm_call");
        (0, vitest_1.expect)(event.provider).toBe("openai");
        (0, vitest_1.expect)(event.model).toBe("gpt-4o");
        (0, vitest_1.expect)(event.tokens_in).toBe(100);
        (0, vitest_1.expect)(event.tokens_out).toBe(50);
        (0, vitest_1.expect)(event.tokens_total).toBe(150);
        (0, vitest_1.expect)(event.status_code).toBe(200);
        (0, vitest_1.expect)(event.source).toBe("proxy");
        (0, vitest_1.expect)(event.latency_ms).toBeGreaterThanOrEqual(0);
        // gpt-4o pricing: input $2.50/1M, output $10.00/1M
        // cost = (100/1M)*2.50 + (50/1M)*10.00 = 0.00025 + 0.0005 = 0.00075
        (0, vitest_1.expect)(event.cost_usd).toBeCloseTo(0.00075, 6);
    });
461
    // A per-request x-agent-id header should win over the proxy-wide agentId.
    (0, vitest_1.it)("uses x-agent-id header to override default agentId", async () => {
        providerServer = await createMockProviderServer();
        providerServer.responseOverride.body = {
            id: "chatcmpl-123",
            model: "gpt-4o",
            usage: { prompt_tokens: 50, completion_tokens: 25, total_tokens: 75 },
        };
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "default-agent",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 1,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        // Send request with x-agent-id header
        const fakeOpenAIUrl = `${providerServer.url}?host=api.openai.com`;
        await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/chat/completions",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": fakeOpenAIUrl,
                "x-agent-id": "custom-agent-from-header",
            },
            body: JSON.stringify({ model: "gpt-4o", messages: [] }),
        });
        await vitest_1.vi.waitFor(() => {
            (0, vitest_1.expect)(ingestServer.receivedBatches.length).toBeGreaterThanOrEqual(1);
        }, { timeout: 3000, interval: 50 });
        const events = ingestServer.receivedBatches[0].events;
        (0, vitest_1.expect)(events).toHaveLength(1);
        (0, vitest_1.expect)(events[0].agent_id).toBe("custom-agent-from-header");
    });
500
    // An unreachable upstream (port 1 refuses connections) must surface as a
    // 502 with an "Upstream request failed" error body.
    (0, vitest_1.it)("returns 502 when upstream is unreachable", async () => {
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-502-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 100,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        // Suppress the expected console.error from the proxy
        const consoleSpy = vitest_1.vi.spyOn(console, "error").mockImplementation(() => { });
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/chat/completions",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": "http://127.0.0.1:1",
            },
            body: JSON.stringify({}),
        });
        (0, vitest_1.expect)(res.status).toBe(502);
        const body = JSON.parse(res.body);
        (0, vitest_1.expect)(body.error).toContain("Upstream request failed");
        consoleSpy.mockRestore();
    });
530
    // Upstream error statuses (here 429) must be passed through verbatim,
    // body included — the proxy must not mask provider errors.
    (0, vitest_1.it)("forwards non-2xx status codes from upstream", async () => {
        providerServer = await createMockProviderServer();
        ingestServer = await createMockIngestServer();
        // Configure the mock provider to return 429
        providerServer.responseOverride.statusCode = 429;
        providerServer.responseOverride.body = {
            error: {
                message: "Rate limit exceeded",
                type: "rate_limit_error",
            },
        };
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-upstream-err-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 100,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        // Suppress expected console.warn for error response parsing
        const consoleSpy = vitest_1.vi.spyOn(console, "warn").mockImplementation(() => { });
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/chat/completions",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": providerServer.url,
            },
            body: JSON.stringify({ model: "gpt-4o" }),
        });
        (0, vitest_1.expect)(res.status).toBe(429);
        const body = JSON.parse(res.body);
        (0, vitest_1.expect)(body.error.message).toBe("Rate limit exceeded");
        consoleSpy.mockRestore();
    });
569
    // Same metrics pipeline as the OpenAI test, but with Anthropic's response
    // shape (input_tokens/output_tokens, content blocks, stop_reason).
    (0, vitest_1.it)("handles Anthropic-style responses and extracts metrics", async () => {
        providerServer = await createMockProviderServer();
        ingestServer = await createMockIngestServer();
        // Configure mock to return Anthropic-style response
        providerServer.responseOverride.body = {
            id: "msg_abc123",
            type: "message",
            model: "claude-sonnet-4-20250514",
            usage: {
                input_tokens: 200,
                output_tokens: 80,
            },
            content: [{ type: "text", text: "Hello!" }],
            stop_reason: "end_turn",
        };
        // Trick detectProvider into recognizing Anthropic
        const fakeAnthropicUrl = `${providerServer.url}?host=api.anthropic.com`;
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-anthropic-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 1,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/messages",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": fakeAnthropicUrl,
            },
            body: JSON.stringify({
                model: "claude-sonnet-4-20250514",
                messages: [{ role: "user", content: "Hi" }],
            }),
        });
        // Wait for event buffer to flush
        await vitest_1.vi.waitFor(() => {
            (0, vitest_1.expect)(ingestServer.receivedBatches.length).toBeGreaterThanOrEqual(1);
        }, { timeout: 3000, interval: 50 });
        const events = ingestServer.receivedBatches[0].events;
        (0, vitest_1.expect)(events).toHaveLength(1);
        (0, vitest_1.expect)(events[0].provider).toBe("anthropic");
        (0, vitest_1.expect)(events[0].model).toBe("claude-sonnet-4-20250514");
        (0, vitest_1.expect)(events[0].tokens_in).toBe(200);
        (0, vitest_1.expect)(events[0].tokens_out).toBe(80);
        (0, vitest_1.expect)(events[0].tokens_total).toBe(280);
        // claude-sonnet-4-20250514: input $3.00/1M, output $15.00/1M
        // cost = (200/1M)*3.00 + (80/1M)*15.00 = 0.0006 + 0.0012 = 0.0018
        (0, vitest_1.expect)(events[0].cost_usd).toBeCloseTo(0.0018, 6);
    });
625
    // Unknown provider => the call is still proxied, but no usage event may
    // be emitted (nothing to price / attribute).
    (0, vitest_1.it)("skips metric extraction for unknown provider (localhost)", async () => {
        providerServer = await createMockProviderServer();
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-unknown-provider",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 1,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        // Suppress expected console.warn for unknown provider
        const consoleSpy = vitest_1.vi.spyOn(console, "warn").mockImplementation(() => { });
        // Use a path that does NOT match any known provider path pattern.
        // detectProvider checks both host patterns and path patterns.
        // The path /v1/chat/completions would match OpenAI's pathPatterns even
        // on localhost, so we use a custom path here instead.
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/api/generate",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                // Plain localhost URL - detectProvider will return "unknown"
                "x-target-url": providerServer.url,
            },
            body: JSON.stringify({ model: "gpt-4o", messages: [] }),
        });
        // The response should still be forwarded
        (0, vitest_1.expect)(res.status).toBe(200);
        const body = JSON.parse(res.body);
        (0, vitest_1.expect)(body.model).toBe("gpt-4o");
        // Wait a bit to ensure no events are queued
        await new Promise((resolve) => setTimeout(resolve, 200));
        // No events should be reported for unknown provider
        (0, vitest_1.expect)(ingestServer.receivedBatches).toHaveLength(0);
        consoleSpy.mockRestore();
    });
666
    // Body-less methods must be forwarded without inventing a payload.
    (0, vitest_1.it)("GET requests are forwarded without a body", async () => {
        providerServer = await createMockProviderServer();
        ingestServer = await createMockIngestServer();
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-get-test",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 100,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/models",
            method: "GET",
            headers: {
                "x-target-url": providerServer.url,
            },
        });
        (0, vitest_1.expect)(res.status).toBe(200);
        (0, vitest_1.expect)(providerServer.receivedRequests).toHaveLength(1);
        (0, vitest_1.expect)(providerServer.receivedRequests[0].method).toBe("GET");
        (0, vitest_1.expect)(providerServer.receivedRequests[0].url).toBe("/v1/models");
        // GET request should have empty body
        (0, vitest_1.expect)(providerServer.receivedRequests[0].body).toBe("");
    });
695
    // Streaming path: SSE chunks must be piped through to the client while
    // the proxy tails the stream for the usage chunk and reports a streaming
    // llm_call event.
    (0, vitest_1.it)("streams SSE response through to client and extracts OpenAI metrics", async () => {
        ingestServer = await createMockIngestServer();
        // Build OpenAI-style SSE chunks
        const sseChunks = [
            `data: ${JSON.stringify({ id: "chatcmpl-1", object: "chat.completion.chunk", model: "gpt-4o", choices: [{ delta: { role: "assistant" } }] })}\n\n`,
            `data: ${JSON.stringify({ id: "chatcmpl-1", object: "chat.completion.chunk", model: "gpt-4o", choices: [{ delta: { content: "Hello" } }] })}\n\n`,
            `data: ${JSON.stringify({ id: "chatcmpl-1", object: "chat.completion.chunk", model: "gpt-4o", choices: [{ delta: { content: "!" } }], usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 } })}\n\n`,
            `data: [DONE]\n\n`,
        ];
        // Create SSE mock provider
        const sseServer = await new Promise((resolve) => {
            const server = http.createServer((_req, res) => {
                res.writeHead(200, {
                    "Content-Type": "text/event-stream",
                    "Cache-Control": "no-cache",
                    Connection: "keep-alive",
                });
                // Write all chunks then end
                for (const chunk of sseChunks) {
                    res.write(chunk);
                }
                res.end();
            });
            server.listen(0, "127.0.0.1", () => {
                const addr = server.address();
                resolve({ server, port: addr.port });
            });
        });
        const fakeOpenAIUrl = `http://127.0.0.1:${sseServer.port}?host=api.openai.com`;
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-sse-openai",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 1,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/chat/completions",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": fakeOpenAIUrl,
            },
            body: JSON.stringify({ model: "gpt-4o", messages: [], stream: true }),
        });
        // The SSE response should be streamed through
        (0, vitest_1.expect)(res.status).toBe(200);
        (0, vitest_1.expect)(res.headers["content-type"]).toContain("text/event-stream");
        (0, vitest_1.expect)(res.body).toContain("data: ");
        (0, vitest_1.expect)(res.body).toContain("[DONE]");
        // Wait for event buffer to flush
        await vitest_1.vi.waitFor(() => {
            (0, vitest_1.expect)(ingestServer.receivedBatches.length).toBeGreaterThanOrEqual(1);
        }, { timeout: 3000, interval: 50 });
        const events = ingestServer.receivedBatches[0].events;
        (0, vitest_1.expect)(events).toHaveLength(1);
        (0, vitest_1.expect)(events[0].agent_id).toBe("agent-sse-openai");
        (0, vitest_1.expect)(events[0].provider).toBe("openai");
        (0, vitest_1.expect)(events[0].model).toBe("gpt-4o");
        (0, vitest_1.expect)(events[0].tokens_in).toBe(10);
        (0, vitest_1.expect)(events[0].tokens_out).toBe(5);
        (0, vitest_1.expect)(events[0].tokens_total).toBe(15);
        (0, vitest_1.expect)(events[0].status_code).toBe(200);
        (0, vitest_1.expect)(events[0].tags).toEqual({ streaming: "true" });
        // Local SSE server isn't tracked by afterEach — close it here.
        await closeServer(sseServer.server);
    });
766
    // Anthropic streaming: input_tokens arrive in message_start and
    // output_tokens in message_delta; the proxy must combine both.
    (0, vitest_1.it)("streams SSE response through to client and extracts Anthropic metrics", async () => {
        ingestServer = await createMockIngestServer();
        // Build Anthropic-style SSE chunks
        const sseChunks = [
            `event: message_start\ndata: ${JSON.stringify({ type: "message_start", message: { id: "msg_1", type: "message", model: "claude-sonnet-4-20250514", usage: { input_tokens: 25 }, content: [], stop_reason: null } })}\n\n`,
            `event: content_block_start\ndata: ${JSON.stringify({ type: "content_block_start", index: 0, content_block: { type: "text", text: "" } })}\n\n`,
            `event: content_block_delta\ndata: ${JSON.stringify({ type: "content_block_delta", index: 0, delta: { type: "text_delta", text: "Hello!" } })}\n\n`,
            `event: content_block_stop\ndata: ${JSON.stringify({ type: "content_block_stop", index: 0 })}\n\n`,
            `event: message_delta\ndata: ${JSON.stringify({ type: "message_delta", delta: { stop_reason: "end_turn" }, usage: { output_tokens: 12 } })}\n\n`,
            `event: message_stop\ndata: ${JSON.stringify({ type: "message_stop" })}\n\n`,
        ];
        const sseServer = await new Promise((resolve) => {
            const server = http.createServer((_req, res) => {
                res.writeHead(200, {
                    "Content-Type": "text/event-stream",
                    "Cache-Control": "no-cache",
                    Connection: "keep-alive",
                });
                for (const chunk of sseChunks) {
                    res.write(chunk);
                }
                res.end();
            });
            server.listen(0, "127.0.0.1", () => {
                const addr = server.address();
                resolve({ server, port: addr.port });
            });
        });
        const fakeAnthropicUrl = `http://127.0.0.1:${sseServer.port}?host=api.anthropic.com`;
        proxy = (0, proxy_server_js_1.startProxy)({
            port: 0,
            apiKey: "test-api-key",
            agentId: "agent-sse-anthropic",
            endpoint: ingestServer.url,
            flushInterval: 60_000,
            maxBufferSize: 1,
        });
        const proxyPort = proxy.server.address().port;
        await waitForServer(proxyPort);
        const res = await httpRequest({
            hostname: "127.0.0.1",
            port: proxyPort,
            path: "/v1/messages",
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-target-url": fakeAnthropicUrl,
            },
            body: JSON.stringify({ model: "claude-sonnet-4-20250514", messages: [], stream: true }),
        });
        (0, vitest_1.expect)(res.status).toBe(200);
        (0, vitest_1.expect)(res.headers["content-type"]).toContain("text/event-stream");
        (0, vitest_1.expect)(res.body).toContain("message_start");
        // Wait for event buffer to flush
        await vitest_1.vi.waitFor(() => {
            (0, vitest_1.expect)(ingestServer.receivedBatches.length).toBeGreaterThanOrEqual(1);
        }, { timeout: 3000, interval: 50 });
        const events = ingestServer.receivedBatches[0].events;
        (0, vitest_1.expect)(events).toHaveLength(1);
        (0, vitest_1.expect)(events[0].agent_id).toBe("agent-sse-anthropic");
        (0, vitest_1.expect)(events[0].provider).toBe("anthropic");
        (0, vitest_1.expect)(events[0].model).toBe("claude-sonnet-4-20250514");
        (0, vitest_1.expect)(events[0].tokens_in).toBe(25);
        (0, vitest_1.expect)(events[0].tokens_out).toBe(12);
        (0, vitest_1.expect)(events[0].tokens_total).toBe(37);
        (0, vitest_1.expect)(events[0].tags).toEqual({ streaming: "true" });
        // Local SSE server isn't tracked by afterEach — close it here.
        await closeServer(sseServer.server);
    });
834
(0, vitest_1.it)("shutdown() cleanly shuts down the server and flushes events", async () => {
    const expect = vitest_1.expect;
    providerServer = await createMockProviderServer();
    ingestServer = await createMockIngestServer();
    const fakeOpenAIUrl = `${providerServer.url}?host=api.openai.com`;
    // Oversized buffer + long flush interval: nothing flushes before shutdown().
    const instance = (0, proxy_server_js_1.startProxy)({
        port: 0,
        apiKey: "test-api-key",
        agentId: "agent-shutdown-test",
        endpoint: ingestServer.url,
        flushInterval: 60_000,
        maxBufferSize: 100,
    });
    const proxyPort = instance.server.address().port;
    await waitForServer(proxyPort);
    // One proxied request should enqueue exactly one event in the buffer.
    await httpRequest({
        hostname: "127.0.0.1",
        port: proxyPort,
        path: "/v1/chat/completions",
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "x-target-url": fakeOpenAIUrl,
        },
        body: JSON.stringify({ model: "gpt-4o", messages: [] }),
    });
    // Metric extraction runs off the response path; give it a beat to enqueue.
    await new Promise((resolve) => setTimeout(resolve, 100));
    // shutdown() must flush whatever is still buffered before the server closes.
    await instance.shutdown();
    proxy = null; // prevent afterEach from trying to shut down again
    expect(ingestServer.receivedBatches.length).toBeGreaterThanOrEqual(1);
    const flushedEvents = ingestServer.receivedBatches.flatMap((batch) => batch.events);
    expect(flushedEvents.length).toBeGreaterThanOrEqual(1);
});
// -----------------------------------------------------------------------
// Provider key injection
// -----------------------------------------------------------------------
(0, vitest_1.it)("injects Authorization header for OpenAI when providerKeys is set", async () => {
    const expect = vitest_1.expect;
    providerServer = await createMockProviderServer();
    ingestServer = await createMockIngestServer();
    // Reroute anything addressed to the real OpenAI host to the local mock server.
    const realFetch = global.fetch;
    vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
        const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
        if (!url.includes("api.openai.com")) {
            return realFetch(input, init);
        }
        return realFetch(url.replace(/https:\/\/api\.openai\.com/, providerServer.url), init);
    });
    proxy = (0, proxy_server_js_1.startProxy)({
        port: 0,
        apiKey: "test-api-key",
        agentId: "agent-key-inject",
        endpoint: ingestServer.url,
        flushInterval: 60_000,
        maxBufferSize: 100,
        providerKeys: { openai: "sk-injected-key-12345" },
    });
    const proxyPort = proxy.server.address().port;
    await waitForServer(proxyPort);
    // The client sends no Authorization header and targets the real provider hostname.
    await httpRequest({
        hostname: "127.0.0.1",
        port: proxyPort,
        path: "/v1/chat/completions",
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "x-target-url": "https://api.openai.com",
        },
        body: JSON.stringify({ model: "gpt-4o", messages: [] }),
    });
    expect(providerServer.receivedRequests).toHaveLength(1);
    // The proxy itself must have supplied the Authorization header.
    expect(providerServer.receivedRequests[0].headers["authorization"]).toBe("Bearer sk-injected-key-12345");
    vitest_1.vi.restoreAllMocks();
});
(0, vitest_1.it)("overrides existing Authorization header when providerKeys is configured", async () => {
    const expect = vitest_1.expect;
    providerServer = await createMockProviderServer();
    ingestServer = await createMockIngestServer();
    // Reroute anything addressed to the real OpenAI host to the local mock server.
    const realFetch = global.fetch;
    vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
        const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
        if (!url.includes("api.openai.com")) {
            return realFetch(input, init);
        }
        return realFetch(url.replace(/https:\/\/api\.openai\.com/, providerServer.url), init);
    });
    proxy = (0, proxy_server_js_1.startProxy)({
        port: 0,
        apiKey: "test-api-key",
        agentId: "agent-key-no-override",
        endpoint: ingestServer.url,
        flushInterval: 60_000,
        maxBufferSize: 100,
        providerKeys: { openai: "sk-injected-key-12345" },
    });
    const proxyPort = proxy.server.address().port;
    await waitForServer(proxyPort);
    // The client brings its own Authorization header this time.
    await httpRequest({
        hostname: "127.0.0.1",
        port: proxyPort,
        path: "/v1/chat/completions",
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "x-target-url": "https://api.openai.com",
            Authorization: "Bearer sk-client-own-key",
        },
        body: JSON.stringify({ model: "gpt-4o", messages: [] }),
    });
    expect(providerServer.receivedRequests).toHaveLength(1);
    // A configured providerKeys entry deliberately wins over the client key;
    // integrations such as OpenClaw send placeholder keys that must be replaced.
    expect(providerServer.receivedRequests[0].headers["authorization"]).toBe("Bearer sk-injected-key-12345");
    vitest_1.vi.restoreAllMocks();
});
(0, vitest_1.it)("injects x-api-key header for Anthropic when providerKeys is set", async () => {
    const expect = vitest_1.expect;
    providerServer = await createMockProviderServer();
    ingestServer = await createMockIngestServer();
    // Make the mock respond like the Anthropic messages API.
    providerServer.responseOverride.body = {
        id: "msg_123",
        type: "message",
        model: "claude-sonnet-4-20250514",
        usage: { input_tokens: 10, output_tokens: 5 },
        content: [{ type: "text", text: "Hi" }],
    };
    // Reroute anything addressed to the real Anthropic host to the local mock server.
    const realFetch = global.fetch;
    vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
        const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
        if (!url.includes("api.anthropic.com")) {
            return realFetch(input, init);
        }
        return realFetch(url.replace(/https:\/\/api\.anthropic\.com/, providerServer.url), init);
    });
    proxy = (0, proxy_server_js_1.startProxy)({
        port: 0,
        apiKey: "test-api-key",
        agentId: "agent-anthropic-key",
        endpoint: ingestServer.url,
        flushInterval: 60_000,
        maxBufferSize: 100,
        providerKeys: { anthropic: "sk-ant-injected-key" },
    });
    const proxyPort = proxy.server.address().port;
    await waitForServer(proxyPort);
    await httpRequest({
        hostname: "127.0.0.1",
        port: proxyPort,
        path: "/v1/messages",
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "x-target-url": "https://api.anthropic.com",
        },
        body: JSON.stringify({ model: "claude-sonnet-4-20250514", messages: [] }),
    });
    expect(providerServer.receivedRequests).toHaveLength(1);
    // Anthropic authenticates via x-api-key rather than Authorization.
    expect(providerServer.receivedRequests[0].headers["x-api-key"]).toBe("sk-ant-injected-key");
    vitest_1.vi.restoreAllMocks();
});
// -----------------------------------------------------------------------
// Rate limiting
// -----------------------------------------------------------------------
(0, vitest_1.it)("returns 429 when provider rate limit is exceeded", async () => {
    const expect = vitest_1.expect;
    providerServer = await createMockProviderServer();
    ingestServer = await createMockIngestServer();
    // Reroute anything addressed to the real OpenAI host to the local mock server.
    const realFetch = global.fetch;
    vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
        const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
        if (!url.includes("api.openai.com")) {
            return realFetch(input, init);
        }
        return realFetch(url.replace(/https:\/\/api\.openai\.com/, providerServer.url), init);
    });
    proxy = (0, proxy_server_js_1.startProxy)({
        port: 0,
        apiKey: "test-api-key",
        agentId: "agent-rate-limit",
        endpoint: ingestServer.url,
        flushInterval: 60_000,
        maxBufferSize: 100,
        rateLimits: { openai: { maxRequests: 2, windowSeconds: 60 } },
    });
    const proxyPort = proxy.server.address().port;
    await waitForServer(proxyPort);
    const makeRequest = () => httpRequest({
        hostname: "127.0.0.1",
        port: proxyPort,
        path: "/v1/chat/completions",
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "x-target-url": "https://api.openai.com",
        },
        body: JSON.stringify({ model: "gpt-4o", messages: [] }),
    });
    // Two requests fit inside the window...
    expect((await makeRequest()).status).toBe(200);
    expect((await makeRequest()).status).toBe(200);
    // ...the third must be rejected with a Retry-After hint.
    const limited = await makeRequest();
    expect(limited.status).toBe(429);
    expect(limited.headers["retry-after"]).toBeDefined();
    const payload = JSON.parse(limited.body);
    expect(payload.error.message).toContain("Rate limit exceeded");
    expect(payload.retry_after_seconds).toBeGreaterThanOrEqual(1);
    vitest_1.vi.restoreAllMocks();
});
vitest_1.it.skip("rate limiting is per-provider — other providers are unaffected", async () => {
    const expect = vitest_1.expect;
    providerServer = await createMockProviderServer();
    ingestServer = await createMockIngestServer();
    // Route both real provider hosts to the same local mock server.
    const realFetch = global.fetch;
    vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
        const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
        if (url.includes("api.openai.com") || url.includes("api.anthropic.com")) {
            const redirected = url
                .replace(/https:\/\/api\.openai\.com/, providerServer.url)
                .replace(/https:\/\/api\.anthropic\.com/, providerServer.url);
            return realFetch(redirected, init);
        }
        return realFetch(input, init);
    });
    proxy = (0, proxy_server_js_1.startProxy)({
        port: 0,
        apiKey: "test-api-key",
        agentId: "agent-rate-per-provider",
        endpoint: ingestServer.url,
        flushInterval: 60_000,
        maxBufferSize: 100,
        rateLimits: { openai: { maxRequests: 1, windowSeconds: 60 } },
    });
    const proxyPort = proxy.server.address().port;
    await waitForServer(proxyPort);
    // Small helper so each provider request differs only in path/target/model.
    const send = (path, target, model) => httpRequest({
        hostname: "127.0.0.1",
        port: proxyPort,
        path,
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "x-target-url": target,
        },
        body: JSON.stringify({ model, messages: [] }),
    });
    // First OpenAI request consumes the entire window.
    const first = await send("/v1/chat/completions", "https://api.openai.com", "gpt-4o");
    expect(first.status).toBe(200);
    // Second OpenAI request is over the limit.
    const second = await send("/v1/chat/completions", "https://api.openai.com", "gpt-4o");
    expect(second.status).toBe(429);
    // Anthropic has no configured limit and must remain reachable.
    providerServer.responseOverride.body = {
        id: "msg_123",
        type: "message",
        model: "claude-sonnet-4-20250514",
        usage: { input_tokens: 10, output_tokens: 5 },
        content: [{ type: "text", text: "Hi" }],
    };
    const third = await send("/v1/messages", "https://api.anthropic.com", "claude-sonnet-4-20250514");
    expect(third.status).toBe(200);
    vitest_1.vi.restoreAllMocks();
});
// -----------------------------------------------------------------------
// Hostname-only key injection security
// -----------------------------------------------------------------------
vitest_1.it.skip("does NOT inject key when x-target-url has non-provider hostname but matching path", async () => {
    const expect = vitest_1.expect;
    providerServer = await createMockProviderServer();
    ingestServer = await createMockIngestServer();
    proxy = (0, proxy_server_js_1.startProxy)({
        port: 0,
        apiKey: "test-api-key",
        agentId: "agent-no-inject",
        endpoint: ingestServer.url,
        flushInterval: 60_000,
        maxBufferSize: 100,
        providerKeys: { openai: "sk-should-not-leak" },
    });
    const proxyPort = proxy.server.address().port;
    await waitForServer(proxyPort);
    // Same path shape as the OpenAI API, but the target host is not a known provider.
    await httpRequest({
        hostname: "127.0.0.1",
        port: proxyPort,
        path: "/v1/chat/completions",
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "x-target-url": providerServer.url,
        },
        body: JSON.stringify({ model: "gpt-4o", messages: [] }),
    });
    expect(providerServer.receivedRequests).toHaveLength(1);
    // Key injection keys off hostname only — no credential should leak here.
    expect(providerServer.receivedRequests[0].headers["authorization"]).toBeUndefined();
});
+ });
1175
+ //# sourceMappingURL=proxy-server.test.js.map