@agentgazer/proxy 0.3.4 → 0.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1198,5 +1198,473 @@ function waitForServer(port, maxAttempts = 20) {
  // No authorization header should be injected for non-provider hostname
  (0, vitest_1.expect)(providerServer.receivedRequests[0].headers["authorization"]).toBeUndefined();
  });
+ // -----------------------------------------------------------------------
+ // Cross-Provider Override Tests
+ // -----------------------------------------------------------------------
+ // Helper: Create a test database with proper schema matching the production DB
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ function createTestDb(agentId) {
+ const Database = require("better-sqlite3");
+ const db = new Database(":memory:");
+ db.exec(`
+ CREATE TABLE agents (
+ id TEXT PRIMARY KEY,
+ agent_id TEXT NOT NULL UNIQUE,
+ name TEXT,
+ active INTEGER NOT NULL DEFAULT 1,
+ deactivated_by TEXT,
+ budget_limit REAL,
+ allowed_hours_start INTEGER,
+ allowed_hours_end INTEGER,
+ kill_switch_enabled INTEGER NOT NULL DEFAULT 0,
+ kill_switch_window_size INTEGER NOT NULL DEFAULT 20,
+ kill_switch_threshold REAL NOT NULL DEFAULT 10.0,
+ created_at TEXT NOT NULL DEFAULT (datetime('now')),
+ updated_at TEXT NOT NULL DEFAULT (datetime('now'))
+ );
+ CREATE TABLE agent_model_rules (
+ id TEXT PRIMARY KEY,
+ agent_id TEXT NOT NULL,
+ provider TEXT NOT NULL,
+ model_override TEXT,
+ target_provider TEXT,
+ created_at TEXT NOT NULL DEFAULT (datetime('now')),
+ updated_at TEXT NOT NULL DEFAULT (datetime('now')),
+ UNIQUE(agent_id, provider)
+ );
+ CREATE TABLE agent_rate_limits (
+ id TEXT PRIMARY KEY,
+ agent_id TEXT NOT NULL,
+ provider TEXT NOT NULL,
+ max_requests INTEGER NOT NULL,
+ window_seconds INTEGER NOT NULL
+ );
+ CREATE TABLE provider_settings (
+ provider TEXT PRIMARY KEY,
+ active INTEGER DEFAULT 1,
+ rate_limit_max_requests INTEGER,
+ rate_limit_window_seconds INTEGER
+ );
+ `);
+ db.prepare("INSERT INTO agents (id, agent_id, name) VALUES (?, ?, ?)").run(`id-${agentId}`, agentId, "Test Agent");
+ return db;
+ }
+ (0, vitest_1.describe)("Cross-Provider Override", () => {
+ (0, vitest_1.it)("converts OpenAI request to Anthropic format when override is configured", async () => {
+ // Create mock Anthropic server that expects Anthropic-format requests
+ const anthropicServer = await new Promise((resolve) => {
+ const receivedRequests = [];
+ const server = http.createServer((req, res) => {
+ const chunks = [];
+ req.on("data", (chunk) => chunks.push(chunk));
+ req.on("end", () => {
+ receivedRequests.push({
+ headers: req.headers,
+ body: Buffer.concat(chunks).toString("utf-8"),
+ });
+ // Return Anthropic-format response
+ const response = {
+ id: "msg_cross_provider",
+ type: "message",
+ role: "assistant",
+ content: [{ type: "text", text: "Hello from Anthropic!" }],
+ model: "claude-sonnet-4-20250514",
+ stop_reason: "end_turn",
+ usage: { input_tokens: 50, output_tokens: 20 },
+ };
+ const payload = JSON.stringify(response);
+ res.writeHead(200, {
+ "Content-Type": "application/json",
+ "Content-Length": Buffer.byteLength(payload).toString(),
+ });
+ res.end(payload);
+ });
+ });
+ server.listen(0, "127.0.0.1", () => {
+ const addr = server.address();
+ resolve({ server, port: addr.port, receivedRequests });
+ });
+ });
+ ingestServer = await createMockIngestServer();
+ // Mock fetch to redirect Anthropic API calls to our mock server
+ const originalFetch = global.fetch;
+ vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
+ const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
+ if (url.includes("api.anthropic.com")) {
+ const redirectUrl = url.replace(/https:\/\/api\.anthropic\.com/, `http://127.0.0.1:${anthropicServer.port}`);
+ return originalFetch(redirectUrl, init);
+ }
+ return originalFetch(input, init);
+ });
+ // Create mock DB with model override rule - use unique agent ID to avoid cache issues
+ const agentId = `test-agent-openai-to-anthropic-${Date.now()}`;
+ const db = createTestDb(agentId);
+ db.prepare("INSERT INTO agent_model_rules (id, agent_id, provider, model_override, target_provider) VALUES (?, ?, ?, ?, ?)").run("rule-1", agentId, "openai", "claude-sonnet-4-20250514", "anthropic");
+ proxy = (0, proxy_server_js_1.startProxy)({
+ port: 0,
+ apiKey: "test-api-key",
+ agentId: agentId,
+ endpoint: ingestServer.url,
+ flushInterval: 60_000,
+ maxBufferSize: 1,
+ providerKeys: {
+ openai: "sk-openai-key",
+ anthropic: "sk-ant-key",
+ },
+ db,
+ });
+ const proxyPort = proxy.server.address().port;
+ await waitForServer(proxyPort);
+ // Send OpenAI-format request to OpenAI endpoint
+ const res = await httpRequest({
+ hostname: "127.0.0.1",
+ port: proxyPort,
+ path: `/agents/${agentId}/openai/v1/chat/completions`,
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({
+ model: "gpt-4o",
+ messages: [
+ { role: "system", content: "You are helpful." },
+ { role: "user", content: "Hello" },
+ ],
+ max_tokens: 100,
+ }),
+ });
+ // Response should be converted back to OpenAI format
+ (0, vitest_1.expect)(res.status).toBe(200);
+ const responseBody = JSON.parse(res.body);
+ (0, vitest_1.expect)(responseBody.object).toBe("chat.completion");
+ (0, vitest_1.expect)(responseBody.choices[0].message.content).toBe("Hello from Anthropic!");
+ (0, vitest_1.expect)(responseBody.usage.prompt_tokens).toBe(50);
+ (0, vitest_1.expect)(responseBody.usage.completion_tokens).toBe(20);
+ // Verify the request was converted to Anthropic format
+ (0, vitest_1.expect)(anthropicServer.receivedRequests).toHaveLength(1);
+ const sentRequest = JSON.parse(anthropicServer.receivedRequests[0].body);
+ (0, vitest_1.expect)(sentRequest.model).toBe("claude-sonnet-4-20250514");
+ (0, vitest_1.expect)(sentRequest.system).toBe("You are helpful."); // System extracted to top-level
+ (0, vitest_1.expect)(sentRequest.messages).toHaveLength(1);
+ (0, vitest_1.expect)(sentRequest.messages[0].role).toBe("user");
+ (0, vitest_1.expect)(sentRequest.max_tokens).toBe(100);
+ // Verify Anthropic auth header was used
+ (0, vitest_1.expect)(anthropicServer.receivedRequests[0].headers["x-api-key"]).toBe("sk-ant-key");
+ vitest_1.vi.restoreAllMocks();
+ await closeServer(anthropicServer.server);
+ db.close();
+ });
+ (0, vitest_1.it)("converts Anthropic request to OpenAI format when override is configured", async () => {
+ providerServer = await createMockProviderServer();
+ ingestServer = await createMockIngestServer();
+ // Mock fetch to redirect OpenAI API calls to our mock server
+ const originalFetch = global.fetch;
+ vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
+ const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
+ if (url.includes("api.openai.com")) {
+ const redirectUrl = url.replace(/https:\/\/api\.openai\.com/, providerServer.url);
+ return originalFetch(redirectUrl, init);
+ }
+ return originalFetch(input, init);
+ });
+ // Create mock DB with unique agent ID
+ const agentId = `test-agent-anthropic-to-openai-${Date.now()}`;
+ const db = createTestDb(agentId);
+ db.prepare("INSERT INTO agent_model_rules (id, agent_id, provider, model_override, target_provider) VALUES (?, ?, ?, ?, ?)").run("rule-1", agentId, "anthropic", "gpt-4o", "openai");
+ proxy = (0, proxy_server_js_1.startProxy)({
+ port: 0,
+ apiKey: "test-api-key",
+ agentId: agentId,
+ endpoint: ingestServer.url,
+ flushInterval: 60_000,
+ maxBufferSize: 1,
+ providerKeys: {
+ openai: "sk-openai-key",
+ anthropic: "sk-ant-key",
+ },
+ db,
+ });
+ const proxyPort = proxy.server.address().port;
+ await waitForServer(proxyPort);
+ // Send Anthropic-format request to Anthropic endpoint
+ const res = await httpRequest({
+ hostname: "127.0.0.1",
+ port: proxyPort,
+ path: `/agents/${agentId}/anthropic/v1/messages`,
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({
+ model: "claude-sonnet-4-20250514",
+ system: "You are helpful.",
+ messages: [{ role: "user", content: "Hello" }],
+ max_tokens: 100,
+ }),
+ });
+ // Response should be converted back to Anthropic format
+ (0, vitest_1.expect)(res.status).toBe(200);
+ const responseBody = JSON.parse(res.body);
+ (0, vitest_1.expect)(responseBody.type).toBe("message");
+ (0, vitest_1.expect)(responseBody.content[0].type).toBe("text");
+ // Verify the request was converted to OpenAI format
+ (0, vitest_1.expect)(providerServer.receivedRequests).toHaveLength(1);
+ const sentRequest = JSON.parse(providerServer.receivedRequests[0].body);
+ (0, vitest_1.expect)(sentRequest.model).toBe("gpt-4o");
+ (0, vitest_1.expect)(sentRequest.messages).toHaveLength(2);
+ (0, vitest_1.expect)(sentRequest.messages[0].role).toBe("system");
+ (0, vitest_1.expect)(sentRequest.messages[0].content).toBe("You are helpful.");
+ (0, vitest_1.expect)(sentRequest.messages[1].role).toBe("user");
+ // Verify OpenAI auth header was used
+ (0, vitest_1.expect)(providerServer.receivedRequests[0].headers["authorization"]).toBe("Bearer sk-openai-key");
+ vitest_1.vi.restoreAllMocks();
+ db.close();
+ });
+ (0, vitest_1.it)("triggers cross-provider override even when request body has no model field", async () => {
+ // This tests the fix for Google → Anthropic override where Google requests
+ // don't have model in the body (model is in URL path)
+ // Create mock Anthropic server
+ const anthropicServer = await new Promise((resolve) => {
+ const receivedRequests = [];
+ const server = http.createServer((req, res) => {
+ const chunks = [];
+ req.on("data", (chunk) => chunks.push(chunk));
+ req.on("end", () => {
+ receivedRequests.push({
+ body: Buffer.concat(chunks).toString("utf-8"),
+ });
+ const response = {
+ id: "msg_123",
+ type: "message",
+ role: "assistant",
+ content: [{ type: "text", text: "Hello!" }],
+ model: "claude-sonnet-4-20250514",
+ stop_reason: "end_turn",
+ usage: { input_tokens: 10, output_tokens: 5 },
+ };
+ const payload = JSON.stringify(response);
+ res.writeHead(200, {
+ "Content-Type": "application/json",
+ "Content-Length": Buffer.byteLength(payload).toString(),
+ });
+ res.end(payload);
+ });
+ });
+ server.listen(0, "127.0.0.1", () => {
+ const addr = server.address();
+ resolve({ server, port: addr.port, receivedRequests });
+ });
+ });
+ ingestServer = await createMockIngestServer();
+ // Mock fetch
+ const originalFetch = global.fetch;
+ vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
+ const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
+ if (url.includes("api.anthropic.com")) {
+ const redirectUrl = url.replace(/https:\/\/api\.anthropic\.com/, `http://127.0.0.1:${anthropicServer.port}`);
+ return originalFetch(redirectUrl, init);
+ }
+ return originalFetch(input, init);
+ });
+ // Create mock DB with unique agent ID
+ const agentId = `test-agent-google-no-model-${Date.now()}`;
+ const db = createTestDb(agentId);
+ db.prepare("INSERT INTO agent_model_rules (id, agent_id, provider, model_override, target_provider) VALUES (?, ?, ?, ?, ?)").run("rule-1", agentId, "google", "claude-sonnet-4-20250514", "anthropic");
+ proxy = (0, proxy_server_js_1.startProxy)({
+ port: 0,
+ apiKey: "test-api-key",
+ agentId: agentId,
+ endpoint: ingestServer.url,
+ flushInterval: 60_000,
+ maxBufferSize: 1,
+ providerKeys: {
+ google: "google-api-key",
+ anthropic: "sk-ant-key",
+ },
+ db,
+ });
+ const proxyPort = proxy.server.address().port;
+ await waitForServer(proxyPort);
+ // Send request to Google endpoint WITHOUT model in body
+ const res = await httpRequest({
+ hostname: "127.0.0.1",
+ port: proxyPort,
+ path: `/agents/${agentId}/google/v1/chat/completions`,
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({
+ // No model field! This is the key test case
+ messages: [{ role: "user", content: "Hello" }],
+ max_tokens: 100,
+ }),
+ });
+ // Should have been redirected to Anthropic
+ (0, vitest_1.expect)(res.status).toBe(200);
+ (0, vitest_1.expect)(anthropicServer.receivedRequests).toHaveLength(1);
+ // Verify the override model was applied
+ const sentRequest = JSON.parse(anthropicServer.receivedRequests[0].body);
+ (0, vitest_1.expect)(sentRequest.model).toBe("claude-sonnet-4-20250514");
+ vitest_1.vi.restoreAllMocks();
+ await closeServer(anthropicServer.server);
+ db.close();
+ });
+ (0, vitest_1.it)("does not apply cross-provider override when no rule exists", async () => {
+ providerServer = await createMockProviderServer();
+ ingestServer = await createMockIngestServer();
+ // Mock fetch
+ const originalFetch = global.fetch;
+ vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
+ const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
+ if (url.includes("api.openai.com")) {
+ const redirectUrl = url.replace(/https:\/\/api\.openai\.com/, providerServer.url);
+ return originalFetch(redirectUrl, init);
+ }
+ return originalFetch(input, init);
+ });
+ // Create mock DB with unique agent ID but NO model rules
+ const agentId = `test-agent-no-rules-${Date.now()}`;
+ const db = createTestDb(agentId);
+ // No model rules inserted
+ proxy = (0, proxy_server_js_1.startProxy)({
+ port: 0,
+ apiKey: "test-api-key",
+ agentId: agentId,
+ endpoint: ingestServer.url,
+ flushInterval: 60_000,
+ maxBufferSize: 1,
+ providerKeys: {
+ openai: "sk-openai-key",
+ },
+ db,
+ });
+ const proxyPort = proxy.server.address().port;
+ await waitForServer(proxyPort);
+ // Send request - should go directly to OpenAI without conversion
+ const res = await httpRequest({
+ hostname: "127.0.0.1",
+ port: proxyPort,
+ path: `/agents/${agentId}/openai/v1/chat/completions`,
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({
+ model: "gpt-4o",
+ messages: [{ role: "user", content: "Hello" }],
+ }),
+ });
+ (0, vitest_1.expect)(res.status).toBe(200);
+ // Request should have been forwarded as-is (OpenAI format)
+ (0, vitest_1.expect)(providerServer.receivedRequests).toHaveLength(1);
+ const sentRequest = JSON.parse(providerServer.receivedRequests[0].body);
+ (0, vitest_1.expect)(sentRequest.model).toBe("gpt-4o");
+ (0, vitest_1.expect)(sentRequest.messages[0].role).toBe("user");
+ vitest_1.vi.restoreAllMocks();
+ db.close();
+ });
+ (0, vitest_1.it)("returns error when cross-provider override target has no API key", async () => {
+ ingestServer = await createMockIngestServer();
+ // Create mock DB with unique agent ID
+ const agentId = `test-agent-no-key-${Date.now()}`;
+ const db = createTestDb(agentId);
+ db.prepare("INSERT INTO agent_model_rules (id, agent_id, provider, model_override, target_provider) VALUES (?, ?, ?, ?, ?)").run("rule-1", agentId, "openai", "claude-sonnet-4-20250514", "anthropic");
+ proxy = (0, proxy_server_js_1.startProxy)({
+ port: 0,
+ apiKey: "test-api-key",
+ agentId: agentId,
+ endpoint: ingestServer.url,
+ flushInterval: 60_000,
+ maxBufferSize: 1,
+ providerKeys: {
+ openai: "sk-openai-key",
+ // No anthropic key!
+ },
+ db,
+ });
+ const proxyPort = proxy.server.address().port;
+ await waitForServer(proxyPort);
+ const res = await httpRequest({
+ hostname: "127.0.0.1",
+ port: proxyPort,
+ path: `/agents/${agentId}/openai/v1/chat/completions`,
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({
+ model: "gpt-4o",
+ messages: [{ role: "user", content: "Hello" }],
+ }),
+ });
+ (0, vitest_1.expect)(res.status).toBe(400);
+ const body = JSON.parse(res.body);
+ (0, vitest_1.expect)(body.error).toContain("no API key for anthropic");
+ db.close();
+ });
+ (0, vitest_1.it)("same-provider override works (model change without cross-provider)", async () => {
+ ingestServer = await createMockIngestServer();
+ // Create mock DB with same-provider override (target_provider is NULL)
+ const agentId = `test-agent-same-provider-${Date.now()}`;
+ const db = createTestDb(agentId);
+ db.prepare("INSERT INTO agent_model_rules (id, agent_id, provider, model_override, target_provider) VALUES (?, ?, ?, ?, ?)").run("rule-1", agentId, "openai", "gpt-4o-mini", null // same provider, different model
+ );
+ // Mock fetch - verify same-provider override by checking the response
+ const originalFetch = global.fetch;
+ vitest_1.vi.spyOn(global, "fetch").mockImplementation(async (input, init) => {
+ const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
+ if (url.includes("api.openai.com")) {
+ // Echo back the model from the request body to verify override
+ let reqModel = "unknown";
+ if (init?.body) {
+ const bodyText = typeof init.body === "string" ? init.body : await new Response(init.body).text();
+ const parsed = JSON.parse(bodyText);
+ reqModel = parsed.model;
+ }
+ return new Response(JSON.stringify({
+ id: "chatcmpl-123",
+ object: "chat.completion",
+ model: reqModel, // Echo the model we received
+ choices: [{ index: 0, message: { role: "assistant", content: "Hello!" }, finish_reason: "stop" }],
+ usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
+ }), {
+ status: 200,
+ headers: { "Content-Type": "application/json" },
+ });
+ }
+ return originalFetch(input, init);
+ });
+ proxy = (0, proxy_server_js_1.startProxy)({
+ port: 0,
+ apiKey: "test-api-key",
+ agentId: agentId,
+ endpoint: ingestServer.url,
+ flushInterval: 60_000,
+ maxBufferSize: 1,
+ providerKeys: { openai: "sk-openai-key" },
+ db,
+ });
+ const proxyPort = proxy.server.address().port;
+ await waitForServer(proxyPort);
+ const res = await httpRequest({
+ hostname: "127.0.0.1",
+ port: proxyPort,
+ path: `/agents/${agentId}/openai/v1/chat/completions`,
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ model: "gpt-4o", // Original model
+ messages: [{ role: "user", content: "Hello" }],
+ }),
+ });
+ (0, vitest_1.expect)(res.status).toBe(200);
+ const response = JSON.parse(res.body);
+ // The mock echoes the model it received - should be the overridden model
+ (0, vitest_1.expect)(response.model).toBe("gpt-4o-mini");
+ vitest_1.vi.restoreAllMocks();
+ db.close();
+ });
1668
+ });
1201
1669
  });
1202
1670
  //# sourceMappingURL=proxy-server.test.js.map
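
Taken together, the added tests describe the cross-provider override path: a row in agent_model_rules maps an agent's incoming provider to an optional model_override and target_provider, and the proxy rewrites the request into the target provider's format, signs it with that provider's key, and converts the response back into the caller's format (or returns a 400 when the target key is missing). The sketch below is an illustrative usage summary derived from those tests, not part of the published diff; the require path for startProxy, the database file name, and all key/endpoint values are assumptions or placeholders.

// Route one agent's OpenAI-format traffic to Anthropic via an override rule.
const Database = require("better-sqlite3");
const { startProxy } = require("@agentgazer/proxy"); // assumed public entry point; tests import ./proxy-server.js
const db = new Database("agentgazer.db"); // hypothetical path; the tests use ":memory:" with the schema shown above
// Assumes the agent row already exists in the agents table (createTestDb inserts it in the tests).
db.prepare("INSERT INTO agent_model_rules (id, agent_id, provider, model_override, target_provider) VALUES (?, ?, ?, ?, ?)")
  .run("rule-1", "my-agent", "openai", "claude-sonnet-4-20250514", "anthropic");
startProxy({
  port: 0,
  apiKey: "test-api-key", // AgentGazer ingest key (placeholder)
  agentId: "my-agent",
  endpoint: "http://127.0.0.1:4000", // ingest endpoint (placeholder)
  flushInterval: 60_000, // values mirror the tests; production defaults are not shown in the diff
  maxBufferSize: 1,
  providerKeys: {
    openai: "sk-openai-key",
    anthropic: "sk-ant-key", // the override target needs its own key, or the proxy responds with 400
  },
  db,
});

Leaving target_provider NULL keeps the request on the original provider and only swaps the model, as the final "same-provider override" test demonstrates.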