@oh-my-pi/pi-ai 8.0.16 → 8.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +17 -11
- package/src/cli.ts +1 -1
- package/src/index.ts +2 -1
- package/src/models.generated.ts +100 -101
- package/src/providers/anthropic.ts +43 -12
- package/src/providers/cursor.ts +1 -1
- package/src/providers/google-gemini-cli.ts +1 -1
- package/src/providers/openai-codex-responses.ts +10 -10
- package/src/providers/openai-completions.ts +10 -10
- package/src/providers/openai-responses.ts +12 -12
- package/src/utils/oauth/github-copilot.ts +1 -1
- package/src/utils/retry.ts +78 -0
- package/tsconfig.json +0 -42
package/package.json
CHANGED

@@ -1,17 +1,30 @@
 {
   "name": "@oh-my-pi/pi-ai",
-  "version": "8.0.16",
+  "version": "8.1.0",
   "description": "Unified LLM API with automatic model discovery and provider configuration",
   "type": "module",
   "main": "./src/index.ts",
   "types": "./src/index.ts",
+  "exports": {
+    ".": {
+      "types": "./src/index.ts",
+      "import": "./src/index.ts"
+    },
+    "./utils/*": {
+      "types": "./src/utils/*.ts",
+      "import": "./src/utils/*.ts"
+    },
+    "./*": {
+      "types": "./src/*",
+      "import": "./src/*"
+    }
+  },
   "bin": {
     "pi-ai": "./src/cli.ts"
   },
   "files": [
     "src",
-    "README.md"
-    "tsconfig.json"
+    "README.md"
   ],
   "scripts": {
     "generate-models": "bun scripts/generate-models.ts",
@@ -19,7 +32,7 @@
     "prepublishOnly": "cp tsconfig.publish.json tsconfig.json"
   },
   "dependencies": {
-    "@oh-my-pi/pi-utils": "
+    "@oh-my-pi/pi-utils": "workspace:*",
     "@anthropic-ai/sdk": "0.71.2",
     "@aws-sdk/client-bedrock-runtime": "^3.968.0",
     "@bufbuild/protobuf": "^2.10.2",
@@ -57,12 +70,5 @@
   },
   "devDependencies": {
     "@types/node": "^24.3.0"
-  },
-  "exports": {
-    ".": {
-      "types": "./src/index.ts",
-      "import": "./src/index.ts"
-    },
-    "./*": "./src/*"
   }
 }
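The rewritten `exports` map is the functional change here: it moves above `bin` and gains a dedicated `"./utils/*"` pattern with explicit `types`/`import` conditions resolving to `./src/utils/*.ts`, where the old map's `"./*"` was a bare string target. A sketch of the deep imports this map supports; the two import specifiers are confirmed by this diff (retry.ts is added and re-exported from the root below), the surrounding script is illustrative:

```ts
// "." -> ./src/index.ts, which (per the src/index.ts diff below) re-exports ./utils/retry
import { extractHttpStatusFromError } from "@oh-my-pi/pi-ai";
// "./utils/*" -> ./src/utils/*.ts, so deep utility imports now resolve with types too
import { isRetryableError } from "@oh-my-pi/pi-ai/utils/retry";

console.log(extractHttpStatusFromError(new Error("error (429)"))); // 429
console.log(isRetryableError(new Error("HTTP 503 service unavailable"))); // true
```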
package/src/cli.ts
CHANGED

@@ -2,12 +2,12 @@
 import { createInterface } from "readline";
 import { CliAuthStorage } from "./storage";
 import "./utils/migrate-env";
+import { getOAuthProviders } from "./utils/oauth";
 import { loginAnthropic } from "./utils/oauth/anthropic";
 import { loginCursor } from "./utils/oauth/cursor";
 import { loginGitHubCopilot } from "./utils/oauth/github-copilot";
 import { loginAntigravity } from "./utils/oauth/google-antigravity";
 import { loginGeminiCli } from "./utils/oauth/google-gemini-cli";
-import { getOAuthProviders } from "./utils/oauth/index";
 import { loginOpenAICodex } from "./utils/oauth/openai-codex";
 import type { OAuthCredentials, OAuthProvider } from "./utils/oauth/types";

package/src/index.ts
CHANGED

@@ -18,7 +18,8 @@ export * from "./usage/google-antigravity";
 export * from "./usage/openai-codex";
 export * from "./usage/zai";
 export * from "./utils/event-stream";
-export * from "./utils/oauth
+export * from "./utils/oauth";
 export * from "./utils/overflow";
+export * from "./utils/retry";
 export * from "./utils/typebox-helpers";
 export * from "./utils/validation";
package/src/models.generated.ts
CHANGED

@@ -1726,7 +1726,7 @@ export const MODELS = {
         cacheRead: 0,
         cacheWrite: 0,
       },
-      contextWindow:
+      contextWindow: 64000,
       maxTokens: 16384,
     } satisfies Model<"openai-completions">,
     "gpt-4o": {
@@ -1766,24 +1766,6 @@ export const MODELS = {
       contextWindow: 128000,
       maxTokens: 128000,
     } satisfies Model<"openai-responses">,
-    "gpt-5-codex": {
-      id: "gpt-5-codex",
-      name: "GPT-5-Codex",
-      api: "openai-responses",
-      provider: "github-copilot",
-      baseUrl: "https://api.individual.githubcopilot.com",
-      headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"},
-      reasoning: true,
-      input: ["text", "image"],
-      cost: {
-        input: 0,
-        output: 0,
-        cacheRead: 0,
-        cacheWrite: 0,
-      },
-      contextWindow: 128000,
-      maxTokens: 128000,
-    } satisfies Model<"openai-responses">,
     "gpt-5-mini": {
       id: "gpt-5-mini",
       name: "GPT-5-mini",
@@ -3641,7 +3623,7 @@ export const MODELS = {
       cost: {
         input: 1.25,
         output: 10,
-        cacheRead: 0.
+        cacheRead: 0.125,
         cacheWrite: 0,
       },
       contextWindow: 400000,
@@ -3692,7 +3674,7 @@ export const MODELS = {
       cost: {
         input: 0.25,
         output: 2,
-        cacheRead: 0.
+        cacheRead: 0.025,
         cacheWrite: 0,
       },
       contextWindow: 400000,
@@ -3709,7 +3691,7 @@ export const MODELS = {
       cost: {
         input: 0.05,
         output: 0.4,
-        cacheRead: 0.
+        cacheRead: 0.005,
         cacheWrite: 0,
       },
       contextWindow: 400000,
@@ -4314,6 +4296,23 @@ export const MODELS = {
       contextWindow: 204800,
       maxTokens: 131072,
     } satisfies Model<"openai-completions">,
+    "glm-4.7": {
+      id: "glm-4.7",
+      name: "GLM-4.7",
+      api: "openai-completions",
+      provider: "opencode",
+      baseUrl: "https://opencode.ai/zen/v1",
+      reasoning: true,
+      input: ["text"],
+      cost: {
+        input: 0.6,
+        output: 2.2,
+        cacheRead: 0.1,
+        cacheWrite: 0,
+      },
+      contextWindow: 204800,
+      maxTokens: 131072,
+    } satisfies Model<"openai-completions">,
     "glm-4.7-free": {
       id: "glm-4.7-free",
       name: "GLM-4.7",
@@ -4615,7 +4614,7 @@ export const MODELS = {
       input: ["text"],
       cost: {
         input: 0.09,
-        output: 0.
+        output: 0.44999999999999996,
         cacheRead: 0,
         cacheWrite: 0,
       },
@@ -5028,7 +5027,7 @@ export const MODELS = {
         cacheWrite: 0,
       },
       contextWindow: 262144,
-      maxTokens:
+      maxTokens: 32768,
     } satisfies Model<"openai-completions">,
     "cohere/command-r-08-2024": {
       id: "cohere/command-r-08-2024",
@@ -5297,7 +5296,7 @@ export const MODELS = {
         input: 0.09999999999999999,
         output: 0.39999999999999997,
         cacheRead: 0.024999999999999998,
-        cacheWrite: 0.
+        cacheWrite: 0.08333333333333333,
       },
       contextWindow: 1048576,
       maxTokens: 8192,
@@ -5311,10 +5310,10 @@ export const MODELS = {
       reasoning: false,
       input: ["text", "image"],
       cost: {
-        input: 0,
-        output: 0,
-        cacheRead: 0,
-        cacheWrite: 0,
+        input: 0.09999999999999999,
+        output: 0.39999999999999997,
+        cacheRead: 0.024999999999999998,
+        cacheWrite: 0.08333333333333333,
       },
       contextWindow: 1048576,
       maxTokens: 8192,
@@ -5348,7 +5347,7 @@ export const MODELS = {
         input: 0.3,
         output: 2.5,
         cacheRead: 0.03,
-        cacheWrite: 0.
+        cacheWrite: 0.08333333333333333,
       },
       contextWindow: 1048576,
       maxTokens: 65535,
@@ -5365,7 +5364,7 @@ export const MODELS = {
         input: 0.09999999999999999,
         output: 0.39999999999999997,
         cacheRead: 0.01,
-        cacheWrite: 0.
+        cacheWrite: 0.08333333333333333,
       },
       contextWindow: 1048576,
       maxTokens: 65535,
@@ -5382,10 +5381,10 @@ export const MODELS = {
         input: 0.09999999999999999,
         output: 0.39999999999999997,
         cacheRead: 0.01,
-        cacheWrite: 0.
+        cacheWrite: 0.08333333333333333,
       },
       contextWindow: 1048576,
-      maxTokens:
+      maxTokens: 65535,
     } satisfies Model<"openai-completions">,
     "google/gemini-2.5-flash-preview-09-2025": {
       id: "google/gemini-2.5-flash-preview-09-2025",
@@ -5398,8 +5397,8 @@ export const MODELS = {
       cost: {
         input: 0.3,
         output: 2.5,
-        cacheRead: 0.
-        cacheWrite: 0.
+        cacheRead: 0.03,
+        cacheWrite: 0.08333333333333333,
       },
       contextWindow: 1048576,
       maxTokens: 65535,
@@ -5432,7 +5431,7 @@ export const MODELS = {
       cost: {
         input: 1.25,
         output: 10,
-        cacheRead: 0.
+        cacheRead: 0.125,
         cacheWrite: 0.375,
       },
       contextWindow: 1048576,
@@ -5449,7 +5448,7 @@ export const MODELS = {
       cost: {
         input: 1.25,
         output: 10,
-        cacheRead: 0.
+        cacheRead: 0.125,
         cacheWrite: 0.375,
       },
       contextWindow: 1048576,
@@ -5467,7 +5466,7 @@ export const MODELS = {
         input: 0.5,
         output: 3,
         cacheRead: 0.049999999999999996,
-        cacheWrite: 0,
+        cacheWrite: 0.08333333333333333,
       },
       contextWindow: 1048576,
       maxTokens: 65535,
@@ -5754,12 +5753,12 @@ export const MODELS = {
       input: ["text"],
       cost: {
         input: 0.27,
-        output: 1.
+        output: 1.1,
         cacheRead: 0,
         cacheWrite: 0,
       },
       contextWindow: 196608,
-      maxTokens:
+      maxTokens: 196608,
     } satisfies Model<"openai-completions">,
     "mistralai/codestral-2508": {
       id: "mistralai/codestral-2508",
@@ -6048,7 +6047,7 @@ export const MODELS = {
         cacheWrite: 0,
       },
       contextWindow: 131072,
-      maxTokens:
+      maxTokens: 16384,
     } satisfies Model<"openai-completions">,
     "mistralai/mistral-saba": {
       id: "mistralai/mistral-saba",
@@ -7869,6 +7868,23 @@ export const MODELS = {
       contextWindow: 262144,
       maxTokens: 4096,
     } satisfies Model<"openai-completions">,
+    "qwen/qwen3-vl-235b-a22b-thinking": {
+      id: "qwen/qwen3-vl-235b-a22b-thinking",
+      name: "Qwen: Qwen3 VL 235B A22B Thinking",
+      api: "openai-completions",
+      provider: "openrouter",
+      baseUrl: "https://openrouter.ai/api/v1",
+      reasoning: true,
+      input: ["text", "image"],
+      cost: {
+        input: 0.44999999999999996,
+        output: 3.5,
+        cacheRead: 0,
+        cacheWrite: 0,
+      },
+      contextWindow: 262144,
+      maxTokens: 262144,
+    } satisfies Model<"openai-completions">,
     "qwen/qwen3-vl-30b-a3b-instruct": {
       id: "qwen/qwen3-vl-30b-a3b-instruct",
       name: "Qwen: Qwen3 VL 30B A3B Instruct",
@@ -8430,6 +8446,23 @@ export const MODELS = {
       contextWindow: 202752,
       maxTokens: 65535,
     } satisfies Model<"openai-completions">,
+    "z-ai/glm-4.7-flash": {
+      id: "z-ai/glm-4.7-flash",
+      name: "Z.AI: GLM 4.7 Flash",
+      api: "openai-completions",
+      provider: "openrouter",
+      baseUrl: "https://openrouter.ai/api/v1",
+      reasoning: true,
+      input: ["text"],
+      cost: {
+        input: 0.07,
+        output: 0.39999999999999997,
+        cacheRead: 0.01,
+        cacheWrite: 0,
+      },
+      contextWindow: 200000,
+      maxTokens: 131072,
+    } satisfies Model<"openai-completions">,
   },
   "vercel-ai-gateway": {
     "alibaba/qwen-3-14b": {
@@ -8562,7 +8595,7 @@ export const MODELS = {
       cost: {
         input: 1,
         output: 5,
-        cacheRead: 0,
+        cacheRead: 0.19999999999999998,
         cacheWrite: 0,
       },
       contextWindow: 1000000,
@@ -8619,23 +8652,6 @@ export const MODELS = {
       contextWindow: 200000,
       maxTokens: 4096,
     } satisfies Model<"anthropic-messages">,
-    "anthropic/claude-3-opus": {
-      id: "anthropic/claude-3-opus",
-      name: "Claude 3 Opus",
-      api: "anthropic-messages",
-      provider: "vercel-ai-gateway",
-      baseUrl: "https://ai-gateway.vercel.sh",
-      reasoning: false,
-      input: ["text", "image"],
-      cost: {
-        input: 15,
-        output: 75,
-        cacheRead: 0,
-        cacheWrite: 0,
-      },
-      contextWindow: 200000,
-      maxTokens: 8192,
-    } satisfies Model<"anthropic-messages">,
     "anthropic/claude-3.5-haiku": {
       id: "anthropic/claude-3.5-haiku",
       name: "Claude 3.5 Haiku",
@@ -8786,7 +8802,7 @@ export const MODELS = {
         cacheRead: 0.3,
         cacheWrite: 3.75,
       },
-      contextWindow:
+      contextWindow: 1000000,
       maxTokens: 64000,
     } satisfies Model<"anthropic-messages">,
     "anthropic/claude-sonnet-4.5": {
@@ -8803,7 +8819,7 @@ export const MODELS = {
         cacheRead: 0.3,
         cacheWrite: 3.75,
       },
-      contextWindow:
+      contextWindow: 1000000,
       maxTokens: 64000,
     } satisfies Model<"anthropic-messages">,
     "bytedance/seed-1.6": {
@@ -8925,40 +8941,6 @@ export const MODELS = {
       contextWindow: 128000,
       maxTokens: 64000,
     } satisfies Model<"anthropic-messages">,
-    "google/gemini-2.0-flash": {
-      id: "google/gemini-2.0-flash",
-      name: "Gemini 2.0 Flash",
-      api: "anthropic-messages",
-      provider: "vercel-ai-gateway",
-      baseUrl: "https://ai-gateway.vercel.sh",
-      reasoning: false,
-      input: ["text", "image"],
-      cost: {
-        input: 0.09999999999999999,
-        output: 0.39999999999999997,
-        cacheRead: 0.024999999999999998,
-        cacheWrite: 0,
-      },
-      contextWindow: 1000000,
-      maxTokens: 8192,
-    } satisfies Model<"anthropic-messages">,
-    "google/gemini-2.0-flash-lite": {
-      id: "google/gemini-2.0-flash-lite",
-      name: "Gemini 2.0 Flash Lite",
-      api: "anthropic-messages",
-      provider: "vercel-ai-gateway",
-      baseUrl: "https://ai-gateway.vercel.sh",
-      reasoning: false,
-      input: ["text", "image"],
-      cost: {
-        input: 0.075,
-        output: 0.3,
-        cacheRead: 0,
-        cacheWrite: 0,
-      },
-      contextWindow: 1048576,
-      maxTokens: 8192,
-    } satisfies Model<"anthropic-messages">,
     "google/gemini-2.5-flash": {
       id: "google/gemini-2.5-flash",
       name: "Gemini 2.5 Flash",
@@ -8966,15 +8948,15 @@ export const MODELS = {
       provider: "vercel-ai-gateway",
       baseUrl: "https://ai-gateway.vercel.sh",
       reasoning: true,
-      input: ["text"
+      input: ["text"],
       cost: {
         input: 0.3,
         output: 2.5,
-        cacheRead: 0
+        cacheRead: 0,
         cacheWrite: 0,
       },
       contextWindow: 1000000,
-      maxTokens:
+      maxTokens: 65536,
     } satisfies Model<"anthropic-messages">,
     "google/gemini-2.5-flash-lite": {
       id: "google/gemini-2.5-flash-lite",
@@ -9034,11 +9016,11 @@ export const MODELS = {
       provider: "vercel-ai-gateway",
       baseUrl: "https://ai-gateway.vercel.sh",
       reasoning: true,
-      input: ["text"
+      input: ["text"],
       cost: {
         input: 1.25,
         output: 10,
-        cacheRead: 0
+        cacheRead: 0,
         cacheWrite: 0,
       },
       contextWindow: 1048576,
@@ -10449,7 +10431,7 @@ export const MODELS = {
       cost: {
         input: 0.19999999999999998,
         output: 1.1,
-        cacheRead: 0,
+        cacheRead: 0.03,
         cacheWrite: 0,
       },
       contextWindow: 128000,
@@ -10540,6 +10522,23 @@ export const MODELS = {
       contextWindow: 202752,
       maxTokens: 120000,
     } satisfies Model<"anthropic-messages">,
+    "zai/glm-4.7-flashx": {
+      id: "zai/glm-4.7-flashx",
+      name: "GLM 4.7 FlashX",
+      api: "anthropic-messages",
+      provider: "vercel-ai-gateway",
+      baseUrl: "https://ai-gateway.vercel.sh",
+      reasoning: true,
+      input: ["text"],
+      cost: {
+        input: 0.06,
+        output: 0.39999999999999997,
+        cacheRead: 0.01,
+        cacheWrite: 0,
+      },
+      contextWindow: 200000,
+      maxTokens: 128000,
+    } satisfies Model<"anthropic-messages">,
   },
   "xai": {
     "grok-2": {
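The regenerated pricing above is easy to sanity-check with a small helper. A sketch, assuming the `cost` fields are USD per million tokens (consistent with figures like `input: 1.25` / `output: 10` on the Gemini 2.5 Pro entries); `estimateCostUsd` is our illustrative helper, not the package's own `calculateCost`:

```ts
// Shape mirrored from the generated entries above.
type ModelCost = { input: number; output: number; cacheRead: number; cacheWrite: number };

// Illustrative helper: assumes each rate is USD per million tokens.
function estimateCostUsd(
  cost: ModelCost,
  usage: { input: number; output: number; cacheRead?: number; cacheWrite?: number },
): number {
  const perTok = (rate: number, tokens = 0) => (rate * tokens) / 1_000_000;
  return (
    perTok(cost.input, usage.input) +
    perTok(cost.output, usage.output) +
    perTok(cost.cacheRead, usage.cacheRead) +
    perTok(cost.cacheWrite, usage.cacheWrite)
  );
}

// The new "z-ai/glm-4.7-flash" entry: $0.07 in / ~$0.40 out per MTok.
const glmFlash = { input: 0.07, output: 0.39999999999999997, cacheRead: 0.01, cacheWrite: 0 };
console.log(estimateCostUsd(glmFlash, { input: 200_000, output: 10_000 }).toFixed(4)); // ≈ 0.0180
```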
package/src/providers/anthropic.ts
CHANGED

@@ -727,6 +727,8 @@ function convertMessages(
   isOAuthToken: boolean,
 ): MessageParam[] {
   const params: MessageParam[] = [];
+  // Track tool call IDs from skipped assistant messages to also skip their results
+  let skippedToolCallIds: string[] | null = null;

   // Transform messages for cross-provider compatibility
   const transformedMessages = transformMessages(messages, model);
@@ -779,6 +781,31 @@ function convertMessages(
     // Skip messages with undefined/null content
     if (!msg.content || !Array.isArray(msg.content)) continue;

+    // When interleaved thinking is enabled, Anthropic requires the last assistant
+    // message to start with a thinking block. If the first content block is a thinking
+    // block with a missing/invalid signature (e.g., from aborted stream), we must skip
+    // the entire message to avoid API rejection. Checking the first non-empty block.
+    const firstContentBlock = msg.content.find(
+      (b) =>
+        (b.type === "text" && b.text.trim().length > 0) ||
+        (b.type === "thinking" && b.thinking.trim().length > 0) ||
+        b.type === "toolCall",
+    );
+    if (
+      firstContentBlock?.type === "thinking" &&
+      (!firstContentBlock.thinkingSignature || firstContentBlock.thinkingSignature.trim().length === 0)
+    ) {
+      // Skip this assistant message - it has corrupt thinking that would break the API.
+      // Also track any tool calls in this message so we can skip their results.
+      for (const block of msg.content) {
+        if (block.type === "toolCall") {
+          skippedToolCallIds ??= [];
+          skippedToolCallIds.push(block.id);
+        }
+      }
+      continue;
+    }
+
     const blocks: Array<ContentBlockParam & CacheControlBlock> = [];

     for (const block of msg.content) {
@@ -824,23 +851,27 @@ function convertMessages(
     const toolResults: Array<ContentBlockParam & CacheControlBlock> = [];

     // Add the current tool result
-    toolResults.push({
-      type: "tool_result",
-      tool_use_id: sanitizeToolCallId(msg.toolCallId),
-      content: convertContentBlocks(msg.content),
-      is_error: msg.isError,
-    });
+    if (!skippedToolCallIds?.includes(msg.toolCallId)) {
+      toolResults.push({
+        type: "tool_result",
+        tool_use_id: sanitizeToolCallId(msg.toolCallId),
+        content: convertContentBlocks(msg.content),
+        is_error: msg.isError,
+      });
+    }

     // Look ahead for consecutive toolResult messages
     let j = i + 1;
     while (j < transformedMessages.length && transformedMessages[j].role === "toolResult") {
       const nextMsg = transformedMessages[j] as ToolResultMessage; // We know it's a toolResult
-      toolResults.push({
-        type: "tool_result",
-        tool_use_id: sanitizeToolCallId(nextMsg.toolCallId),
-        content: convertContentBlocks(nextMsg.content),
-        is_error: nextMsg.isError,
-      });
+      if (!skippedToolCallIds?.includes(nextMsg.toolCallId)) {
+        toolResults.push({
+          type: "tool_result",
+          tool_use_id: sanitizeToolCallId(nextMsg.toolCallId),
+          content: convertContentBlocks(nextMsg.content),
+          is_error: nextMsg.isError,
+        });
+      }
       j++;
     }

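Net effect of the anthropic.ts change: an assistant message whose first non-empty block is a thinking block with a missing or empty signature is dropped entirely, and the IDs of any tool calls inside it are remembered so their matching `tool_result` blocks are dropped too, preserving Anthropic's tool_use/tool_result pairing. A standalone sketch of that invariant, with simplified local types (the real code operates on the package's message types and Anthropic's `MessageParam`):

```ts
type Block =
  | { type: "thinking"; thinking: string; thinkingSignature?: string }
  | { type: "toolCall"; id: string }
  | { type: "text"; text: string };

function collectSkippedToolCallIds(assistantContent: Block[]): string[] {
  // First non-empty block decides whether the whole message is usable.
  const first = assistantContent.find(
    (b) =>
      b.type === "toolCall" ||
      (b.type === "text" && b.text.trim().length > 0) ||
      (b.type === "thinking" && b.thinking.trim().length > 0),
  );
  // An unsigned leading thinking block (e.g. from an aborted stream) poisons the
  // message; every tool call inside it must lose its tool_result downstream, or the
  // API would see a tool_result with no matching tool_use.
  if (first?.type === "thinking" && !first.thinkingSignature?.trim()) {
    return assistantContent.flatMap((b) => (b.type === "toolCall" ? [b.id] : []));
  }
  return [];
}

console.log(
  collectSkippedToolCallIds([
    { type: "thinking", thinking: "partial chain of thought", thinkingSignature: "" },
    { type: "toolCall", id: "call_1" },
  ]),
); // ["call_1"]
```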
package/src/providers/cursor.ts
CHANGED

@@ -3,7 +3,6 @@ import { appendFile } from "node:fs/promises";
 import http2 from "node:http2";
 import { create, fromBinary, fromJson, type JsonValue, toBinary, toJson } from "@bufbuild/protobuf";
 import { ValueSchema } from "@bufbuild/protobuf/wkt";
-import JSON5 from "json5";
 import { calculateCost } from "@oh-my-pi/pi-ai/models";
 import type {
   Api,
@@ -27,6 +26,7 @@ import type {
 import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
 import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
 import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
+import JSON5 from "json5";
 import type { McpToolDefinition } from "./cursor/gen/agent_pb";
 import {
   AgentClientMessageSchema,
package/src/providers/google-gemini-cli.ts
CHANGED

@@ -6,7 +6,6 @@

 import { createHash } from "node:crypto";
 import type { Content, ThinkingConfig } from "@google/genai";
-import { abortableSleep } from "@oh-my-pi/pi-utils";
 import { calculateCost } from "@oh-my-pi/pi-ai/models";
 import type {
   Api,
@@ -21,6 +20,7 @@ import type {
 } from "@oh-my-pi/pi-ai/types";
 import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
 import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
+import { abortableSleep } from "@oh-my-pi/pi-utils";
 import {
   convertMessages,
   convertTools,
package/src/providers/openai-codex-responses.ts
CHANGED

@@ -1,14 +1,4 @@
 import os from "node:os";
-import { abortableSleep } from "@oh-my-pi/pi-utils";
-import type {
-  ResponseFunctionToolCall,
-  ResponseInput,
-  ResponseInputContent,
-  ResponseInputImage,
-  ResponseInputText,
-  ResponseOutputMessage,
-  ResponseReasoningItem,
-} from "openai/resources/responses/responses";
 import { calculateCost } from "@oh-my-pi/pi-ai/models";
 import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
 import type {
@@ -28,6 +18,16 @@ import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream"
 import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
 import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
 import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
+import { abortableSleep } from "@oh-my-pi/pi-utils";
+import type {
+  ResponseFunctionToolCall,
+  ResponseInput,
+  ResponseInputContent,
+  ResponseInputImage,
+  ResponseInputText,
+  ResponseOutputMessage,
+  ResponseReasoningItem,
+} from "openai/resources/responses/responses";
 import packageJson from "../../package.json" with { type: "json" };
 import {
   CODEX_BASE_URL,
package/src/providers/openai-completions.ts
CHANGED

@@ -1,13 +1,3 @@
-import OpenAI from "openai";
-import type {
-  ChatCompletionAssistantMessageParam,
-  ChatCompletionChunk,
-  ChatCompletionContentPart,
-  ChatCompletionContentPartImage,
-  ChatCompletionContentPartText,
-  ChatCompletionMessageParam,
-  ChatCompletionToolMessageParam,
-} from "openai/resources/chat/completions";
 import { calculateCost } from "@oh-my-pi/pi-ai/models";
 import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
 import type {
@@ -29,6 +19,16 @@ import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream"
 import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
 import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
 import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
+import OpenAI from "openai";
+import type {
+  ChatCompletionAssistantMessageParam,
+  ChatCompletionChunk,
+  ChatCompletionContentPart,
+  ChatCompletionContentPartImage,
+  ChatCompletionContentPartText,
+  ChatCompletionMessageParam,
+  ChatCompletionToolMessageParam,
+} from "openai/resources/chat/completions";
 import { transformMessages } from "./transform-messages";

 /**
package/src/providers/openai-responses.ts
CHANGED

@@ -1,15 +1,3 @@
-import OpenAI from "openai";
-import type {
-  Tool as OpenAITool,
-  ResponseCreateParamsStreaming,
-  ResponseFunctionToolCall,
-  ResponseInput,
-  ResponseInputContent,
-  ResponseInputImage,
-  ResponseInputText,
-  ResponseOutputMessage,
-  ResponseReasoningItem,
-} from "openai/resources/responses/responses";
 import { calculateCost } from "@oh-my-pi/pi-ai/models";
 import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
 import type {
@@ -29,6 +17,18 @@ import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream"
 import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
 import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
 import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
+import OpenAI from "openai";
+import type {
+  Tool as OpenAITool,
+  ResponseCreateParamsStreaming,
+  ResponseFunctionToolCall,
+  ResponseInput,
+  ResponseInputContent,
+  ResponseInputImage,
+  ResponseInputText,
+  ResponseOutputMessage,
+  ResponseReasoningItem,
+} from "openai/resources/responses/responses";
 import { transformMessages } from "./transform-messages";

 /** Fast deterministic hash to shorten long strings */
package/src/utils/oauth/github-copilot.ts
CHANGED

@@ -2,8 +2,8 @@
  * GitHub Copilot OAuth flow
  */

-import { abortableSleep } from "@oh-my-pi/pi-utils";
 import { getModels } from "@oh-my-pi/pi-ai/models";
+import { abortableSleep } from "@oh-my-pi/pi-utils";
 import type { OAuthCredentials } from "./types";

 const decode = (s: string) => atob(s);
package/src/utils/retry.ts
ADDED

@@ -0,0 +1,78 @@
+type ErrorLike = {
+  message?: string;
+  name?: string;
+  status?: number;
+  statusCode?: number;
+  response?: { status?: number };
+  cause?: unknown;
+};
+
+const TRANSIENT_MESSAGE_PATTERN =
+  /overloaded|rate.?limit|usage.?limit|too many requests|service.?unavailable|server error|internal error|connection.?error|fetch failed/i;
+
+const VALIDATION_MESSAGE_PATTERN =
+  /invalid|validation|bad request|unsupported|schema|missing required|not found|unauthorized|forbidden/i;
+
+/**
+ * Identify errors that should be retried (timeouts, 5xx, 408, 429, transient network failures).
+ */
+export function isRetryableError(error: unknown): boolean {
+  const info = error as ErrorLike | null;
+  const message = info?.message ?? "";
+  const name = info?.name ?? "";
+  if (name === "AbortError" || /timeout|timed out|aborted/i.test(message)) return true;
+
+  const status = extractHttpStatusFromError(error);
+  if (status !== undefined) {
+    if (status >= 500) return true;
+    if (status === 408 || status === 429) return true;
+    if (status >= 400 && status < 500) return false;
+  }
+
+  if (VALIDATION_MESSAGE_PATTERN.test(message)) return false;
+
+  return TRANSIENT_MESSAGE_PATTERN.test(message);
+}
+
+export function extractHttpStatusFromError(error: unknown, depth = 0): number | undefined {
+  if (!error || typeof error !== "object" || depth > 3) return undefined;
+  const info = error as ErrorLike;
+  const status =
+    info.status ??
+    info.statusCode ??
+    (info.response && typeof info.response === "object" ? info.response.status : undefined);
+  if (typeof status === "number" && status >= 100 && status <= 599) {
+    return status;
+  }
+
+  if (info.message) {
+    const extracted = extractStatusFromMessage(info.message);
+    if (extracted !== undefined) return extracted;
+  }
+
+  if (info.cause) {
+    return extractHttpStatusFromError(info.cause, depth + 1);
+  }
+
+  return undefined;
+}
+
+function extractStatusFromMessage(message: string): number | undefined {
+  const patterns = [
+    /error\s*\((\d{3})\)/i,
+    /status\s*[:=]?\s*(\d{3})/i,
+    /\bhttp\s*(\d{3})\b/i,
+    /\b(\d{3})\s*(?:status|error)\b/i,
+  ];
+
+  for (const pattern of patterns) {
+    const match = pattern.exec(message);
+    if (!match) continue;
+    const value = Number(match[1]);
+    if (Number.isFinite(value) && value >= 100 && value <= 599) {
+      return value;
+    }
+  }
+
+  return undefined;
+}
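The new `utils/retry.ts` module classifies failures by HTTP status (read from `status`/`statusCode`/`response.status`, from message text, and recursively from `cause` chains up to 3 levels) plus message heuristics, without depending on any SDK error class. A minimal consumer sketch; the retry loop and backoff policy here are ours, not something the module prescribes:

```ts
import { extractHttpStatusFromError, isRetryableError } from "@oh-my-pi/pi-ai/utils/retry";

// Retry a flaky call only when the failure looks transient (5xx, 408/429, timeouts,
// or messages matching the transient pattern); rethrow validation-style errors.
async function withRetry<T>(fn: () => Promise<T>, attempts = 3): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await fn();
    } catch (error) {
      if (attempt >= attempts || !isRetryableError(error)) throw error;
      const status = extractHttpStatusFromError(error);
      console.warn(`attempt ${attempt} failed (status ${status ?? "n/a"}), retrying`);
      // Simple exponential backoff: 500ms, 1s, 2s, ...
      await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 250));
    }
  }
}
```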
package/tsconfig.json
DELETED

@@ -1,42 +0,0 @@
-{
-  "compilerOptions": {
-    "target": "ES2024",
-    "module": "ESNext",
-    "lib": [
-      "ES2024"
-    ],
-    "strict": true,
-    "esModuleInterop": true,
-    "skipLibCheck": true,
-    "forceConsistentCasingInFileNames": true,
-    "moduleResolution": "Bundler",
-    "resolveJsonModule": true,
-    "allowImportingTsExtensions": true,
-    "experimentalDecorators": true,
-    "emitDecoratorMetadata": true,
-    "useDefineForClassFields": false,
-    "types": [
-      "bun",
-      "node"
-    ],
-    "noEmit": true,
-    "baseUrl": ".",
-    "paths": {
-      "@oh-my-pi/pi-ai": [
-        "./src/index.ts"
-      ],
-      "@oh-my-pi/pi-ai/*": [
-        "./src/*"
-      ]
-    }
-  },
-  "include": [
-    "src/**/*.ts"
-  ],
-  "exclude": [
-    "node_modules",
-    "dist",
-    "**/*.test.ts",
-    "test/**"
-  ]
-}