pi-yandex-bridge 0.2.9 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -47,3 +47,44 @@ export YANDEX_FOLDER_ID="your-folder-id"
47
47
  ```
48
48
 
49
49
  Models are fetched at startup using the API key. You can generate an API key in the [Yandex AI Studio](https://aistudio.yandex.ru) or in the Yandex Cloud console under **Service accounts → your account → API keys**.
50
+
51
+ ## Development & Testing
52
+
53
+ ### Run tests
54
+
55
+ ```sh
56
+ bun test
57
+ ```
58
+
59
+ Tests cover:
60
+ - Model ID parsing from Yandex API response
61
+ - OAuth → IAM token exchange
62
+ - Header construction (Bearer tokens vs. API keys)
63
+ - Error handling and timeouts
64
+ - Model entry structure validation
65
+
66
+ ### Manual API verification
67
+
68
+ Before releasing, verify that the Yandex API is accessible and models are being fetched correctly:
69
+
70
+ ```sh
71
+ # OAuth flow
72
+ YANDEX_OAUTH_TOKEN="<token>" YANDEX_FOLDER_ID="<id>" bun run verify.ts
73
+
74
+ # API key flow
75
+ YANDEX_API_KEY="<key>" YANDEX_FOLDER_ID="<id>" bun run verify.ts
76
+ ```
77
+
78
+ The verification script:
79
+ 1. Tests IAM token exchange (OAuth only)
80
+ 2. Fetches the list of available models
81
+ 3. Tests connectivity to the model API endpoint
82
+ 4. Reports any 404 errors or misconfiguration
83
+
84
+ ### Build
85
+
86
+ ```sh
87
+ bun run build
88
+ ```
89
+
90
+ Outputs to `dist/index.js`.
package/bun.lock CHANGED
@@ -7,6 +7,7 @@
7
7
  "devDependencies": {
8
8
  "@earendil-works/pi-ai": "latest",
9
9
  "@earendil-works/pi-coding-agent": "latest",
10
+ "@types/bun": "latest",
10
11
  "typescript": "^5",
11
12
  },
12
13
  "peerDependencies": {
@@ -235,6 +236,8 @@
235
236
 
236
237
  "@tootallnate/quickjs-emscripten": ["@tootallnate/quickjs-emscripten@0.23.0", "", {}, "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA=="],
237
238
 
239
+ "@types/bun": ["@types/bun@1.3.13", "", { "dependencies": { "bun-types": "1.3.13" } }, "sha512-9fqXWk5YIHGGnUau9TEi+qdlTYDAnOj+xLCmSTwXfAIqXr2x4tytJb43E9uCvt09zJURKXwAtkoH4nLQfzeTXw=="],
240
+
238
241
  "@types/mime-types": ["@types/mime-types@2.1.4", "", {}, "sha512-lfU4b34HOri+kAY5UheuFMWPDOI+OPceBSHZKp69gEyTL/mmJ4cnU6Y/rlme3UL3GyOn6Y42hyIEw0/q8sWx5w=="],
239
242
 
240
243
  "@types/node": ["@types/node@25.6.2", "", { "dependencies": { "undici-types": "~7.19.0" } }, "sha512-sokuT28dxf9JT5Kady1fsXOvI4HVpjZa95NKT5y9PNTIrs2AsobR4GFAA90ZG8M+nxVRLysCXsVj6eGC7Vbrlw=="],
@@ -269,6 +272,8 @@
269
272
 
270
273
  "buffer-equal-constant-time": ["buffer-equal-constant-time@1.0.1", "", {}, "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="],
271
274
 
275
+ "bun-types": ["bun-types@1.3.13", "", { "dependencies": { "@types/node": "*" } }, "sha512-QXKeHLlOLqQX9LgYaHJfzdBaV21T63HhFJnvuRCcjZiaUDpbs5ED1MgxbMra71CsryN/1dAoXuJJJwIv/2drVA=="],
276
+
272
277
  "chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="],
273
278
 
274
279
  "cli-highlight": ["cli-highlight@2.1.11", "", { "dependencies": { "chalk": "^4.0.0", "highlight.js": "^10.7.1", "mz": "^2.4.0", "parse5": "^5.1.1", "parse5-htmlparser2-tree-adapter": "^6.0.0", "yargs": "^16.0.0" }, "bin": { "highlight": "bin/highlight" } }, "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg=="],
package/index.test.ts ADDED
@@ -0,0 +1,274 @@
1
+ import { describe, it, expect, mock } from "bun:test";
2
+
3
+ interface MockResponse {
4
+ ok: boolean;
5
+ status: number;
6
+ json: () => Promise<unknown>;
7
+ text: () => Promise<string>;
8
+ }
9
+
10
+ describe("pi-yandex-bridge", () => {
11
+ describe("fetchModelIds", () => {
12
+ it("should parse Yandex models endpoint response", async () => {
13
+ const mockResponse: MockResponse = {
14
+ ok: true,
15
+ status: 200,
16
+ json: async () => ({
17
+ data: [
18
+ { id: "gpt://b1g123/yandexgpt" },
19
+ { id: "gpt://b1g123/yandexgpt-lite" },
20
+ ],
21
+ }),
22
+ text: async () => "{}",
23
+ };
24
+
25
+ global.fetch = mock(async () => mockResponse) as typeof fetch;
26
+
27
+ const { fetchModelIds } = await import("./index.ts");
28
+ const result = await fetchModelIds("b1g123", "Bearer token123");
29
+
30
+ expect(result).toEqual([
31
+ "gpt://b1g123/yandexgpt",
32
+ "gpt://b1g123/yandexgpt-lite",
33
+ ]);
34
+ });
35
+
36
+ it("should return empty array on API error", async () => {
37
+ const mockResponse: MockResponse = {
38
+ ok: false,
39
+ status: 404,
40
+ json: async () => ({}),
41
+ text: async () => "Not Found",
42
+ };
43
+
44
+ global.fetch = mock(async () => mockResponse) as typeof fetch;
45
+
46
+ const { fetchModelIds } = await import("./index.ts");
47
+ const result = await fetchModelIds("b1g123", "Bearer token123");
48
+
49
+ expect(result).toEqual([]);
50
+ });
51
+
52
+ it("should send correct headers for OAuth Bearer token", async () => {
53
+ let capturedHeaders: Record<string, string> = {};
54
+ const mockResponse: MockResponse = {
55
+ ok: true,
56
+ status: 200,
57
+ json: async () => ({ data: [] }),
58
+ text: async () => "{}",
59
+ };
60
+
61
+ global.fetch = mock(async (_url: string, opts?: RequestInit) => {
62
+ if (opts?.headers) {
63
+ capturedHeaders = opts.headers as Record<string, string>;
64
+ }
65
+ return mockResponse;
66
+ }) as typeof fetch;
67
+
68
+ const { fetchModelIds } = await import("./index.ts");
69
+ await fetchModelIds("b1g123", "Bearer token123");
70
+
71
+ expect(capturedHeaders.Authorization).toBe("Bearer token123");
72
+ expect(capturedHeaders["OpenAI-Project"]).toBe("b1g123");
73
+ });
74
+
75
+ it("should send correct headers for API Key", async () => {
76
+ let capturedHeaders: Record<string, string> = {};
77
+ const mockResponse: MockResponse = {
78
+ ok: true,
79
+ status: 200,
80
+ json: async () => ({ data: [] }),
81
+ text: async () => "{}",
82
+ };
83
+
84
+ global.fetch = mock(async (_url: string, opts?: RequestInit) => {
85
+ if (opts?.headers) {
86
+ capturedHeaders = opts.headers as Record<string, string>;
87
+ }
88
+ return mockResponse;
89
+ }) as typeof fetch;
90
+
91
+ const { fetchModelIds } = await import("./index.ts");
92
+ await fetchModelIds("b1g123", "Api-Key apikey123");
93
+
94
+ expect(capturedHeaders.Authorization).toBe("Api-Key apikey123");
95
+ expect(capturedHeaders["OpenAI-Project"]).toBe("b1g123");
96
+ });
97
+
98
+ it("should call correct endpoint", async () => {
99
+ let capturedUrl: string = "";
100
+ const mockResponse: MockResponse = {
101
+ ok: true,
102
+ status: 200,
103
+ json: async () => ({ data: [] }),
104
+ text: async () => "{}",
105
+ };
106
+
107
+ global.fetch = mock(async (url: string) => {
108
+ capturedUrl = url;
109
+ return mockResponse;
110
+ }) as typeof fetch;
111
+
112
+ const { fetchModelIds } = await import("./index.ts");
113
+ await fetchModelIds("b1g123", "Bearer token");
114
+
115
+ expect(capturedUrl).toContain("llm.api.cloud.yandex.net");
116
+ expect(capturedUrl).toContain("/models");
117
+ });
118
+ });
119
+
120
+ describe("modelEntry", () => {
121
+ it("should create model entry with correct structure", async () => {
122
+ const { modelEntry } = await import("./index.ts");
123
+ const entry = modelEntry("gpt://b1g123/yandexgpt", "b1g123");
124
+
125
+ expect(entry.id).toBe("gpt://b1g123/yandexgpt");
126
+ expect(entry.name).toBe("gpt://b1g123/yandexgpt");
127
+ expect(entry.api).toBe("openai-responses");
128
+ expect(entry.provider).toBe("yandex");
129
+ expect(entry.baseUrl).toContain("ai.api.cloud.yandex.net");
130
+ expect(entry.headers["OpenAI-Project"]).toBe("b1g123");
131
+ });
132
+
133
+ it("should set correct context window", async () => {
134
+ const { modelEntry } = await import("./index.ts");
135
+ const entry = modelEntry("gpt://b1g123/yandexgpt", "b1g123");
136
+
137
+ expect(entry.contextWindow).toBe(128000);
138
+ expect(entry.maxTokens).toBe(8192);
139
+ });
140
+
141
+ it("should disable unsupported features", async () => {
142
+ const { modelEntry } = await import("./index.ts");
143
+ const entry = modelEntry("gpt://b1g123/yandexgpt", "b1g123");
144
+
145
+ expect(entry.reasoning).toBe(true);
146
+ expect(entry.compat.supportsReasoningEffort).toBe(false);
147
+ expect(entry.compat.supportsDeveloperRole).toBe(false);
148
+ });
149
+
150
+ it("should set free cost model", async () => {
151
+ const { modelEntry } = await import("./index.ts");
152
+ const entry = modelEntry("gpt://b1g123/yandexgpt", "b1g123");
153
+
154
+ expect(entry.cost).toEqual({
155
+ input: 0,
156
+ output: 0,
157
+ cacheRead: 0,
158
+ cacheWrite: 0,
159
+ });
160
+ });
161
+ });
162
+
163
+ describe("exchangeOAuthForIam", () => {
164
+ it("should exchange OAuth token for IAM token", async () => {
165
+ const mockResponse: MockResponse = {
166
+ ok: true,
167
+ status: 200,
168
+ json: async () => ({
169
+ iamToken: "iam_token_xyz",
170
+ expiresAt: "2026-05-10T15:00:00Z",
171
+ }),
172
+ text: async () => "{}",
173
+ };
174
+
175
+ global.fetch = mock(async () => mockResponse) as typeof fetch;
176
+
177
+ const { exchangeOAuthForIam } = await import("./index.ts");
178
+ const result = await exchangeOAuthForIam("oauth_token_abc");
179
+
180
+ expect(result.token).toBe("iam_token_xyz");
181
+ expect(typeof result.expiresAt).toBe("number");
182
+ expect(result.expiresAt).toBeGreaterThan(0);
183
+ });
184
+
185
+ it("should throw on failed token exchange", async () => {
186
+ const mockResponse: MockResponse = {
187
+ ok: false,
188
+ status: 401,
189
+ json: async () => ({}),
190
+ text: async () => "Unauthorized",
191
+ };
192
+
193
+ global.fetch = mock(async () => mockResponse) as typeof fetch;
194
+
195
+ const { exchangeOAuthForIam } = await import("./index.ts");
196
+
197
+ try {
198
+ await exchangeOAuthForIam("bad_token");
199
+ expect.unreachable();
200
+ } catch (err) {
201
+ expect(err instanceof Error).toBe(true);
202
+ expect((err as Error).message).toContain("IAM token exchange failed");
203
+ }
204
+ });
205
+
206
+ it("should send OAuth token in correct format", async () => {
207
+ let capturedBody: unknown;
208
+ const mockResponse: MockResponse = {
209
+ ok: true,
210
+ status: 200,
211
+ json: async () => ({
212
+ iamToken: "iam_token",
213
+ expiresAt: "2026-05-10T15:00:00Z",
214
+ }),
215
+ text: async () => "{}",
216
+ };
217
+
218
+ global.fetch = mock(async (_url: string, opts?: RequestInit) => {
219
+ if (opts?.body) {
220
+ capturedBody = JSON.parse(opts.body as string);
221
+ }
222
+ return mockResponse;
223
+ }) as typeof fetch;
224
+
225
+ const { exchangeOAuthForIam } = await import("./index.ts");
226
+ await exchangeOAuthForIam("my_oauth_token");
227
+
228
+ expect(capturedBody).toEqual({
229
+ yandexPassportOauthToken: "my_oauth_token",
230
+ });
231
+ });
232
+ });
233
+
234
+ describe("reasoning mode detection", () => {
235
+ it("should enable reasoning for YandexGPT models", async () => {
236
+ const { modelEntry } = await import("./index.ts");
237
+ const entry = modelEntry("gpt://b1g123/yandexgpt-5-pro/latest", "b1g123");
238
+ expect(entry.reasoning).toBe(true);
239
+ });
240
+
241
+ it("should enable reasoning for DeepSeek v3.2", async () => {
242
+ const { modelEntry } = await import("./index.ts");
243
+ const entry = modelEntry("gpt://b1g123/deepseek-v32/latest", "b1g123");
244
+ expect(entry.reasoning).toBe(true);
245
+ });
246
+
247
+ it("should enable reasoning for Qwen3 models", async () => {
248
+ const { modelEntry } = await import("./index.ts");
249
+ const entry = modelEntry(
250
+ "gpt://b1g123/qwen3-235b-a22b-fp8/latest",
251
+ "b1g123",
252
+ );
253
+ expect(entry.reasoning).toBe(true);
254
+ });
255
+
256
+ it("should enable reasoning for GPT-OSS models", async () => {
257
+ const { modelEntry } = await import("./index.ts");
258
+ const entry = modelEntry("gpt://b1g123/gpt-oss-120b/latest", "b1g123");
259
+ expect(entry.reasoning).toBe(true);
260
+ });
261
+
262
+ it("should disable reasoning for models without reasoning support", async () => {
263
+ const { modelEntry } = await import("./index.ts");
264
+ const entry = modelEntry("gpt://b1g123/aliceai-llm/latest", "b1g123");
265
+ expect(entry.reasoning).toBe(false);
266
+ });
267
+
268
+ it("should disable reasoning for embedding models", async () => {
269
+ const { modelEntry } = await import("./index.ts");
270
+ const entry = modelEntry("emb://b1g123/text-embeddings/latest", "b1g123");
271
+ expect(entry.reasoning).toBe(false);
272
+ });
273
+ });
274
+ });
package/index.ts CHANGED
@@ -51,23 +51,48 @@ interface IamTokenResponse {
51
51
  expiresAt: string;
52
52
  }
53
53
 
54
- // Only applies to yandex gpt:// URIs leaves all other model IDs untouched.
55
- function prettyModelName(id: string): string {
56
- const match = id.match(/^gpt:\/\/([^/]+)\/(.+?)(?:(\/latest))?$/);
57
- if (!match) return id;
58
- const [, folderId, slug, hasLatest] = match;
59
- const tag = hasLatest ? "/l" : "";
60
- return `${slug}{${folderId.slice(-5)}${tag}}`;
54
+ // Model reasoning mode mapping based on Yandex Cloud capabilities
55
+ const REASONING_MODELS = new Set([
56
+ // YandexGPT models with chain-of-thought reasoning
57
+ "yandexgpt",
58
+ "yandexgpt-lite",
59
+ "yandexgpt-5-lite",
61
+ "yandexgpt-5-pro",
62
+ // DeepSeek with thinking mode
63
+ "deepseek-v32",
64
+ // Qwen models with reasoning support
65
+ "qwen3-235b-a22b-fp8",
66
+ "qwen3.5-35b-a3b-fp8",
67
+ "qwen3.6-35b-a3b",
68
+ // GPT OSS models with reasoning
69
+ "gpt-oss-120b",
70
+ "gpt-oss-20b",
71
+ ]);
72
+
73
+ function getReasoningMode(modelId: string): boolean {
74
+ // Model ID format: gpt://b1g123/model-name/latest or gpt://b1g123/model-name
75
+ // Extract model name: it's the last segment if no version, or second-to-last if version exists
76
+ const parts = modelId.split("/");
77
+ let modelName = "";
78
+ if (parts.length >= 4) {
79
+ // Has version suffix (gpt://b1g123/model-name/latest) → model-name is at parts[3]
80
+ modelName = parts[3];
81
+ } else if (parts.length === 3) {
82
+ // No version suffix (gpt://b1g123/model-name) → model-name is at parts[2]
83
+ modelName = parts[2];
84
+ }
85
+ return REASONING_MODELS.has(modelName);
61
86
  }
62
87
 
63
88
  function modelEntry(id: string, folderId: string) {
64
89
  return {
65
90
  id,
66
- name: prettyModelName(id),
91
+ name: id,
67
92
  api: "openai-responses" as const,
68
93
  provider: "yandex",
69
94
  baseUrl: AI_BASE_URL,
70
- reasoning: true,
95
+ reasoning: getReasoningMode(id),
71
96
  input: ["text"] as ("text" | "image")[],
72
97
  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
73
98
  contextWindow: 128_000,
@@ -316,6 +341,8 @@ async function runYaLogin(ctx: ExtensionCommandContext) {
316
341
 
317
342
  // ─── extension entry point ────────────────────────────────────────────────────
318
343
 
344
+ // Export for testing
345
+ export { fetchModelIds, modelEntry, exchangeOAuthForIam };
319
346
  export default async function (pi: ExtensionAPI) {
320
347
  const apiKey = process.env.YANDEX_API_KEY;
321
348
  const folderId = process.env.YANDEX_FOLDER_ID;
package/package.json CHANGED
@@ -1,15 +1,19 @@
1
1
  {
2
2
  "name": "pi-yandex-bridge",
3
- "version": "0.2.9",
3
+ "version": "0.3.1",
4
4
  "description": "Pi Coding Agent provider bridge for Yandex Cloud AI (YandexGPT)",
5
5
  "main": "./dist/index.js",
6
6
  "type": "module",
7
7
  "pi": {
8
- "extensions": ["./dist/index.js"],
8
+ "extensions": [
9
+ "./dist/index.js"
10
+ ],
9
11
  "image": "https://upload.wikimedia.org/wikipedia/commons/5/5b/Yandex_cloud_logo_new.jpg"
10
12
  },
11
13
  "scripts": {
12
- "build": "tsc"
14
+ "build": "tsc",
15
+ "test": "bun test",
16
+ "verify": "bun run verify.ts"
13
17
  },
14
18
  "keywords": [
15
19
  "pi-package",
@@ -28,7 +32,8 @@
28
32
  "devDependencies": {
29
33
  "@earendil-works/pi-coding-agent": "latest",
30
34
  "@earendil-works/pi-ai": "latest",
31
- "typescript": "^5"
35
+ "typescript": "^5",
36
+ "@types/bun": "latest"
32
37
  },
33
38
  "license": "MIT",
34
39
  "repository": {
package/verify.ts ADDED
@@ -0,0 +1,198 @@
1
+ #!/usr/bin/env bun
2
+ /**
3
+ * Manual API verification script
4
+ *
5
+ * Tests Yandex Cloud API connectivity and model availability.
6
+ * Run before releasing to ensure all endpoints are responding.
7
+ *
8
+ * Usage:
9
+ * YANDEX_OAUTH_TOKEN=<token> YANDEX_FOLDER_ID=<id> bun run verify.ts
10
+ * OR
11
+ * YANDEX_API_KEY=<key> YANDEX_FOLDER_ID=<id> bun run verify.ts
12
+ */
13
+
14
+ const colors = {
15
+ reset: "\x1b[0m",
16
+ green: "\x1b[32m",
17
+ red: "\x1b[31m",
18
+ yellow: "\x1b[33m",
19
+ blue: "\x1b[34m",
20
+ gray: "\x1b[90m",
21
+ };
22
+
23
+ const log = {
24
+ info: (msg: string) => console.log(`${colors.blue}ℹ${colors.reset} ${msg}`),
25
+ success: (msg: string) => console.log(`${colors.green}✓${colors.reset} ${msg}`),
26
+ error: (msg: string) => console.log(`${colors.red}✗${colors.reset} ${msg}`),
27
+ warn: (msg: string) => console.log(`${colors.yellow}⚠${colors.reset} ${msg}`),
28
+ debug: (msg: string) => console.log(`${colors.gray}${msg}${colors.reset}`),
29
+ };
30
+
31
+ async function verify() {
32
+ const oauthToken = process.env.YANDEX_OAUTH_TOKEN;
33
+ const apiKey = process.env.YANDEX_API_KEY;
34
+ const folderId = process.env.YANDEX_FOLDER_ID;
35
+
36
+ log.info("Yandex Cloud API Verification");
37
+ console.log();
38
+
39
+ // ─── Validate environment ─────────────────────────────────────────────────
40
+
41
+ if (!folderId) {
42
+ log.error("YANDEX_FOLDER_ID env var is required");
43
+ process.exit(1);
44
+ }
45
+
46
+ if (!oauthToken && !apiKey) {
47
+ log.error("Either YANDEX_OAUTH_TOKEN or YANDEX_API_KEY env var is required");
48
+ process.exit(1);
49
+ }
50
+
51
+ log.success(`Using Folder ID: ${folderId}`);
52
+ const authType = oauthToken ? "OAuth Token" : "API Key";
53
+ log.success(`Using Auth: ${authType}`);
54
+ console.log();
55
+
56
+ let authHeader = "";
57
+ let iamToken = "";
58
+
59
+ // ─── Step 1: OAuth → IAM token exchange (if using OAuth) ──────────────────
60
+
61
+ if (oauthToken) {
62
+ log.info("Step 1: Exchanging OAuth token for IAM token…");
63
+ try {
64
+ const res = await fetch("https://iam.api.cloud.yandex.net/iam/v1/tokens", {
65
+ method: "POST",
66
+ headers: { "Content-Type": "application/json" },
67
+ body: JSON.stringify({ yandexPassportOauthToken: oauthToken }),
68
+ });
69
+
70
+ if (!res.ok) {
71
+ const text = await res.text().catch(() => res.statusText);
72
+ log.error(`IAM token exchange failed (${res.status}): ${text}`);
73
+ process.exit(1);
74
+ }
75
+
76
+ const data = (await res.json()) as {
77
+ iamToken: string;
78
+ expiresAt: string;
79
+ };
80
+ iamToken = data.iamToken;
81
+ authHeader = `Bearer ${iamToken}`;
82
+
83
+ const expDate = new Date(data.expiresAt).toLocaleString();
84
+ log.success(`Got IAM token (expires ${expDate})`);
85
+ } catch (err) {
86
+ log.error(`OAuth exchange error: ${err instanceof Error ? err.message : err}`);
87
+ process.exit(1);
88
+ }
89
+ } else {
90
+ authHeader = `Api-Key ${apiKey}`;
91
+ log.success("Using API Key authentication");
92
+ }
93
+
94
+ console.log();
95
+
96
+ // ─── Step 2: Fetch available models ───────────────────────────────────────
97
+
98
+ log.info("Step 2: Fetching available models…");
99
+ let models: Array<{ id: string }> = [];
100
+
101
+ try {
102
+ const res = await fetch("https://llm.api.cloud.yandex.net/v1/models", {
103
+ headers: {
104
+ Authorization: authHeader,
105
+ "OpenAI-Project": folderId,
106
+ },
107
+ });
108
+
109
+ if (!res.ok) {
110
+ const text = await res.text().catch(() => res.statusText);
111
+ log.error(`Models endpoint failed (${res.status}): ${text}`);
112
+ log.debug("This is the main 404 issue — check folder ID and auth");
113
+ process.exit(1);
114
+ }
115
+
116
+ const payload = (await res.json()) as { data: Array<{ id: string }> };
117
+ models = payload.data || [];
118
+
119
+ if (models.length === 0) {
120
+ log.warn("No models returned from API");
121
+ } else {
122
+ log.success(`Found ${models.length} model(s)`);
123
+ models.slice(0, 3).forEach((m) => log.debug(` • ${m.id}`));
124
+ if (models.length > 3) log.debug(` ... and ${models.length - 3} more`);
125
+ }
126
+ } catch (err) {
127
+ log.error(`Models fetch error: ${err instanceof Error ? err.message : err}`);
128
+ process.exit(1);
129
+ }
130
+
131
+ console.log();
132
+
133
+ // ─── Step 3: Test model endpoint (find a working format) ──────────────────
134
+
135
+ if (models.length > 0) {
136
+ log.info("Step 3: Testing model API compatibility…");
137
+ const testModel = models[0];
138
+ const baseId = testModel.id;
139
+
140
+ // Try different ID formats
141
+ const formats = [
142
+ { name: "full ID", id: baseId },
143
+ { name: "without /latest", id: baseId.replace(/\/latest$/, "") },
144
+ { name: "just model name", id: baseId.split("/").pop()! },
145
+ ];
146
+
147
+ let foundWorking = false;
148
+
149
+ for (const fmt of formats) {
150
+ try {
151
+ const res = await fetch("https://llm.api.cloud.yandex.net/openai/v1/chat/completions", {
152
+ method: "POST",
153
+ headers: {
154
+ Authorization: authHeader,
155
+ "OpenAI-Project": folderId,
156
+ "Content-Type": "application/json",
157
+ },
158
+ body: JSON.stringify({
159
+ model: fmt.id,
160
+ messages: [{ role: "user", content: "test" }],
161
+ max_tokens: 10,
162
+ }),
163
+ });
164
+
165
+ if (res.status === 404) {
166
+ log.debug(` ✗ ${fmt.name}: 404 — ${fmt.id}`);
167
+ } else if (!res.ok) {
168
+ const text = await res.text().catch(() => res.statusText);
169
+ log.warn(` ⚠ ${fmt.name}: ${res.status} — ${text.slice(0, 50)}`);
170
+ } else {
171
+ log.success(` ✓ ${fmt.name}: API responds correctly`);
172
+ log.debug(` Model ID format: ${fmt.id}`);
173
+ foundWorking = true;
174
+ break;
175
+ }
176
+ } catch (err) {
177
+ log.debug(` ✗ ${fmt.name}: ${err instanceof Error ? err.message : err}`);
178
+ }
179
+ }
180
+
181
+ if (!foundWorking) {
182
+ log.warn("Could not find working model ID format");
183
+ log.debug(`Test model ID: ${baseId}`);
184
+ }
185
+ }
186
+
187
+ console.log();
188
+
189
+ // ─── Summary ──────────────────────────────────────────────────────────────
190
+
191
+ log.success("All checks passed. API is accessible.");
192
+ log.info(`Ready to release with ${models.length} model(s).`);
193
+ }
194
+
195
+ verify().catch((err) => {
196
+ log.error(`Verification failed: ${err instanceof Error ? err.message : err}`);
197
+ process.exit(1);
198
+ });