universal-social-sdk 1.1.0 → 1.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example CHANGED
@@ -59,3 +59,14 @@ SOCIAL_SDK_MAX_RETRIES=3
59
59
  SOCIAL_SDK_RETRY_BASE_MS=500
60
60
  OLLAMA_HOST=http://127.0.0.1:11434
61
61
  OLLAMA_MODEL=llama3.2:3b
62
+ UPDATER_LLM_PROVIDER=openrouter
63
+ UPDATER_LLM_BASE_URL=https://openrouter.ai/api/v1
64
+ UPDATER_LLM_API_KEY=
65
+ UPDATER_LLM_MODEL=google/gemma-3-4b-it:free
66
+ UPDATER_LLM_APP_NAME=universal-social-sdk-updater
67
+ UPDATER_LLM_APP_URL=https://github.com/Gabo-Tech/universal-social-sdk
68
+ UPDATER_LLM_MAX_TOKENS=1200
69
+ UPDATER_LLM_MAX_DOC_CHARS_PER_PAGE=6000
70
+ UPDATER_LLM_MAX_ENDPOINT_ROWS_PER_PAGE=40
71
+ UPDATER_LLM_MAX_MODEL_ATTEMPTS=4
72
+ UPDATER_LLM_FALLBACK_MODELS=google/gemma-3-4b-it:free,qwen/qwen3-4b:free,openai/gpt-oss-20b:free
package/CHANGELOG.md CHANGED
@@ -12,6 +12,7 @@ and this project follows [Semantic Versioning](https://semver.org/spec/v2.0.0.ht
12
12
  - Non-interactive updater mode for CI/automation (`--ci`, `--open-pr`, `--branch-prefix`, `--base`, `--artifacts-dir`).
13
13
  - Structured updater artifacts (`.artifacts/update-plan.json`, `.artifacts/update-diff-summary.json`, `.artifacts/pr-title.txt`, `.artifacts/pr-body.md`).
14
14
  - Strict Ollama patch-plan schema validation with typed `changes` metadata (platform, endpoint, change type, confidence).
15
+ - Updater LLM provider abstraction with OpenRouter support (`UPDATER_LLM_*`, `OPENROUTER_*`) while keeping legacy Ollama compatibility.
15
16
  - Scheduled PR automation workflow: `.github/workflows/auto-update-pr.yml`.
16
17
  - Workflow-dispatch `dry_run` mode for updater automation (detect and generate artifacts without opening PRs).
17
18
  - Unit tests covering updater plan validation and no-change detection behavior.
package/README.md CHANGED
@@ -192,8 +192,19 @@ router.on("x.tweet_create_events", async (event) => {
192
192
 
193
193
  - `SOCIAL_SDK_MAX_RETRIES` (default `3`)
194
194
  - `SOCIAL_SDK_RETRY_BASE_MS` (default `500`)
195
- - `OLLAMA_HOST` (default `http://127.0.0.1:11434`)
196
- - `OLLAMA_MODEL` (default `llama3.2:3b`)
195
+ - `UPDATER_LLM_PROVIDER` (`openrouter` or `ollama`; defaults to `openrouter` when an API key is present, otherwise `ollama`)
196
+ - `UPDATER_LLM_BASE_URL` (default `https://openrouter.ai/api/v1`)
197
+ - `UPDATER_LLM_API_KEY` (or `OPENROUTER_API_KEY`)
198
+ - `UPDATER_LLM_MODEL` (or `OPENROUTER_MODEL`; falls back to `OLLAMA_MODEL` when set, then defaults to `google/gemma-3-4b-it:free` for OpenRouter or `llama3.2:3b` for Ollama)
199
+ - `UPDATER_LLM_APP_NAME` (optional request metadata)
200
+ - `UPDATER_LLM_APP_URL` (optional request metadata)
201
+ - `UPDATER_LLM_MAX_TOKENS` (default `1200`)
202
+ - `UPDATER_LLM_MAX_DOC_CHARS_PER_PAGE` (default `6000`)
203
+ - `UPDATER_LLM_MAX_ENDPOINT_ROWS_PER_PAGE` (default `40`)
204
+ - `UPDATER_LLM_MAX_MODEL_ATTEMPTS` (default `4`)
205
+ - `UPDATER_LLM_FALLBACK_MODELS` (comma-separated OpenRouter model IDs used after the primary model fails)
206
+ - `OLLAMA_HOST` (legacy local runtime support, default `http://127.0.0.1:11434`)
207
+ - `OLLAMA_MODEL` (legacy local runtime support)
197
208
 
198
209
  ## CLI
199
210
 
@@ -218,6 +229,8 @@ npx universal-social-sdk update
218
229
  ```bash
219
230
  npx universal-social-sdk update --dry-run
220
231
  npx universal-social-sdk update --model llama3.2
232
+ npx universal-social-sdk update --fallback-models "google/gemma-3-4b-it:free,qwen/qwen3-4b:free"
233
+ npx universal-social-sdk update --max-model-attempts 5
221
234
  npx universal-social-sdk update --yes
222
235
  npx universal-social-sdk update --ci --open-pr --base main --branch-prefix chore/updater
223
236
  ```
@@ -226,10 +239,12 @@ Flow:
226
239
 
227
240
  1. Crawls official docs pages for X, Meta Graph API, Instagram Graph API, and LinkedIn.
228
241
  2. Extracts clean text and table-like endpoint data with Cheerio.
229
- 3. Sends doc snapshots to your local model runtime.
242
+ 3. Sends doc snapshots to your configured LLM provider (OpenRouter or local runtime).
243
+ - In OpenRouter mode, the updater retries across a fallback model chain on `402/404/408/429` model-level failures, with a short backoff between attempts.
230
244
  4. Requests generated method updates + full TypeScript file content.
231
- 5. Shows git-style diffs and asks for confirmation.
232
- 6. Applies patches and rebuilds package.
245
+ 5. Runs safety checks to reject suspicious placeholder rewrites or destructive class replacements.
246
+ 6. Shows git-style diffs and asks for confirmation.
247
+ 7. Applies patches and rebuilds package.
233
248
 
234
249
  CI/PR mode writes deterministic artifacts in `.artifacts/`:
235
250
 
@@ -418,5 +433,15 @@ Configure this repository secret for publishing:
418
433
 
419
434
  Configure this repository secret for scheduled updater PRs:
420
435
 
421
- - `OLLAMA_HOST` (required; endpoint reachable from GitHub Actions runner)
422
- - `OLLAMA_MODEL` (optional override)
436
+ - `UPDATER_LLM_PROVIDER` (`openrouter` or `ollama`)
437
+ - `UPDATER_LLM_API_KEY` (required for `openrouter`; or use `OPENROUTER_API_KEY`)
438
+ - `UPDATER_LLM_MODEL` (optional)
439
+ - `UPDATER_LLM_BASE_URL` (optional; defaults to OpenRouter URL)
440
+ - `UPDATER_LLM_MAX_TOKENS` (optional; cap completion size/cost)
441
+ - `UPDATER_LLM_MAX_MODEL_ATTEMPTS` (optional)
442
+ - `UPDATER_LLM_FALLBACK_MODELS` (optional comma-separated model chain)
443
+
444
+ Alternative OpenRouter-compatible secret names are also supported:
445
+
446
+ - `OPENROUTER_API_KEY`
447
+ - `OPENROUTER_MODEL`
package/dist/cli/index.js CHANGED
@@ -235,7 +235,7 @@ async function crawlAllDocs() {
235
235
  }
236
236
 
237
237
  // src/updater/ollama.ts
238
- import axios2 from "axios";
238
+ import axios2, { isAxiosError } from "axios";
239
239
  import { z } from "zod";
240
240
 
241
241
  // src/config/env.ts
@@ -249,6 +249,21 @@ function readNumberEnv(name, fallback) {
249
249
  const parsed = Number(value);
250
250
  return Number.isFinite(parsed) ? parsed : fallback;
251
251
  }
252
+ function readListEnv(name) {
253
+ const value = process.env[name];
254
+ if (!value) {
255
+ return [];
256
+ }
257
+ return value.split(",").map((item) => item.trim()).filter(Boolean);
258
+ }
259
+ function resolveLlmProvider() {
260
+ const provider = process.env.UPDATER_LLM_PROVIDER;
261
+ if (provider === "openrouter" || provider === "ollama") {
262
+ return provider;
263
+ }
264
+ return process.env.UPDATER_LLM_API_KEY || process.env.OPENROUTER_API_KEY ? "openrouter" : "ollama";
265
+ }
266
+ var llmProvider = resolveLlmProvider();
252
267
  var env = {
253
268
  x: {
254
269
  apiKey: process.env.X_API_KEY ?? "",
@@ -310,6 +325,22 @@ var env = {
310
325
  maxRetries: readNumberEnv("SOCIAL_SDK_MAX_RETRIES", 3),
311
326
  baseDelayMs: readNumberEnv("SOCIAL_SDK_RETRY_BASE_MS", 500)
312
327
  },
328
+ llm: {
329
+ provider: llmProvider,
330
+ baseUrl: process.env.UPDATER_LLM_BASE_URL ?? "https://openrouter.ai/api/v1",
331
+ apiKey: process.env.UPDATER_LLM_API_KEY ?? process.env.OPENROUTER_API_KEY ?? "",
332
+ model: process.env.UPDATER_LLM_MODEL ?? process.env.OPENROUTER_MODEL ?? process.env.OLLAMA_MODEL ?? (llmProvider === "openrouter" ? "google/gemma-3-4b-it:free" : "llama3.2:3b"),
333
+ appName: process.env.UPDATER_LLM_APP_NAME ?? "universal-social-sdk-updater",
334
+ appUrl: process.env.UPDATER_LLM_APP_URL ?? "https://github.com/Gabo-Tech/universal-social-sdk",
335
+ maxTokens: readNumberEnv("UPDATER_LLM_MAX_TOKENS", 1200),
336
+ maxDocCharsPerPage: readNumberEnv("UPDATER_LLM_MAX_DOC_CHARS_PER_PAGE", 6e3),
337
+ maxEndpointRowsPerPage: readNumberEnv(
338
+ "UPDATER_LLM_MAX_ENDPOINT_ROWS_PER_PAGE",
339
+ 40
340
+ ),
341
+ fallbackModels: readListEnv("UPDATER_LLM_FALLBACK_MODELS"),
342
+ maxModelAttempts: readNumberEnv("UPDATER_LLM_MAX_MODEL_ATTEMPTS", 4)
343
+ },
313
344
  ollama: {
314
345
  host: process.env.OLLAMA_HOST ?? "http://127.0.0.1:11434",
315
346
  model: process.env.OLLAMA_MODEL ?? "llama3.2:3b"
@@ -337,19 +368,68 @@ var ollamaPatchPlanSchema = z.object({
337
368
  ),
338
369
  readmeTable: z.string()
339
370
  });
371
+ var OpenRouterRequestError = class extends Error {
372
+ status;
373
+ canRetryWithAnotherModel;
374
+ constructor(params) {
375
+ super(params.message);
376
+ this.name = "OpenRouterRequestError";
377
+ this.status = params.status;
378
+ this.canRetryWithAnotherModel = params.canRetryWithAnotherModel;
379
+ }
380
+ };
381
+ function sleep(ms) {
382
+ return new Promise((resolve) => setTimeout(resolve, ms));
383
+ }
384
+ function truncate(text, maxChars) {
385
+ if (text.length <= maxChars) {
386
+ return text;
387
+ }
388
+ if (maxChars < 80) {
389
+ return text.slice(0, maxChars);
390
+ }
391
+ const head = Math.floor(maxChars * 0.75);
392
+ const tail = Math.max(0, maxChars - head - 32);
393
+ return `${text.slice(0, head)}
394
+ ...[truncated]...
395
+ ${text.slice(text.length - tail)}`;
396
+ }
397
+ function compactDocs(docs) {
398
+ return docs.map((doc) => {
399
+ const limitedRows = doc.endpointRows.slice(0, env.llm.maxEndpointRowsPerPage).map(
400
+ (row) => Object.fromEntries(
401
+ Object.entries(row).map(([key, value]) => [
402
+ key,
403
+ truncate(String(value), 200)
404
+ ])
405
+ )
406
+ );
407
+ const text = truncate(doc.text, env.llm.maxDocCharsPerPage);
408
+ return {
409
+ url: doc.url,
410
+ title: doc.title,
411
+ text,
412
+ endpointRows: limitedRows,
413
+ truncated: text.length < doc.text.length || limitedRows.length < doc.endpointRows.length
414
+ };
415
+ });
416
+ }
340
417
  function buildPrompt(docs, existingMethodsJson) {
418
+ const compactedDocs = compactDocs(docs);
419
+ const compactedMethods = truncate(existingMethodsJson, 1e5);
341
420
  return [
342
421
  "You are maintaining universal-social-sdk.",
343
422
  "Compare the crawled docs with current methods and identify NEW or CHANGED endpoints.",
344
423
  "Focus areas: content publishing, stories, reels, comments, DMs, analytics.",
424
+ "Crawled text may be truncated. Only report high-confidence changes.",
345
425
  "Output STRICT JSON with this shape only:",
346
426
  '{"summary": string, "updatedMethods": Record<string,string[]>, "changes": [{"platform": string, "endpoint": string, "changeType": "added|modified|deprecated|removed", "confidence": number, "notes"?: string}], "files": [{"path": string, "content": string}], "readmeTable": string}',
347
427
  "Files must target src/platforms/*.ts and supported-methods.json updates when needed.",
348
428
  "Each file.content must be complete TypeScript file content, not patch snippets.",
349
429
  "Current supported-methods.json:",
350
- existingMethodsJson,
430
+ compactedMethods,
351
431
  "Crawled docs:",
352
- JSON.stringify(docs)
432
+ JSON.stringify(compactedDocs)
353
433
  ].join("\n");
354
434
  }
355
435
  function parseJsonOutput(raw) {
@@ -357,33 +437,136 @@ function parseJsonOutput(raw) {
357
437
  const start = cleaned.indexOf("{");
358
438
  const end = cleaned.lastIndexOf("}");
359
439
  if (start === -1 || end === -1 || end <= start) {
360
- throw new Error("Ollama response did not contain a JSON object.");
440
+ throw new Error("LLM response did not contain a JSON object.");
361
441
  }
362
442
  const maybeJson = cleaned.slice(start, end + 1);
363
443
  const parsed = JSON.parse(maybeJson);
364
444
  const result = ollamaPatchPlanSchema.safeParse(parsed);
365
445
  if (!result.success) {
366
446
  throw new Error(
367
- `Ollama patch plan schema validation failed: ${result.error.message}`
447
+ `Patch plan schema validation failed: ${result.error.message}`
368
448
  );
369
449
  }
370
450
  return result.data;
371
451
  }
372
- async function askOllamaForPatchPlan(params) {
373
- const model = params.model || env.ollama.model || "llama3.2:3b";
374
- const prompt = buildPrompt(params.docs, params.existingMethodsJson);
452
+ async function askViaOllama(params) {
375
453
  const response = await axios2.post(
376
454
  `${env.ollama.host}/api/generate`,
377
455
  {
378
- model,
379
- prompt,
456
+ model: params.model,
457
+ prompt: params.prompt,
380
458
  stream: false
381
459
  },
382
460
  {
383
461
  timeout: 12e4
384
462
  }
385
463
  );
386
- return parseJsonOutput(response.data.response);
464
+ return response.data.response;
465
+ }
466
+ async function askViaOpenRouter(params) {
467
+ if (!env.llm.apiKey) {
468
+ throw new Error(
469
+ "Missing updater API key. Set UPDATER_LLM_API_KEY (or OPENROUTER_API_KEY)."
470
+ );
471
+ }
472
+ let response;
473
+ try {
474
+ response = await axios2.post(
475
+ `${env.llm.baseUrl.replace(/\/$/, "")}/chat/completions`,
476
+ {
477
+ model: params.model,
478
+ messages: [
479
+ {
480
+ role: "user",
481
+ content: params.prompt
482
+ }
483
+ ],
484
+ temperature: 0,
485
+ max_tokens: env.llm.maxTokens
486
+ },
487
+ {
488
+ timeout: 12e4,
489
+ headers: {
490
+ Authorization: `Bearer ${env.llm.apiKey}`,
491
+ "Content-Type": "application/json",
492
+ "HTTP-Referer": env.llm.appUrl,
493
+ "X-Title": env.llm.appName
494
+ }
495
+ }
496
+ );
497
+ } catch (error) {
498
+ if (!isAxiosError(error)) {
499
+ throw error;
500
+ }
501
+ const status = error.response?.status;
502
+ const responseBody = error.response?.data;
503
+ const providerMessage = typeof responseBody === "string" ? responseBody : responseBody?.error?.message ?? responseBody?.message ?? "";
504
+ const hint = status === 401 || status === 403 ? "Verify UPDATER_LLM_API_KEY permissions." : status === 402 ? "Your provider account/model likely requires billing or has no quota." : status === 404 ? "Model or endpoint not found. Verify UPDATER_LLM_MODEL and base URL." : status === 429 ? "Rate-limited by provider. Retry later or switch to a less busy model." : status === 413 ? "Prompt payload too large. Reduce UPDATER_LLM_MAX_DOC_CHARS_PER_PAGE." : "Check provider status and updater LLM configuration.";
505
+ throw new OpenRouterRequestError({
506
+ message: `OpenRouter request failed${status ? ` (${status})` : ""}. ${providerMessage || hint}`,
507
+ status,
508
+ canRetryWithAnotherModel: status === 402 || status === 404 || status === 408 || status === 429
509
+ });
510
+ }
511
+ const content = response.data.choices?.[0]?.message?.content;
512
+ if (!content) {
513
+ throw new Error("OpenRouter did not return a message content payload.");
514
+ }
515
+ return content;
516
+ }
517
+ function buildModelChain(params) {
518
+ const fromEnv = params.fallbackModels ?? env.llm.fallbackModels;
519
+ const defaults = [
520
+ "google/gemma-3-4b-it:free",
521
+ "qwen/qwen3-4b:free",
522
+ "openai/gpt-oss-20b:free"
523
+ ];
524
+ const all = [params.primaryModel, ...fromEnv, ...defaults];
525
+ const maxAttempts = Math.max(
526
+ 1,
527
+ params.maxModelAttempts ?? env.llm.maxModelAttempts
528
+ );
529
+ return [...new Set(all.filter(Boolean))].slice(
530
+ 0,
531
+ maxAttempts
532
+ );
533
+ }
534
+ async function askOllamaForPatchPlan(params) {
535
+ const provider = env.llm.provider;
536
+ const model = params.model || env.llm.model || env.ollama.model || "llama3.2:3b";
537
+ const prompt = buildPrompt(params.docs, params.existingMethodsJson);
538
+ if (provider !== "openrouter") {
539
+ const raw = await askViaOllama({ prompt, model });
540
+ return parseJsonOutput(raw);
541
+ }
542
+ const attempts = buildModelChain({
543
+ primaryModel: model,
544
+ fallbackModels: params.fallbackModels,
545
+ maxModelAttempts: params.maxModelAttempts
546
+ });
547
+ const failures = [];
548
+ const fallbackDelayMs = 750;
549
+ for (const [index, candidate] of attempts.entries()) {
550
+ try {
551
+ const raw = await askViaOpenRouter({ prompt, model: candidate });
552
+ return parseJsonOutput(raw);
553
+ } catch (error) {
554
+ if (error instanceof OpenRouterRequestError) {
555
+ failures.push(`${candidate}: ${error.message}`);
556
+ if (!error.canRetryWithAnotherModel) {
557
+ throw error;
558
+ }
559
+ if (index < attempts.length - 1) {
560
+ await sleep(fallbackDelayMs);
561
+ }
562
+ continue;
563
+ }
564
+ throw error;
565
+ }
566
+ }
567
+ throw new Error(
568
+ `All OpenRouter models failed (${attempts.length} attempts). ${failures.join(" | ")}`
569
+ );
387
570
  }
388
571
 
389
572
  // src/updater/patcher.ts
@@ -487,6 +670,66 @@ function parseExistingMethods(methodsJson) {
487
670
  return {};
488
671
  }
489
672
  }
673
+ function expectedPlatformClassName(filePath) {
674
+ const match = filePath.match(/^src\/platforms\/([^/]+)\.ts$/);
675
+ if (!match) {
676
+ return null;
677
+ }
678
+ const slug = match[1];
679
+ const bySlug = {
680
+ x: "X",
681
+ instagram: "Instagram",
682
+ facebook: "Facebook",
683
+ linkedin: "LinkedIn",
684
+ youtube: "YouTube",
685
+ tiktok: "TikTok",
686
+ pinterest: "Pinterest",
687
+ bluesky: "Bluesky",
688
+ mastodon: "Mastodon",
689
+ threads: "Threads"
690
+ };
691
+ return bySlug[slug] ?? null;
692
+ }
693
+ function validateUpdaterPlanSafety(diffs) {
694
+ const findings = [];
695
+ const suspiciousSnippets = [
696
+ "Placeholder for implementation",
697
+ "This file needs to be updated",
698
+ "... other methods ...",
699
+ "from './api'"
700
+ ];
701
+ for (const diff of diffs) {
702
+ if (!diff.path.startsWith("src/platforms/")) {
703
+ continue;
704
+ }
705
+ const beforeLines = diff.before.split(/\r?\n/).length;
706
+ const afterLines = diff.after.split(/\r?\n/).length;
707
+ const shrinkRatio = beforeLines > 0 ? afterLines / beforeLines : 1;
708
+ if (beforeLines >= 120 && shrinkRatio < 0.4) {
709
+ findings.push(
710
+ `${diff.path}: suspicious large rewrite (${beforeLines} -> ${afterLines} lines).`
711
+ );
712
+ }
713
+ for (const snippet of suspiciousSnippets) {
714
+ if (diff.after.includes(snippet)) {
715
+ findings.push(`${diff.path}: suspicious snippet detected (${snippet}).`);
716
+ }
717
+ }
718
+ const className = expectedPlatformClassName(diff.path);
719
+ if (className) {
720
+ const classRegex = new RegExp(`\\bexport\\s+class\\s+${className}\\b`);
721
+ if (!classRegex.test(diff.after)) {
722
+ findings.push(
723
+ `${diff.path}: expected exported class '${className}' was not found.`
724
+ );
725
+ }
726
+ }
727
+ }
728
+ return {
729
+ safe: findings.length === 0,
730
+ findings
731
+ };
732
+ }
490
733
  function hasMaterialChanges(params) {
491
734
  const hasFileChanges = params.diffs.some((diff) => diff.before !== diff.after);
492
735
  const methodsChanged = JSON.stringify(normalizeMethods(params.existingMethods)) !== JSON.stringify(normalizeMethods(params.updatedMethods));
@@ -599,7 +842,9 @@ async function runUpdateCommand(cwd, options = {}) {
599
842
  const plan = await askOllamaForPatchPlan({
600
843
  docs,
601
844
  existingMethodsJson: methodsJson,
602
- model: options.model
845
+ model: options.model,
846
+ fallbackModels: options.fallbackModels,
847
+ maxModelAttempts: options.maxModelAttempts
603
848
  });
604
849
  planSpinner.succeed("Update plan generated.");
605
850
  const generatedFiles = [...plan.files];
@@ -616,6 +861,13 @@ async function runUpdateCommand(cwd, options = {}) {
616
861
  rootDir: cwd,
617
862
  files: generatedFiles
618
863
  });
864
+ const safety = validateUpdaterPlanSafety(diffs);
865
+ if (!safety.safe) {
866
+ const topFindings = safety.findings.slice(0, 8).join(" | ");
867
+ throw new Error(
868
+ `Updater plan rejected by safety checks. ${topFindings}`
869
+ );
870
+ }
619
871
  const changeStats = hasMaterialChanges({
620
872
  diffs,
621
873
  existingMethods,
@@ -700,17 +952,46 @@ async function runUpdateCommand(cwd, options = {}) {
700
952
 
701
953
  // src/cli/index.ts
702
954
  var program = new Command();
955
+ function formatCliError(error) {
956
+ if (error instanceof Error) {
957
+ return error.message;
958
+ }
959
+ return String(error);
960
+ }
961
+ function parseCsv(value) {
962
+ if (!value) {
963
+ return void 0;
964
+ }
965
+ const parsed = value.split(",").map((item) => item.trim()).filter(Boolean);
966
+ return parsed.length > 0 ? parsed : void 0;
967
+ }
968
+ function parseOptionalPositiveInt(value) {
969
+ if (!value) {
970
+ return void 0;
971
+ }
972
+ const parsed = Number(value);
973
+ if (!Number.isFinite(parsed) || parsed <= 0) {
974
+ return void 0;
975
+ }
976
+ return Math.floor(parsed);
977
+ }
703
978
  program.name("universal-social-sdk").description("Universal social media SDK CLI").version("1.1.0");
704
979
  program.command("init").description("Create .env.example and show OAuth setup links").action(async () => {
705
980
  try {
706
981
  await runInitCommand(process.cwd());
707
982
  } catch (error) {
708
983
  console.error(chalk3.red("Initialization failed."));
709
- console.error(error);
984
+ console.error(chalk3.red(formatCliError(error)));
710
985
  process.exitCode = 1;
711
986
  }
712
987
  });
713
- program.command("update").description("Crawl docs + run local Ollama + patch SDK sources").option("--dry-run", "Preview changes without writing files").option("-y, --yes", "Apply changes without confirmation prompt").option("--model <name>", "Override Ollama model for this run").option("--ci", "Run in non-interactive CI mode").option("--open-pr", "Prepare PR artifacts for workflow automation").option(
988
+ program.command("update").description("Crawl docs + run local Ollama + patch SDK sources").option("--dry-run", "Preview changes without writing files").option("-y, --yes", "Apply changes without confirmation prompt").option("--model <name>", "Override Ollama model for this run").option(
989
+ "--fallback-models <csv>",
990
+ "Comma-separated fallback model chain for OpenRouter"
991
+ ).option(
992
+ "--max-model-attempts <number>",
993
+ "Max model attempts for OpenRouter fallback chain"
994
+ ).option("--ci", "Run in non-interactive CI mode").option("--open-pr", "Prepare PR artifacts for workflow automation").option(
714
995
  "--branch-prefix <prefix>",
715
996
  "Branch prefix for updater PR metadata",
716
997
  "chore/updater"
@@ -725,6 +1006,8 @@ program.command("update").description("Crawl docs + run local Ollama + patch SDK
725
1006
  dryRun: options.dryRun,
726
1007
  yes: options.yes,
727
1008
  model: options.model,
1009
+ fallbackModels: parseCsv(options.fallbackModels),
1010
+ maxModelAttempts: parseOptionalPositiveInt(options.maxModelAttempts),
728
1011
  ci: options.ci,
729
1012
  openPr: options.openPr,
730
1013
  branchPrefix: options.branchPrefix,
@@ -733,7 +1016,7 @@ program.command("update").description("Crawl docs + run local Ollama + patch SDK
733
1016
  });
734
1017
  } catch (error) {
735
1018
  console.error(chalk3.red("Update failed."));
736
- console.error(error);
1019
+ console.error(chalk3.red(formatCliError(error)));
737
1020
  process.exitCode = 1;
738
1021
  }
739
1022
  }