@polka-codes/cli 0.9.71 → 0.9.72

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +266 -183
  2. package/package.json +8 -8
package/dist/index.js CHANGED
@@ -37660,7 +37660,7 @@ var {
  Help
  } = import__.default;
  // package.json
- var version = "0.9.71";
+ var version = "0.9.72";

  // src/commands/code.ts
  import { readFile as readFile4 } from "node:fs/promises";
@@ -59174,7 +59174,7 @@ function detectMediaType({
  }
  return;
  }
- var VERSION3 = "5.0.106";
+ var VERSION3 = "5.0.108";
  var download = async ({ url: url2 }) => {
  var _a16;
  const urlText = url2.toString();
@@ -66860,7 +66860,7 @@ async function generateAuthToken(options) {
  const token = await client.getAccessToken();
  return (token == null ? undefined : token.token) || null;
  }
- var VERSION4 = "3.0.86";
+ var VERSION4 = "3.0.87";
  var googleVertexErrorDataSchema = exports_external.object({
  error: exports_external.object({
  code: exports_external.number().nullable(),
@@ -73682,7 +73682,7 @@ function createDeepSeek(options = {}) {
  var deepseek = createDeepSeek();

  // ../../node_modules/@ai-sdk/google/dist/index.mjs
- var VERSION7 = "2.0.44";
+ var VERSION7 = "2.0.45";
  var googleErrorDataSchema2 = lazySchema(() => zodSchema(exports_external.object({
  error: exports_external.object({
  code: exports_external.number().nullable(),
@@ -75166,6 +75166,20 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
  });
+ function getOpenAILanguageModelCapabilities(modelId) {
+ const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+ const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ const isReasoningModel = !(modelId.startsWith("gpt-3") || modelId.startsWith("gpt-4") || modelId.startsWith("chatgpt-4o") || modelId.startsWith("gpt-5-chat"));
+ const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
+ const systemMessageMode = isReasoningModel ? "developer" : "system";
+ return {
+ supportsFlexProcessing,
+ supportsPriorityProcessing,
+ isReasoningModel,
+ systemMessageMode,
+ supportsNonReasoningParameters
+ };
+ }
  function convertToOpenAIChatMessages({
  prompt,
  systemMessageMode = "system"
@@ -75580,6 +75594,7 @@ var OpenAIChatLanguageModel = class {
  schema: openaiChatLanguageModelOptions
  })) != null ? _a16 : {};
  const structuredOutputs = (_b8 = openaiOptions.structuredOutputs) != null ? _b8 : true;
+ const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -75595,7 +75610,7 @@ var OpenAIChatLanguageModel = class {
  }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages({
  prompt,
- systemMessageMode: getSystemMessageMode(this.modelId)
+ systemMessageMode: modelCapabilities.systemMessageMode
  });
  warnings.push(...messageWarnings);
  const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
@@ -75634,22 +75649,31 @@ var OpenAIChatLanguageModel = class {
  safety_identifier: openaiOptions.safetyIdentifier,
  messages
  };
- if (isReasoningModel(this.modelId)) {
- if (baseArgs.temperature != null) {
- baseArgs.temperature = undefined;
- warnings.push({
- type: "unsupported-setting",
- setting: "temperature",
- details: "temperature is not supported for reasoning models"
- });
- }
- if (baseArgs.top_p != null) {
- baseArgs.top_p = undefined;
- warnings.push({
- type: "unsupported-setting",
- setting: "topP",
- details: "topP is not supported for reasoning models"
- });
+ if (modelCapabilities.isReasoningModel) {
+ if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = undefined;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_p != null) {
+ baseArgs.top_p = undefined;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topP",
+ details: "topP is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.logprobs != null) {
+ baseArgs.logprobs = undefined;
+ warnings.push({
+ type: "other",
+ message: "logprobs is not supported for reasoning models"
+ });
+ }
  }
  if (baseArgs.frequency_penalty != null) {
  baseArgs.frequency_penalty = undefined;
@@ -75674,13 +75698,6 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
- if (baseArgs.logprobs != null) {
- baseArgs.logprobs = undefined;
- warnings.push({
- type: "other",
- message: "logprobs is not supported for reasoning models"
- });
- }
  if (baseArgs.top_logprobs != null) {
  baseArgs.top_logprobs = undefined;
  warnings.push({
@@ -75704,7 +75721,7 @@ var OpenAIChatLanguageModel = class {
  });
  }
  }
- if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+ if (openaiOptions.serviceTier === "flex" && !modelCapabilities.supportsFlexProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -75712,7 +75729,7 @@ var OpenAIChatLanguageModel = class {
  });
  baseArgs.service_tier = undefined;
  }
- if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+ if (openaiOptions.serviceTier === "priority" && !modelCapabilities.supportsPriorityProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -76027,42 +76044,6 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- function isReasoningModel(modelId) {
- return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
- }
- function supportsFlexProcessing(modelId) {
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
- }
- function supportsPriorityProcessing(modelId) {
- return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
- }
- function getSystemMessageMode(modelId) {
- var _a16, _b8;
- if (!isReasoningModel(modelId)) {
- return "system";
- }
- return (_b8 = (_a16 = reasoningModels[modelId]) == null ? undefined : _a16.systemMessageMode) != null ? _b8 : "developer";
- }
- var reasoningModels = {
- o3: {
- systemMessageMode: "developer"
- },
- "o3-2025-04-16": {
- systemMessageMode: "developer"
- },
- "o3-mini": {
- systemMessageMode: "developer"
- },
- "o3-mini-2025-01-31": {
- systemMessageMode: "developer"
- },
- "o4-mini": {
- systemMessageMode: "developer"
- },
- "o4-mini-2025-04-16": {
- systemMessageMode: "developer"
- }
- };
  function convertToOpenAICompletionPrompt({
  prompt,
  user = "user",
@@ -76630,8 +76611,8 @@ var codeInterpreter = (args = {}) => {
  };
  var comparisonFilterSchema = exports_external.object({
  key: exports_external.string(),
- type: exports_external.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
- value: exports_external.union([exports_external.string(), exports_external.number(), exports_external.boolean()])
+ type: exports_external.enum(["eq", "ne", "gt", "gte", "lt", "lte", "in", "nin"]),
+ value: exports_external.union([exports_external.string(), exports_external.number(), exports_external.boolean(), exports_external.array(exports_external.string())])
  });
  var compoundFilterSchema = exports_external.object({
  type: exports_external.enum(["and", "or"]),
@@ -76726,12 +76707,12 @@ var webSearchOutputSchema = lazySchema(() => zodSchema(exports_external.object({
  }),
  exports_external.object({
  type: exports_external.literal("openPage"),
- url: exports_external.string()
+ url: exports_external.string().nullish()
  }),
  exports_external.object({
  type: exports_external.literal("find"),
- url: exports_external.string(),
- pattern: exports_external.string()
+ url: exports_external.string().nullish(),
+ pattern: exports_external.string().nullish()
  })
  ]),
  sources: exports_external.array(exports_external.discriminatedUnion("type", [
@@ -76765,12 +76746,12 @@ var webSearchPreviewOutputSchema = lazySchema(() => zodSchema(exports_external.o
  }),
  exports_external.object({
  type: exports_external.literal("openPage"),
- url: exports_external.string()
+ url: exports_external.string().nullish()
  }),
  exports_external.object({
  type: exports_external.literal("find"),
- url: exports_external.string(),
- pattern: exports_external.string()
+ url: exports_external.string().nullish(),
+ pattern: exports_external.string().nullish()
  })
  ])
  })));
@@ -77218,12 +77199,12 @@ var openaiResponsesChunkSchema = lazyValidator(() => zodSchema(exports_external.
  }),
  exports_external.object({
  type: exports_external.literal("open_page"),
- url: exports_external.string()
+ url: exports_external.string().nullish()
  }),
  exports_external.object({
  type: exports_external.literal("find"),
- url: exports_external.string(),
- pattern: exports_external.string()
+ url: exports_external.string().nullish(),
+ pattern: exports_external.string().nullish()
  })
  ])
  }),
@@ -77410,12 +77391,12 @@ var openaiResponsesResponseSchema = lazyValidator(() => zodSchema(exports_extern
  }),
  exports_external.object({
  type: exports_external.literal("open_page"),
- url: exports_external.string()
+ url: exports_external.string().nullish()
  }),
  exports_external.object({
  type: exports_external.literal("find"),
- url: exports_external.string(),
- pattern: exports_external.string()
+ url: exports_external.string().nullish(),
+ pattern: exports_external.string().nullish()
  })
  ])
  }),
@@ -77748,7 +77729,7 @@ var OpenAIResponsesLanguageModel = class {
  }) {
  var _a16, _b8, _c, _d;
  const warnings = [];
- const modelConfig = getResponsesModelConfig(this.modelId);
+ const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
  if (topK != null) {
  warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
@@ -77784,7 +77765,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
  prompt,
- systemMessageMode: modelConfig.systemMessageMode,
+ systemMessageMode: modelCapabilities.systemMessageMode,
  fileIdPrefixes: this.config.fileIdPrefixes,
  store: (_a16 = openaiOptions == null ? undefined : openaiOptions.store) != null ? _a16 : true,
  hasLocalShellTool: hasOpenAITool("openai.local_shell")
@@ -77814,7 +77795,7 @@ var OpenAIResponsesLanguageModel = class {
  addInclude("code_interpreter_call.outputs");
  }
  const store = openaiOptions == null ? undefined : openaiOptions.store;
- if (store === false && modelConfig.isReasoningModel) {
+ if (store === false && modelCapabilities.isReasoningModel) {
  addInclude("reasoning.encrypted_content");
  }
  const baseArgs = {
@@ -77854,7 +77835,7 @@ var OpenAIResponsesLanguageModel = class {
  safety_identifier: openaiOptions == null ? undefined : openaiOptions.safetyIdentifier,
  top_logprobs: topLogprobs,
  truncation: openaiOptions == null ? undefined : openaiOptions.truncation,
- ...modelConfig.isReasoningModel && ((openaiOptions == null ? undefined : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? undefined : openaiOptions.reasoningSummary) != null) && {
+ ...modelCapabilities.isReasoningModel && ((openaiOptions == null ? undefined : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? undefined : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
  ...(openaiOptions == null ? undefined : openaiOptions.reasoningEffort) != null && {
  effort: openaiOptions.reasoningEffort
@@ -77865,22 +77846,24 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  };
- if (modelConfig.isReasoningModel) {
- if (baseArgs.temperature != null) {
- baseArgs.temperature = undefined;
- warnings.push({
- type: "unsupported-setting",
- setting: "temperature",
- details: "temperature is not supported for reasoning models"
- });
- }
- if (baseArgs.top_p != null) {
- baseArgs.top_p = undefined;
- warnings.push({
- type: "unsupported-setting",
- setting: "topP",
- details: "topP is not supported for reasoning models"
- });
+ if (modelCapabilities.isReasoningModel) {
+ if (!((openaiOptions == null ? undefined : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = undefined;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_p != null) {
+ baseArgs.top_p = undefined;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topP",
+ details: "topP is not supported for reasoning models"
+ });
+ }
  }
  } else {
  if ((openaiOptions == null ? undefined : openaiOptions.reasoningEffort) != null) {
@@ -77898,7 +77881,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
- if ((openaiOptions == null ? undefined : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
+ if ((openaiOptions == null ? undefined : openaiOptions.serviceTier) === "flex" && !modelCapabilities.supportsFlexProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -77906,7 +77889,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  delete baseArgs.service_tier;
  }
- if ((openaiOptions == null ? undefined : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
+ if ((openaiOptions == null ? undefined : openaiOptions.serviceTier) === "priority" && !modelCapabilities.supportsPriorityProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -78749,32 +78732,6 @@ function isResponseAnnotationAddedChunk(chunk) {
  function isErrorChunk(chunk) {
  return chunk.type === "error";
  }
- function getResponsesModelConfig(modelId) {
- const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
- const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
- const defaults = {
- systemMessageMode: "system",
- supportsFlexProcessing: supportsFlexProcessing2,
- supportsPriorityProcessing: supportsPriorityProcessing2
- };
- if (modelId.startsWith("gpt-5-chat")) {
- return {
- ...defaults,
- isReasoningModel: false
- };
- }
- if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
- return {
- ...defaults,
- isReasoningModel: true,
- systemMessageMode: "developer"
- };
- }
- return {
- ...defaults,
- isReasoningModel: false
- };
- }
  function mapWebSearchOutput(action) {
  var _a16;
  switch (action.type) {
@@ -79080,7 +79037,7 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var VERSION8 = "2.0.76";
+ var VERSION8 = "2.0.80";
  function createOpenAI(options = {}) {
  var _a16, _b8;
  const baseURL = (_a16 = withoutTrailingSlash(loadOptionalSetting({
@@ -80058,9 +80015,21 @@ var openrouterFailedResponseHandler = createJsonErrorResponseHandler2({
  errorSchema: OpenRouterErrorResponseSchema,
  errorToMessage: (data) => data.error.message
  });
+ var FileAnnotationSchema = exports_external.object({
+ type: exports_external.literal("file"),
+ file: exports_external.object({
+ hash: exports_external.string(),
+ name: exports_external.string(),
+ content: exports_external.array(exports_external.object({
+ type: exports_external.string(),
+ text: exports_external.string().optional()
+ }).passthrough()).optional()
+ }).passthrough()
+ });
  var OpenRouterProviderMetadataSchema = exports_external.object({
  provider: exports_external.string(),
  reasoning_details: exports_external.array(ReasoningDetailUnionSchema).optional(),
+ annotations: exports_external.array(FileAnnotationSchema).optional(),
  usage: exports_external.object({
  promptTokens: exports_external.number(),
  promptTokensDetails: exports_external.object({
@@ -80079,7 +80048,8 @@ var OpenRouterProviderMetadataSchema = exports_external.object({
  }).passthrough();
  var OpenRouterProviderOptionsSchema = exports_external.object({
  openrouter: exports_external.object({
- reasoning_details: exports_external.array(ReasoningDetailUnionSchema).optional()
+ reasoning_details: exports_external.array(ReasoningDetailUnionSchema).optional(),
+ annotations: exports_external.array(FileAnnotationSchema).optional()
  }).optional()
  }).optional();
  function mapOpenRouterFinishReason(finishReason) {
@@ -80097,6 +80067,17 @@ function mapOpenRouterFinishReason(finishReason) {
  return "unknown";
  }
  }
+ var OPENROUTER_AUDIO_FORMATS = [
+ "wav",
+ "mp3",
+ "aiff",
+ "aac",
+ "ogg",
+ "flac",
+ "m4a",
+ "pcm16",
+ "pcm24"
+ ];
  function isUrl({
  url: url2,
  protocols
@@ -80135,6 +80116,26 @@ function getBase64FromDataUrl(dataUrl) {
  const match = dataUrl.match(/^data:[^;]*;base64,(.+)$/);
  return match ? match[1] : dataUrl;
  }
+ var MIME_TO_FORMAT = {
+ mpeg: "mp3",
+ mp3: "mp3",
+ "x-wav": "wav",
+ wave: "wav",
+ wav: "wav",
+ ogg: "ogg",
+ vorbis: "ogg",
+ aac: "aac",
+ "x-aac": "aac",
+ m4a: "m4a",
+ "x-m4a": "m4a",
+ mp4: "m4a",
+ aiff: "aiff",
+ "x-aiff": "aiff",
+ flac: "flac",
+ "x-flac": "flac",
+ pcm16: "pcm16",
+ pcm24: "pcm24"
+ };
  function getInputAudioData(part) {
  const fileData = getFileUrl({
  part,
@@ -80157,18 +80158,13 @@ Learn more: https://openrouter.ai/docs/features/multimodal/audio`);
  }
  const data = getBase64FromDataUrl(fileData);
  const mediaType = part.mediaType || "audio/mpeg";
- let format = mediaType.replace("audio/", "");
- if (format === "mpeg" || format === "mp3") {
- format = "mp3";
- } else if (format === "x-wav" || format === "wave" || format === "wav") {
- format = "wav";
- }
- if (format !== "mp3" && format !== "wav") {
+ const rawFormat = mediaType.replace("audio/", "");
+ const format = MIME_TO_FORMAT[rawFormat];
+ if (format === undefined) {
+ const supportedList = OPENROUTER_AUDIO_FORMATS.join(", ");
  throw new Error(`Unsupported audio format: "${mediaType}"

- OpenRouter only supports MP3 and WAV audio formats.
- • For MP3: use "audio/mpeg" or "audio/mp3"
- • For WAV: use "audio/wav" or "audio/x-wav"
+ OpenRouter supports the following audio formats: ${supportedList}

  Learn more: https://openrouter.ai/docs/features/multimodal/audio`);
  }
@@ -80181,7 +80177,7 @@ function getCacheControl2(providerMetadata) {
  return (_c = (_b8 = (_a153 = openrouter2 == null ? undefined : openrouter2.cacheControl) != null ? _a153 : openrouter2 == null ? undefined : openrouter2.cache_control) != null ? _b8 : anthropic2 == null ? undefined : anthropic2.cacheControl) != null ? _c : anthropic2 == null ? undefined : anthropic2.cache_control;
  }
  function convertToOpenRouterChatMessages(prompt) {
- var _a153, _b8, _c, _d, _e, _f, _g, _h;
+ var _a153, _b8, _c, _d, _e, _f, _g, _h, _i, _j;
  const messages = [];
  for (const { role, content, providerOptions } of prompt) {
  switch (role) {
@@ -80325,6 +80321,7 @@ function convertToOpenRouterChatMessages(prompt) {
  }
  const parsedProviderOptions = OpenRouterProviderOptionsSchema.safeParse(providerOptions);
  const messageReasoningDetails = parsedProviderOptions.success ? (_g = (_f = parsedProviderOptions.data) == null ? undefined : _f.openrouter) == null ? undefined : _g.reasoning_details : undefined;
+ const messageAnnotations = parsedProviderOptions.success ? (_i = (_h = parsedProviderOptions.data) == null ? undefined : _h.openrouter) == null ? undefined : _i.annotations : undefined;
  const finalReasoningDetails = messageReasoningDetails && Array.isArray(messageReasoningDetails) && messageReasoningDetails.length > 0 ? messageReasoningDetails : accumulatedReasoningDetails.length > 0 ? accumulatedReasoningDetails : undefined;
  messages.push({
  role: "assistant",
@@ -80332,6 +80329,7 @@ function convertToOpenRouterChatMessages(prompt) {
  tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
  reasoning: reasoning || undefined,
  reasoning_details: finalReasoningDetails,
+ annotations: messageAnnotations,
  cache_control: getCacheControl2(providerOptions)
  });
  break;
@@ -80343,7 +80341,7 @@ function convertToOpenRouterChatMessages(prompt) {
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
  content: content2,
- cache_control: (_h = getCacheControl2(providerOptions)) != null ? _h : getCacheControl2(toolResponse.providerOptions)
+ cache_control: (_j = getCacheControl2(providerOptions)) != null ? _j : getCacheControl2(toolResponse.providerOptions)
  });
  }
  break;
@@ -80556,6 +80554,7 @@ var OpenRouterChatLanguageModel = class {
  this.specificationVersion = "v2";
  this.provider = "openrouter";
  this.defaultObjectGenerationMode = "tool";
+ this.supportsImageUrls = true;
  this.supportedUrls = {
  "image/*": [
  /^data:image\/[a-zA-Z]+;base64,/,
@@ -80597,7 +80596,16 @@ var OpenRouterChatLanguageModel = class {
  presence_penalty: presencePenalty,
  seed,
  stop: stopSequences,
- response_format: responseFormat,
+ response_format: (responseFormat == null ? undefined : responseFormat.type) === "json" ? responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: __spreadValues({
+ schema: responseFormat.schema,
+ strict: true,
+ name: (_a153 = responseFormat.name) != null ? _a153 : "response"
+ }, responseFormat.description && {
+ description: responseFormat.description
+ })
+ } : { type: "json_object" } : undefined,
  top_k: topK,
  messages: convertToOpenRouterChatMessages(prompt),
  include_reasoning: this.settings.includeReasoning,
@@ -80608,20 +80616,6 @@ var OpenRouterChatLanguageModel = class {
  provider: this.settings.provider,
  debug: this.settings.debug
  }, this.config.extraBody), this.settings.extraBody);
- if ((responseFormat == null ? undefined : responseFormat.type) === "json" && responseFormat.schema != null) {
- return __spreadProps(__spreadValues({}, baseArgs), {
- response_format: {
- type: "json_schema",
- json_schema: __spreadValues({
- schema: responseFormat.schema,
- strict: true,
- name: (_a153 = responseFormat.name) != null ? _a153 : "response"
- }, responseFormat.description && {
- description: responseFormat.description
- })
- }
- });
- }
  if (tools2 && tools2.length > 0) {
  const mappedTools = tools2.filter((tool3) => tool3.type === "function").map((tool3) => ({
  type: "function",
@@ -80639,7 +80633,7 @@ var OpenRouterChatLanguageModel = class {
  return baseArgs;
  }
  async doGenerate(options) {
- var _a153, _b8, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
+ var _a153, _b8, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
  const providerOptions = options.providerOptions || {};
  const openrouterOptions = providerOptions.openrouter || {};
  const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
@@ -80793,6 +80787,7 @@ var OpenRouterChatLanguageModel = class {
  }
  }
  }
+ const fileAnnotations = (_k = choice.message.annotations) == null ? undefined : _k.filter((a) => a.type === "file");
  return {
  content,
  finishReason: mapOpenRouterFinishReason(choice.finish_reason),
@@ -80800,22 +80795,23 @@ var OpenRouterChatLanguageModel = class {
  warnings: [],
  providerMetadata: {
  openrouter: OpenRouterProviderMetadataSchema.parse({
- provider: (_k = response.provider) != null ? _k : "",
- reasoning_details: (_l = choice.message.reasoning_details) != null ? _l : [],
+ provider: (_l = response.provider) != null ? _l : "",
+ reasoning_details: (_m = choice.message.reasoning_details) != null ? _m : [],
+ annotations: fileAnnotations && fileAnnotations.length > 0 ? fileAnnotations : undefined,
  usage: __spreadValues(__spreadValues(__spreadValues({
- promptTokens: (_m = usageInfo.inputTokens) != null ? _m : 0,
- completionTokens: (_n = usageInfo.outputTokens) != null ? _n : 0,
- totalTokens: (_o = usageInfo.totalTokens) != null ? _o : 0,
- cost: (_p = response.usage) == null ? undefined : _p.cost
- }, ((_r = (_q = response.usage) == null ? undefined : _q.prompt_tokens_details) == null ? undefined : _r.cached_tokens) != null ? {
+ promptTokens: (_n = usageInfo.inputTokens) != null ? _n : 0,
+ completionTokens: (_o = usageInfo.outputTokens) != null ? _o : 0,
+ totalTokens: (_p = usageInfo.totalTokens) != null ? _p : 0,
+ cost: (_q = response.usage) == null ? undefined : _q.cost
+ }, ((_s = (_r = response.usage) == null ? undefined : _r.prompt_tokens_details) == null ? undefined : _s.cached_tokens) != null ? {
  promptTokensDetails: {
  cachedTokens: response.usage.prompt_tokens_details.cached_tokens
  }
- } : {}), ((_t = (_s = response.usage) == null ? undefined : _s.completion_tokens_details) == null ? undefined : _t.reasoning_tokens) != null ? {
+ } : {}), ((_u = (_t = response.usage) == null ? undefined : _t.completion_tokens_details) == null ? undefined : _u.reasoning_tokens) != null ? {
  completionTokensDetails: {
  reasoningTokens: response.usage.completion_tokens_details.reasoning_tokens
  }
- } : {}), ((_v = (_u = response.usage) == null ? undefined : _u.cost_details) == null ? undefined : _v.upstream_inference_cost) != null ? {
+ } : {}), ((_w = (_v = response.usage) == null ? undefined : _v.cost_details) == null ? undefined : _w.upstream_inference_cost) != null ? {
  costDetails: {
  upstreamInferenceCost: response.usage.cost_details.upstream_inference_cost
  }
@@ -81359,6 +81355,7 @@ var OpenRouterCompletionLanguageModel = class {
  constructor(modelId, settings, config4) {
  this.specificationVersion = "v2";
  this.provider = "openrouter";
+ this.supportsImageUrls = true;
  this.supportedUrls = {
  "image/*": [
  /^data:image\/[a-zA-Z]+;base64,/,
@@ -81582,6 +81579,71 @@ var OpenRouterCompletionLanguageModel = class {
  };
  }
  };
+ var openrouterEmbeddingUsageSchema = exports_external.object({
+ prompt_tokens: exports_external.number(),
+ total_tokens: exports_external.number(),
+ cost: exports_external.number().optional()
+ });
+ var openrouterEmbeddingDataSchema = exports_external.object({
+ object: exports_external.literal("embedding"),
+ embedding: exports_external.array(exports_external.number()),
+ index: exports_external.number().optional()
+ });
+ var OpenRouterEmbeddingResponseSchema = exports_external.object({
+ id: exports_external.string().optional(),
+ object: exports_external.literal("list"),
+ data: exports_external.array(openrouterEmbeddingDataSchema),
+ model: exports_external.string(),
+ usage: openrouterEmbeddingUsageSchema.optional()
+ });
+ var OpenRouterEmbeddingModel = class {
+ constructor(modelId, settings, config4) {
+ this.specificationVersion = "v2";
+ this.provider = "openrouter";
+ this.maxEmbeddingsPerCall = undefined;
+ this.supportsParallelCalls = true;
+ this.modelId = modelId;
+ this.settings = settings;
+ this.config = config4;
+ }
+ async doEmbed(options) {
+ var _a153;
+ const { values, abortSignal, headers } = options;
+ const args = __spreadValues(__spreadValues({
+ model: this.modelId,
+ input: values,
+ user: this.settings.user,
+ provider: this.settings.provider
+ }, this.config.extraBody), this.settings.extraBody);
+ const { value: responseValue, responseHeaders } = await postJsonToApi2({
+ url: this.config.url({
+ path: "/embeddings",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders2(this.config.headers(), headers),
+ body: args,
+ failedResponseHandler: openrouterFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler2(OpenRouterEmbeddingResponseSchema),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ embeddings: responseValue.data.map((item) => item.embedding),
+ usage: responseValue.usage ? { tokens: responseValue.usage.prompt_tokens } : undefined,
+ providerMetadata: ((_a153 = responseValue.usage) == null ? undefined : _a153.cost) ? {
+ openrouter: {
+ usage: {
+ cost: responseValue.usage.cost
+ }
+ }
+ } : undefined,
+ response: {
+ headers: responseHeaders,
+ body: responseValue
+ }
+ };
+ }
+ };
  function removeUndefinedEntries2(record2) {
  return Object.fromEntries(Object.entries(record2).filter(([, value]) => value !== null));
  }
@@ -81593,18 +81655,20 @@ function withUserAgentSuffix2(headers, ...userAgentSuffixParts) {
  "user-agent": newUserAgent
  });
  }
- var VERSION9 = "1.3.0";
+ var VERSION9 = "1.5.0";
  function createOpenRouter(options = {}) {
  var _a153, _b8, _c;
  const baseURL = (_b8 = withoutTrailingSlash2((_a153 = options.baseURL) != null ? _a153 : options.baseUrl)) != null ? _b8 : "https://openrouter.ai/api/v1";
  const compatibility = (_c = options.compatibility) != null ? _c : "compatible";
- const getHeaders = () => withUserAgentSuffix2(__spreadValues({
+ const getHeaders = () => withUserAgentSuffix2(__spreadValues(__spreadValues({
  Authorization: `Bearer ${loadApiKey2({
  apiKey: options.apiKey,
  environmentVariableName: "OPENROUTER_API_KEY",
  description: "OpenRouter"
  })}`
- }, options.headers), `ai-sdk/openrouter/${VERSION9}`);
+ }, options.headers), options.api_keys && Object.keys(options.api_keys).length > 0 && {
+ "X-Provider-API-Keys": JSON.stringify(options.api_keys)
+ }), `ai-sdk/openrouter/${VERSION9}`);
  const createChatModel = (modelId, settings = {}) => new OpenRouterChatLanguageModel(modelId, settings, {
  provider: "openrouter.chat",
  url: ({ path }) => `${baseURL}${path}`,
@@ -81621,6 +81685,13 @@ function createOpenRouter(options = {}) {
  fetch: options.fetch,
  extraBody: options.extraBody
  });
+ const createEmbeddingModel = (modelId, settings = {}) => new OpenRouterEmbeddingModel(modelId, settings, {
+ provider: "openrouter.embedding",
+ url: ({ path }) => `${baseURL}${path}`,
+ headers: getHeaders,
+ fetch: options.fetch,
+ extraBody: options.extraBody
+ });
  const createLanguageModel = (modelId, settings) => {
  if (new.target) {
  throw new Error("The OpenRouter model function cannot be called with the new keyword.");
@@ -81634,6 +81705,8 @@ function createOpenRouter(options = {}) {
  provider3.languageModel = createLanguageModel;
  provider3.chat = createChatModel;
  provider3.completion = createCompletionModel;
+ provider3.textEmbeddingModel = createEmbeddingModel;
+ provider3.embedding = createEmbeddingModel;
  return provider3;
  }
  var openrouter = createOpenRouter({
@@ -86794,7 +86867,7 @@ async function findNextTask(tools2) {
  currentTask = subTasks[0];
  }
  }
- async function runImplementationLoop(context, highLevelPlan, saveUsageSnapshot, additionalTools) {
+ async function runImplementationLoop(context, highLevelPlan, saveUsageSnapshot, additionalTools, noReview) {
  const { logger, step, tools: tools2 } = context;
  const commitMessages = [];
  logger.info(`Phase 5: Iterative Implementation Loop...
@@ -86839,14 +86912,20 @@ Focus only on this item, but use the plan for context.`;
  await tools2.executeCommand({ command: "git", args: ["commit", "-m", commitMessage] });
  });
  commitMessages.push(commitMessage);
- const { passed: reviewPassed, commitMessages: fixCommitMessages } = await performReviewAndFixCycle(iterationCount, nextTask, highLevelPlan, context, additionalTools);
- commitMessages.push(...fixCommitMessages);
- const taskElapsed = Date.now() - taskStartTime;
- const taskElapsedTime = formatElapsedTime(taskElapsed);
- if (reviewPassed) {
- logger.info(`Iteration ${iterationCount} completed successfully (${taskElapsedTime})`);
+ if (!noReview) {
+ const { passed: reviewPassed, commitMessages: fixCommitMessages } = await performReviewAndFixCycle(iterationCount, nextTask, highLevelPlan, context, additionalTools);
+ commitMessages.push(...fixCommitMessages);
+ const taskElapsed = Date.now() - taskStartTime;
+ const taskElapsedTime = formatElapsedTime(taskElapsed);
+ if (reviewPassed) {
+ logger.info(`Iteration ${iterationCount} completed successfully (${taskElapsedTime})`);
+ } else {
+ logger.warn(`Warning: Iteration ${iterationCount} completed with potential issues (${taskElapsedTime})`);
+ }
  } else {
- logger.warn(`Warning: Iteration ${iterationCount} completed with potential issues (${taskElapsedTime})`);
+ const taskElapsed = Date.now() - taskStartTime;
+ const taskElapsedTime = formatElapsedTime(taskElapsed);
+ logger.info(`Iteration ${iterationCount} completed (${taskElapsedTime})`);
  }
  await step(`update-task-status-${iterationCount}`, async () => {
  await tools2.updateTodoItem({ operation: "update", id: nextTaskId, status: "completed" });
@@ -86977,7 +87056,7 @@ Max retries (${MAX_REVIEW_RETRIES}) reached for final review. Issues might remai
  }
  var epicWorkflow = async (input2, context) => {
  const { logger, tools: tools2 } = context;
- const { task, saveEpicContext, saveUsageSnapshot, additionalTools } = input2;
+ const { task, saveEpicContext, saveUsageSnapshot, additionalTools, noReview } = input2;
  const workflowStartTime = Date.now();
  if (!task || task.trim() === "") {
  logger.error("Error: Task cannot be empty. Please provide a valid task description.");
@@ -87022,8 +87101,10 @@ var epicWorkflow = async (input2, context) => {
  if (todos.length === 0) {
  await addTodoItemsFromPlan(input2.plan, context);
  }
- const commitMessages = await runImplementationLoop(context, input2.plan, saveUsageSnapshot, additionalTools);
- await performFinalReviewAndFix(context, input2.plan, input2.baseBranch ?? undefined, additionalTools);
+ const commitMessages = await runImplementationLoop(context, input2.plan, saveUsageSnapshot, additionalTools, noReview);
+ if (!noReview) {
+ await performFinalReviewAndFix(context, input2.plan, input2.baseBranch ?? undefined, additionalTools);
+ }
  await saveUsageSnapshot();
  await tools2.executeCommand({ command: "git", args: ["rm", "-f", ".epic.yml"] });
  const statusResult = await tools2.executeCommand({
@@ -87516,9 +87597,10 @@ class EpicTodoItemStore {
  }

  // src/commands/epic.ts
- async function runEpic(task2, _options, command) {
+ async function runEpic(task2, options, command) {
  const globalOpts = (command.parent ?? command).opts();
  const { verbose, yes } = globalOpts;
+ const { review: review3 } = options;
  const logger = createLogger({
  verbose
  });
@@ -87545,6 +87627,7 @@ async function runEpic(task2, _options, command) {
  const workflowInput = {
  ...epicContext,
  interactive: !yes,
+ noReview: review3 === false,
  saveEpicContext: async (context) => {
  if (context.task)
  workflowInput.task = context.task;
@@ -87582,7 +87665,7 @@ async function runEpic(task2, _options, command) {
  }
  });
  }
- var epicCommand = new Command("epic").description("Orchestrates a large feature or epic, breaking it down into smaller tasks.").argument("[task]", "The epic to plan and implement.").action(runEpic);
+ var epicCommand = new Command("epic").description("Orchestrates a large feature or epic, breaking it down into smaller tasks.").argument("[task]", "The epic to plan and implement.").option("--no-review", "Disable the review step").action(runEpic);

  // src/commands/fix.ts
  var fixCommand = new Command("fix").description("Fix issues by running a command and letting an agent fix it.").argument("[command]", "The command to run").option("-p, --prompt <prompt>", "Additional prompt for the agent.").action(async (command, options, cmd) => {
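Of note in the index.js changes above: the bundled OpenRouter provider now ships an embeddings endpoint (OpenRouterEmbeddingModel, exposed as provider3.textEmbeddingModel and provider3.embedding). A minimal usage sketch, not taken from this package: it assumes the AI SDK's embed helper, an OPENROUTER_API_KEY in the environment, and an illustrative model id.

import { embed } from "ai";
import { createOpenRouter } from "@openrouter/ai-sdk-provider";

// createOpenRouter reads OPENROUTER_API_KEY when no apiKey option is passed.
const openrouter = createOpenRouter();

// textEmbeddingModel is the factory added in this diff; the model id is illustrative.
const { embedding, usage } = await embed({
  model: openrouter.textEmbeddingModel("openai/text-embedding-3-small"),
  value: "polka codes",
});

console.log(embedding.length, usage); // vector dimension and { tokens } usage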
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@polka-codes/cli",
- "version": "0.9.71",
+ "version": "0.9.72",
  "license": "AGPL-3.0",
  "author": "github@polka.codes",
  "type": "module",
@@ -19,16 +19,16 @@
  "dependencies": {
  "@ai-sdk/anthropic": "^2.0.53",
  "@ai-sdk/deepseek": "^1.0.31",
- "@ai-sdk/google": "^2.0.44",
- "@ai-sdk/google-vertex": "^3.0.86",
- "@ai-sdk/openai": "^2.0.76",
+ "@ai-sdk/google": "^2.0.45",
+ "@ai-sdk/google-vertex": "^3.0.87",
+ "@ai-sdk/openai": "^2.0.80",
  "@ai-sdk/provider": "^2.0.0",
  "@ai-sdk/provider-utils": "^3.0.18",
  "@inquirer/prompts": "^8.0.2",
- "@openrouter/ai-sdk-provider": "^1.3.0",
- "@polka-codes/cli-shared": "0.9.68",
- "@polka-codes/core": "0.9.68",
- "ai": "^5.0.106",
+ "@openrouter/ai-sdk-provider": "^1.5.0",
+ "@polka-codes/cli-shared": "0.9.71",
+ "@polka-codes/core": "0.9.71",
+ "ai": "^5.0.108",
  "chalk": "^5.6.2",
  "commander": "^14.0.2",
  "dotenv": "^17.2.3",