@polka-codes/cli 0.9.70 → 0.9.72

Files changed (2)
  1. package/dist/index.js +279 -186
  2. package/package.json +8 -8
package/dist/index.js CHANGED
@@ -37660,7 +37660,7 @@ var {
  Help
  } = import__.default;
  // package.json
- var version = "0.9.70";
+ var version = "0.9.72";

  // src/commands/code.ts
  import { readFile as readFile4 } from "node:fs/promises";
@@ -59174,7 +59174,7 @@ function detectMediaType({
  }
  return;
  }
- var VERSION3 = "5.0.106";
+ var VERSION3 = "5.0.108";
  var download = async ({ url: url2 }) => {
  var _a16;
  const urlText = url2.toString();
@@ -63995,7 +63995,7 @@ var agentWorkflow = async (input, { step, tools, logger }) => {
  for (let i = 0;i < maxToolRoundTrips; i++) {
  messages.push(...nextMessage);
  await event(`start-round-${i}`, { kind: "StartRequest" /* StartRequest */, userMessage: nextMessage });
- const assistantMessage = await step(`agent-round-${i}`, async () => {
+ const assistantMessage = await step(`agent-round-${i}`, { retry: 2 }, async () => {
  return await tools.generateText({
  messages,
  tools: toolSet,
@@ -64201,14 +64201,23 @@ var fromJsonModelMessage = (msg) => {
  var makeStepFn = () => {
  const results = new Map;
  const callStack = [];
- return async (name17, fn, options) => {
+ return async (name17, arg2, arg3) => {
+ let fn;
+ let options;
+ if (typeof arg2 === "function") {
+ fn = arg2;
+ options = arg3;
+ } else {
+ options = arg2;
+ fn = arg3;
+ }
  callStack.push(name17);
  const key = callStack.join(">");
  try {
  if (results.has(key)) {
  return results.get(key);
  }
- const maxRetryCount = options?.retry ?? 1;
+ const maxRetryCount = options?.retry ?? 0;
  let lastError;
  for (let retryCount = 0;retryCount <= maxRetryCount; retryCount++) {
  try {
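
Taken together, these two hunks change the internal step() helper: it now accepts its options object either before or after the callback, and the default retry count drops from 1 to 0, so a step runs once unless retries are requested. A usage sketch in TypeScript, assuming step is the function returned by makeStepFn and fetchData is a placeholder:

    // options-last form, as before
    const a = await step("fetch-data", async () => fetchData(), { retry: 2 });
    // new options-first form, matching the agent-round call above
    const b = await step("fetch-data", { retry: 2 }, async () => fetchData());
    // no options: a single attempt, since retry now defaults to 0
    const c = await step("fetch-data", async () => fetchData());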
@@ -66851,7 +66860,7 @@ async function generateAuthToken(options) {
  const token = await client.getAccessToken();
  return (token == null ? undefined : token.token) || null;
  }
- var VERSION4 = "3.0.86";
+ var VERSION4 = "3.0.87";
  var googleVertexErrorDataSchema = exports_external.object({
  error: exports_external.object({
  code: exports_external.number().nullable(),
@@ -73673,7 +73682,7 @@ function createDeepSeek(options = {}) {
  var deepseek = createDeepSeek();

  // ../../node_modules/@ai-sdk/google/dist/index.mjs
- var VERSION7 = "2.0.44";
+ var VERSION7 = "2.0.45";
  var googleErrorDataSchema2 = lazySchema(() => zodSchema(exports_external.object({
  error: exports_external.object({
  code: exports_external.number().nullable(),
@@ -75157,6 +75166,20 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
  });
+ function getOpenAILanguageModelCapabilities(modelId) {
+ const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+ const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ const isReasoningModel = !(modelId.startsWith("gpt-3") || modelId.startsWith("gpt-4") || modelId.startsWith("chatgpt-4o") || modelId.startsWith("gpt-5-chat"));
+ const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
+ const systemMessageMode = isReasoningModel ? "developer" : "system";
+ return {
+ supportsFlexProcessing,
+ supportsPriorityProcessing,
+ isReasoningModel,
+ systemMessageMode,
+ supportsNonReasoningParameters
+ };
+ }
  function convertToOpenAIChatMessages({
  prompt,
  systemMessageMode = "system"
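
For illustration, tracing the prefix checks above gives the following results for two example model IDs (a sketch, not exhaustive):

    getOpenAILanguageModelCapabilities("o3-mini");
    // => { supportsFlexProcessing: true, supportsPriorityProcessing: true,
    //      isReasoningModel: true, systemMessageMode: "developer",
    //      supportsNonReasoningParameters: false }

    getOpenAILanguageModelCapabilities("gpt-4o");
    // => { supportsFlexProcessing: false, supportsPriorityProcessing: true,
    //      isReasoningModel: false, systemMessageMode: "system",
    //      supportsNonReasoningParameters: false }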
@@ -75571,6 +75594,7 @@ var OpenAIChatLanguageModel = class {
  schema: openaiChatLanguageModelOptions
  })) != null ? _a16 : {};
  const structuredOutputs = (_b8 = openaiOptions.structuredOutputs) != null ? _b8 : true;
+ const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
@@ -75586,7 +75610,7 @@ var OpenAIChatLanguageModel = class {
  }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages({
  prompt,
- systemMessageMode: getSystemMessageMode(this.modelId)
+ systemMessageMode: modelCapabilities.systemMessageMode
  });
  warnings.push(...messageWarnings);
  const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
@@ -75625,22 +75649,31 @@ var OpenAIChatLanguageModel = class {
  safety_identifier: openaiOptions.safetyIdentifier,
  messages
  };
- if (isReasoningModel(this.modelId)) {
- if (baseArgs.temperature != null) {
- baseArgs.temperature = undefined;
- warnings.push({
- type: "unsupported-setting",
- setting: "temperature",
- details: "temperature is not supported for reasoning models"
- });
- }
- if (baseArgs.top_p != null) {
- baseArgs.top_p = undefined;
- warnings.push({
- type: "unsupported-setting",
- setting: "topP",
- details: "topP is not supported for reasoning models"
- });
+ if (modelCapabilities.isReasoningModel) {
+ if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = undefined;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_p != null) {
+ baseArgs.top_p = undefined;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topP",
+ details: "topP is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.logprobs != null) {
+ baseArgs.logprobs = undefined;
+ warnings.push({
+ type: "other",
+ message: "logprobs is not supported for reasoning models"
+ });
+ }
  }
  if (baseArgs.frequency_penalty != null) {
  baseArgs.frequency_penalty = undefined;
@@ -75665,13 +75698,6 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
- if (baseArgs.logprobs != null) {
- baseArgs.logprobs = undefined;
- warnings.push({
- type: "other",
- message: "logprobs is not supported for reasoning models"
- });
- }
  if (baseArgs.top_logprobs != null) {
  baseArgs.top_logprobs = undefined;
  warnings.push({
@@ -75695,7 +75721,7 @@ var OpenAIChatLanguageModel = class {
  });
  }
  }
- if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+ if (openaiOptions.serviceTier === "flex" && !modelCapabilities.supportsFlexProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -75703,7 +75729,7 @@ var OpenAIChatLanguageModel = class {
  });
  baseArgs.service_tier = undefined;
  }
- if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+ if (openaiOptions.serviceTier === "priority" && !modelCapabilities.supportsPriorityProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -76018,42 +76044,6 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- function isReasoningModel(modelId) {
- return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
- }
- function supportsFlexProcessing(modelId) {
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
- }
- function supportsPriorityProcessing(modelId) {
- return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
- }
- function getSystemMessageMode(modelId) {
- var _a16, _b8;
- if (!isReasoningModel(modelId)) {
- return "system";
- }
- return (_b8 = (_a16 = reasoningModels[modelId]) == null ? undefined : _a16.systemMessageMode) != null ? _b8 : "developer";
- }
- var reasoningModels = {
- o3: {
- systemMessageMode: "developer"
- },
- "o3-2025-04-16": {
- systemMessageMode: "developer"
- },
- "o3-mini": {
- systemMessageMode: "developer"
- },
- "o3-mini-2025-01-31": {
- systemMessageMode: "developer"
- },
- "o4-mini": {
- systemMessageMode: "developer"
- },
- "o4-mini-2025-04-16": {
- systemMessageMode: "developer"
- }
- };
  function convertToOpenAICompletionPrompt({
  prompt,
  user = "user",
@@ -76621,8 +76611,8 @@ var codeInterpreter = (args = {}) => {
  };
  var comparisonFilterSchema = exports_external.object({
  key: exports_external.string(),
- type: exports_external.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
- value: exports_external.union([exports_external.string(), exports_external.number(), exports_external.boolean()])
+ type: exports_external.enum(["eq", "ne", "gt", "gte", "lt", "lte", "in", "nin"]),
+ value: exports_external.union([exports_external.string(), exports_external.number(), exports_external.boolean(), exports_external.array(exports_external.string())])
  });
  var compoundFilterSchema = exports_external.object({
  type: exports_external.enum(["and", "or"]),
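
The file-search comparison filter now accepts "in"/"nin" operators with string-array values. An object that should validate against the updated schema (the compound filter's "filters" field name is assumed, since it is not shown in this hunk):

    const filter = {
      type: "and",
      filters: [
        { key: "region", type: "eq", value: "us" },
        { key: "category", type: "in", value: ["blog", "news"] },
      ],
    };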
@@ -76717,12 +76707,12 @@ var webSearchOutputSchema = lazySchema(() => zodSchema(exports_external.object({
  }),
  exports_external.object({
  type: exports_external.literal("openPage"),
- url: exports_external.string()
+ url: exports_external.string().nullish()
  }),
  exports_external.object({
  type: exports_external.literal("find"),
- url: exports_external.string(),
- pattern: exports_external.string()
+ url: exports_external.string().nullish(),
+ pattern: exports_external.string().nullish()
  })
  ]),
  sources: exports_external.array(exports_external.discriminatedUnion("type", [
@@ -76756,12 +76746,12 @@ var webSearchPreviewOutputSchema = lazySchema(() => zodSchema(exports_external.o
  }),
  exports_external.object({
  type: exports_external.literal("openPage"),
- url: exports_external.string()
+ url: exports_external.string().nullish()
  }),
  exports_external.object({
  type: exports_external.literal("find"),
- url: exports_external.string(),
- pattern: exports_external.string()
+ url: exports_external.string().nullish(),
+ pattern: exports_external.string().nullish()
  })
  ])
  })));
@@ -77209,12 +77199,12 @@ var openaiResponsesChunkSchema = lazyValidator(() => zodSchema(exports_external.
  }),
  exports_external.object({
  type: exports_external.literal("open_page"),
- url: exports_external.string()
+ url: exports_external.string().nullish()
  }),
  exports_external.object({
  type: exports_external.literal("find"),
- url: exports_external.string(),
- pattern: exports_external.string()
+ url: exports_external.string().nullish(),
+ pattern: exports_external.string().nullish()
  })
  ])
  }),
@@ -77401,12 +77391,12 @@ var openaiResponsesResponseSchema = lazyValidator(() => zodSchema(exports_extern
  }),
  exports_external.object({
  type: exports_external.literal("open_page"),
- url: exports_external.string()
+ url: exports_external.string().nullish()
  }),
  exports_external.object({
  type: exports_external.literal("find"),
- url: exports_external.string(),
- pattern: exports_external.string()
+ url: exports_external.string().nullish(),
+ pattern: exports_external.string().nullish()
  })
  ])
  }),
@@ -77739,7 +77729,7 @@ var OpenAIResponsesLanguageModel = class {
  }) {
  var _a16, _b8, _c, _d;
  const warnings = [];
- const modelConfig = getResponsesModelConfig(this.modelId);
+ const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
  if (topK != null) {
  warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
@@ -77775,7 +77765,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
  prompt,
- systemMessageMode: modelConfig.systemMessageMode,
+ systemMessageMode: modelCapabilities.systemMessageMode,
  fileIdPrefixes: this.config.fileIdPrefixes,
  store: (_a16 = openaiOptions == null ? undefined : openaiOptions.store) != null ? _a16 : true,
  hasLocalShellTool: hasOpenAITool("openai.local_shell")
@@ -77805,7 +77795,7 @@ var OpenAIResponsesLanguageModel = class {
  addInclude("code_interpreter_call.outputs");
  }
  const store = openaiOptions == null ? undefined : openaiOptions.store;
- if (store === false && modelConfig.isReasoningModel) {
+ if (store === false && modelCapabilities.isReasoningModel) {
  addInclude("reasoning.encrypted_content");
  }
  const baseArgs = {
@@ -77845,7 +77835,7 @@ var OpenAIResponsesLanguageModel = class {
  safety_identifier: openaiOptions == null ? undefined : openaiOptions.safetyIdentifier,
  top_logprobs: topLogprobs,
  truncation: openaiOptions == null ? undefined : openaiOptions.truncation,
- ...modelConfig.isReasoningModel && ((openaiOptions == null ? undefined : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? undefined : openaiOptions.reasoningSummary) != null) && {
+ ...modelCapabilities.isReasoningModel && ((openaiOptions == null ? undefined : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? undefined : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
  ...(openaiOptions == null ? undefined : openaiOptions.reasoningEffort) != null && {
  effort: openaiOptions.reasoningEffort
@@ -77856,22 +77846,24 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  };
- if (modelConfig.isReasoningModel) {
- if (baseArgs.temperature != null) {
- baseArgs.temperature = undefined;
- warnings.push({
- type: "unsupported-setting",
- setting: "temperature",
- details: "temperature is not supported for reasoning models"
- });
- }
- if (baseArgs.top_p != null) {
- baseArgs.top_p = undefined;
- warnings.push({
- type: "unsupported-setting",
- setting: "topP",
- details: "topP is not supported for reasoning models"
- });
+ if (modelCapabilities.isReasoningModel) {
+ if (!((openaiOptions == null ? undefined : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = undefined;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_p != null) {
+ baseArgs.top_p = undefined;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topP",
+ details: "topP is not supported for reasoning models"
+ });
+ }
  }
  } else {
  if ((openaiOptions == null ? undefined : openaiOptions.reasoningEffort) != null) {
@@ -77889,7 +77881,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
- if ((openaiOptions == null ? undefined : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
+ if ((openaiOptions == null ? undefined : openaiOptions.serviceTier) === "flex" && !modelCapabilities.supportsFlexProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -77897,7 +77889,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  delete baseArgs.service_tier;
  }
- if ((openaiOptions == null ? undefined : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
+ if ((openaiOptions == null ? undefined : openaiOptions.serviceTier) === "priority" && !modelCapabilities.supportsPriorityProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -78740,32 +78732,6 @@ function isResponseAnnotationAddedChunk(chunk) {
  function isErrorChunk(chunk) {
  return chunk.type === "error";
  }
- function getResponsesModelConfig(modelId) {
- const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
- const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
- const defaults = {
- systemMessageMode: "system",
- supportsFlexProcessing: supportsFlexProcessing2,
- supportsPriorityProcessing: supportsPriorityProcessing2
- };
- if (modelId.startsWith("gpt-5-chat")) {
- return {
- ...defaults,
- isReasoningModel: false
- };
- }
- if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
- return {
- ...defaults,
- isReasoningModel: true,
- systemMessageMode: "developer"
- };
- }
- return {
- ...defaults,
- isReasoningModel: false
- };
- }
  function mapWebSearchOutput(action) {
  var _a16;
  switch (action.type) {
@@ -79071,7 +79037,7 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var VERSION8 = "2.0.76";
+ var VERSION8 = "2.0.80";
  function createOpenAI(options = {}) {
  var _a16, _b8;
  const baseURL = (_a16 = withoutTrailingSlash(loadOptionalSetting({
@@ -80049,9 +80015,21 @@ var openrouterFailedResponseHandler = createJsonErrorResponseHandler2({
  errorSchema: OpenRouterErrorResponseSchema,
  errorToMessage: (data) => data.error.message
  });
+ var FileAnnotationSchema = exports_external.object({
+ type: exports_external.literal("file"),
+ file: exports_external.object({
+ hash: exports_external.string(),
+ name: exports_external.string(),
+ content: exports_external.array(exports_external.object({
+ type: exports_external.string(),
+ text: exports_external.string().optional()
+ }).passthrough()).optional()
+ }).passthrough()
+ });
  var OpenRouterProviderMetadataSchema = exports_external.object({
  provider: exports_external.string(),
  reasoning_details: exports_external.array(ReasoningDetailUnionSchema).optional(),
+ annotations: exports_external.array(FileAnnotationSchema).optional(),
  usage: exports_external.object({
  promptTokens: exports_external.number(),
  promptTokensDetails: exports_external.object({
@@ -80070,7 +80048,8 @@ var OpenRouterProviderMetadataSchema = exports_external.object({
  }).passthrough();
  var OpenRouterProviderOptionsSchema = exports_external.object({
  openrouter: exports_external.object({
- reasoning_details: exports_external.array(ReasoningDetailUnionSchema).optional()
+ reasoning_details: exports_external.array(ReasoningDetailUnionSchema).optional(),
+ annotations: exports_external.array(FileAnnotationSchema).optional()
  }).optional()
  }).optional();
  function mapOpenRouterFinishReason(finishReason) {
@@ -80088,6 +80067,17 @@ function mapOpenRouterFinishReason(finishReason) {
  return "unknown";
  }
  }
+ var OPENROUTER_AUDIO_FORMATS = [
+ "wav",
+ "mp3",
+ "aiff",
+ "aac",
+ "ogg",
+ "flac",
+ "m4a",
+ "pcm16",
+ "pcm24"
+ ];
  function isUrl({
  url: url2,
  protocols
@@ -80126,6 +80116,26 @@ function getBase64FromDataUrl(dataUrl) {
  const match = dataUrl.match(/^data:[^;]*;base64,(.+)$/);
  return match ? match[1] : dataUrl;
  }
+ var MIME_TO_FORMAT = {
+ mpeg: "mp3",
+ mp3: "mp3",
+ "x-wav": "wav",
+ wave: "wav",
+ wav: "wav",
+ ogg: "ogg",
+ vorbis: "ogg",
+ aac: "aac",
+ "x-aac": "aac",
+ m4a: "m4a",
+ "x-m4a": "m4a",
+ mp4: "m4a",
+ aiff: "aiff",
+ "x-aiff": "aiff",
+ flac: "flac",
+ "x-flac": "flac",
+ pcm16: "pcm16",
+ pcm24: "pcm24"
+ };
  function getInputAudioData(part) {
  const fileData = getFileUrl({
  part,
@@ -80148,18 +80158,13 @@ Learn more: https://openrouter.ai/docs/features/multimodal/audio`);
  }
  const data = getBase64FromDataUrl(fileData);
  const mediaType = part.mediaType || "audio/mpeg";
- let format = mediaType.replace("audio/", "");
- if (format === "mpeg" || format === "mp3") {
- format = "mp3";
- } else if (format === "x-wav" || format === "wave" || format === "wav") {
- format = "wav";
- }
- if (format !== "mp3" && format !== "wav") {
+ const rawFormat = mediaType.replace("audio/", "");
+ const format = MIME_TO_FORMAT[rawFormat];
+ if (format === undefined) {
+ const supportedList = OPENROUTER_AUDIO_FORMATS.join(", ");
  throw new Error(`Unsupported audio format: "${mediaType}"

- OpenRouter only supports MP3 and WAV audio formats.
- • For MP3: use "audio/mpeg" or "audio/mp3"
- • For WAV: use "audio/wav" or "audio/x-wav"
+ OpenRouter supports the following audio formats: ${supportedList}

  Learn more: https://openrouter.ai/docs/features/multimodal/audio`);
  }
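
OpenRouter audio input now maps a range of audio/* media types to provider formats instead of accepting only MP3 and WAV. A hedged sketch of sending a FLAC clip through an AI SDK file part (flacBytes is a placeholder Uint8Array; the model ID is only an example):

    import { generateText } from "ai";

    const result = await generateText({
      model: openrouter("google/gemini-2.5-flash"),
      messages: [{
        role: "user",
        content: [
          { type: "text", text: "Transcribe this clip." },
          // "audio/flac" resolves to "flac" via MIME_TO_FORMAT above
          { type: "file", data: flacBytes, mediaType: "audio/flac" },
        ],
      }],
    });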
@@ -80172,7 +80177,7 @@ function getCacheControl2(providerMetadata) {
  return (_c = (_b8 = (_a153 = openrouter2 == null ? undefined : openrouter2.cacheControl) != null ? _a153 : openrouter2 == null ? undefined : openrouter2.cache_control) != null ? _b8 : anthropic2 == null ? undefined : anthropic2.cacheControl) != null ? _c : anthropic2 == null ? undefined : anthropic2.cache_control;
  }
  function convertToOpenRouterChatMessages(prompt) {
- var _a153, _b8, _c, _d, _e, _f, _g, _h;
+ var _a153, _b8, _c, _d, _e, _f, _g, _h, _i, _j;
  const messages = [];
  for (const { role, content, providerOptions } of prompt) {
  switch (role) {
@@ -80316,6 +80321,7 @@ function convertToOpenRouterChatMessages(prompt) {
  }
  const parsedProviderOptions = OpenRouterProviderOptionsSchema.safeParse(providerOptions);
  const messageReasoningDetails = parsedProviderOptions.success ? (_g = (_f = parsedProviderOptions.data) == null ? undefined : _f.openrouter) == null ? undefined : _g.reasoning_details : undefined;
+ const messageAnnotations = parsedProviderOptions.success ? (_i = (_h = parsedProviderOptions.data) == null ? undefined : _h.openrouter) == null ? undefined : _i.annotations : undefined;
  const finalReasoningDetails = messageReasoningDetails && Array.isArray(messageReasoningDetails) && messageReasoningDetails.length > 0 ? messageReasoningDetails : accumulatedReasoningDetails.length > 0 ? accumulatedReasoningDetails : undefined;
  messages.push({
  role: "assistant",
@@ -80323,6 +80329,7 @@ function convertToOpenRouterChatMessages(prompt) {
  tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
  reasoning: reasoning || undefined,
  reasoning_details: finalReasoningDetails,
+ annotations: messageAnnotations,
  cache_control: getCacheControl2(providerOptions)
  });
  break;
@@ -80334,7 +80341,7 @@ function convertToOpenRouterChatMessages(prompt) {
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
  content: content2,
- cache_control: (_h = getCacheControl2(providerOptions)) != null ? _h : getCacheControl2(toolResponse.providerOptions)
+ cache_control: (_j = getCacheControl2(providerOptions)) != null ? _j : getCacheControl2(toolResponse.providerOptions)
  });
  }
  break;
@@ -80547,6 +80554,7 @@ var OpenRouterChatLanguageModel = class {
  this.specificationVersion = "v2";
  this.provider = "openrouter";
  this.defaultObjectGenerationMode = "tool";
+ this.supportsImageUrls = true;
  this.supportedUrls = {
  "image/*": [
  /^data:image\/[a-zA-Z]+;base64,/,
@@ -80588,7 +80596,16 @@ var OpenRouterChatLanguageModel = class {
  presence_penalty: presencePenalty,
  seed,
  stop: stopSequences,
- response_format: responseFormat,
+ response_format: (responseFormat == null ? undefined : responseFormat.type) === "json" ? responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: __spreadValues({
+ schema: responseFormat.schema,
+ strict: true,
+ name: (_a153 = responseFormat.name) != null ? _a153 : "response"
+ }, responseFormat.description && {
+ description: responseFormat.description
+ })
+ } : { type: "json_object" } : undefined,
  top_k: topK,
  messages: convertToOpenRouterChatMessages(prompt),
  include_reasoning: this.settings.includeReasoning,
@@ -80599,20 +80616,6 @@ var OpenRouterChatLanguageModel = class {
  provider: this.settings.provider,
  debug: this.settings.debug
  }, this.config.extraBody), this.settings.extraBody);
- if ((responseFormat == null ? undefined : responseFormat.type) === "json" && responseFormat.schema != null) {
- return __spreadProps(__spreadValues({}, baseArgs), {
- response_format: {
- type: "json_schema",
- json_schema: __spreadValues({
- schema: responseFormat.schema,
- strict: true,
- name: (_a153 = responseFormat.name) != null ? _a153 : "response"
- }, responseFormat.description && {
- description: responseFormat.description
- })
- }
- });
- }
  if (tools2 && tools2.length > 0) {
  const mappedTools = tools2.filter((tool3) => tool3.type === "function").map((tool3) => ({
  type: "function",
@@ -80630,7 +80633,7 @@ var OpenRouterChatLanguageModel = class {
  return baseArgs;
  }
  async doGenerate(options) {
- var _a153, _b8, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
+ var _a153, _b8, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
  const providerOptions = options.providerOptions || {};
  const openrouterOptions = providerOptions.openrouter || {};
  const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
@@ -80784,6 +80787,7 @@ var OpenRouterChatLanguageModel = class {
  }
  }
  }
+ const fileAnnotations = (_k = choice.message.annotations) == null ? undefined : _k.filter((a) => a.type === "file");
  return {
  content,
  finishReason: mapOpenRouterFinishReason(choice.finish_reason),
@@ -80791,22 +80795,23 @@ var OpenRouterChatLanguageModel = class {
  warnings: [],
  providerMetadata: {
  openrouter: OpenRouterProviderMetadataSchema.parse({
- provider: (_k = response.provider) != null ? _k : "",
- reasoning_details: (_l = choice.message.reasoning_details) != null ? _l : [],
+ provider: (_l = response.provider) != null ? _l : "",
+ reasoning_details: (_m = choice.message.reasoning_details) != null ? _m : [],
+ annotations: fileAnnotations && fileAnnotations.length > 0 ? fileAnnotations : undefined,
  usage: __spreadValues(__spreadValues(__spreadValues({
- promptTokens: (_m = usageInfo.inputTokens) != null ? _m : 0,
- completionTokens: (_n = usageInfo.outputTokens) != null ? _n : 0,
- totalTokens: (_o = usageInfo.totalTokens) != null ? _o : 0,
- cost: (_p = response.usage) == null ? undefined : _p.cost
- }, ((_r = (_q = response.usage) == null ? undefined : _q.prompt_tokens_details) == null ? undefined : _r.cached_tokens) != null ? {
+ promptTokens: (_n = usageInfo.inputTokens) != null ? _n : 0,
+ completionTokens: (_o = usageInfo.outputTokens) != null ? _o : 0,
+ totalTokens: (_p = usageInfo.totalTokens) != null ? _p : 0,
+ cost: (_q = response.usage) == null ? undefined : _q.cost
+ }, ((_s = (_r = response.usage) == null ? undefined : _r.prompt_tokens_details) == null ? undefined : _s.cached_tokens) != null ? {
  promptTokensDetails: {
  cachedTokens: response.usage.prompt_tokens_details.cached_tokens
  }
- } : {}), ((_t = (_s = response.usage) == null ? undefined : _s.completion_tokens_details) == null ? undefined : _t.reasoning_tokens) != null ? {
+ } : {}), ((_u = (_t = response.usage) == null ? undefined : _t.completion_tokens_details) == null ? undefined : _u.reasoning_tokens) != null ? {
  completionTokensDetails: {
  reasoningTokens: response.usage.completion_tokens_details.reasoning_tokens
  }
- } : {}), ((_v = (_u = response.usage) == null ? undefined : _u.cost_details) == null ? undefined : _v.upstream_inference_cost) != null ? {
+ } : {}), ((_w = (_v = response.usage) == null ? undefined : _v.cost_details) == null ? undefined : _w.upstream_inference_cost) != null ? {
  costDetails: {
  upstreamInferenceCost: response.usage.cost_details.upstream_inference_cost
  }
@@ -81350,6 +81355,7 @@ var OpenRouterCompletionLanguageModel = class {
  constructor(modelId, settings, config4) {
  this.specificationVersion = "v2";
  this.provider = "openrouter";
+ this.supportsImageUrls = true;
  this.supportedUrls = {
  "image/*": [
  /^data:image\/[a-zA-Z]+;base64,/,
@@ -81573,6 +81579,71 @@ var OpenRouterCompletionLanguageModel = class {
  };
  }
  };
+ var openrouterEmbeddingUsageSchema = exports_external.object({
+ prompt_tokens: exports_external.number(),
+ total_tokens: exports_external.number(),
+ cost: exports_external.number().optional()
+ });
+ var openrouterEmbeddingDataSchema = exports_external.object({
+ object: exports_external.literal("embedding"),
+ embedding: exports_external.array(exports_external.number()),
+ index: exports_external.number().optional()
+ });
+ var OpenRouterEmbeddingResponseSchema = exports_external.object({
+ id: exports_external.string().optional(),
+ object: exports_external.literal("list"),
+ data: exports_external.array(openrouterEmbeddingDataSchema),
+ model: exports_external.string(),
+ usage: openrouterEmbeddingUsageSchema.optional()
+ });
+ var OpenRouterEmbeddingModel = class {
+ constructor(modelId, settings, config4) {
+ this.specificationVersion = "v2";
+ this.provider = "openrouter";
+ this.maxEmbeddingsPerCall = undefined;
+ this.supportsParallelCalls = true;
+ this.modelId = modelId;
+ this.settings = settings;
+ this.config = config4;
+ }
+ async doEmbed(options) {
+ var _a153;
+ const { values, abortSignal, headers } = options;
+ const args = __spreadValues(__spreadValues({
+ model: this.modelId,
+ input: values,
+ user: this.settings.user,
+ provider: this.settings.provider
+ }, this.config.extraBody), this.settings.extraBody);
+ const { value: responseValue, responseHeaders } = await postJsonToApi2({
+ url: this.config.url({
+ path: "/embeddings",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders2(this.config.headers(), headers),
+ body: args,
+ failedResponseHandler: openrouterFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler2(OpenRouterEmbeddingResponseSchema),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ embeddings: responseValue.data.map((item) => item.embedding),
+ usage: responseValue.usage ? { tokens: responseValue.usage.prompt_tokens } : undefined,
+ providerMetadata: ((_a153 = responseValue.usage) == null ? undefined : _a153.cost) ? {
+ openrouter: {
+ usage: {
+ cost: responseValue.usage.cost
+ }
+ }
+ } : undefined,
+ response: {
+ headers: responseHeaders,
+ body: responseValue
+ }
+ };
+ }
+ };
  function removeUndefinedEntries2(record2) {
  return Object.fromEntries(Object.entries(record2).filter(([, value]) => value !== null));
  }
@@ -81584,18 +81655,20 @@ function withUserAgentSuffix2(headers, ...userAgentSuffixParts) {
  "user-agent": newUserAgent
  });
  }
- var VERSION9 = "1.3.0";
+ var VERSION9 = "1.5.0";
  function createOpenRouter(options = {}) {
  var _a153, _b8, _c;
  const baseURL = (_b8 = withoutTrailingSlash2((_a153 = options.baseURL) != null ? _a153 : options.baseUrl)) != null ? _b8 : "https://openrouter.ai/api/v1";
  const compatibility = (_c = options.compatibility) != null ? _c : "compatible";
- const getHeaders = () => withUserAgentSuffix2(__spreadValues({
+ const getHeaders = () => withUserAgentSuffix2(__spreadValues(__spreadValues({
  Authorization: `Bearer ${loadApiKey2({
  apiKey: options.apiKey,
  environmentVariableName: "OPENROUTER_API_KEY",
  description: "OpenRouter"
  })}`
- }, options.headers), `ai-sdk/openrouter/${VERSION9}`);
+ }, options.headers), options.api_keys && Object.keys(options.api_keys).length > 0 && {
+ "X-Provider-API-Keys": JSON.stringify(options.api_keys)
+ }), `ai-sdk/openrouter/${VERSION9}`);
  const createChatModel = (modelId, settings = {}) => new OpenRouterChatLanguageModel(modelId, settings, {
  provider: "openrouter.chat",
  url: ({ path }) => `${baseURL}${path}`,
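
createOpenRouter() now serializes an api_keys map into an X-Provider-API-Keys header, letting callers pass their own upstream provider keys. A sketch with placeholder key values:

    import { createOpenRouter } from "@openrouter/ai-sdk-provider";

    const openrouter = createOpenRouter({
      apiKey: process.env.OPENROUTER_API_KEY,
      // sent as X-Provider-API-Keys: {"openai":"sk-...","anthropic":"sk-ant-..."}
      api_keys: { openai: "sk-...", anthropic: "sk-ant-..." },
    });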
@@ -81612,6 +81685,13 @@ function createOpenRouter(options = {}) {
  fetch: options.fetch,
  extraBody: options.extraBody
  });
+ const createEmbeddingModel = (modelId, settings = {}) => new OpenRouterEmbeddingModel(modelId, settings, {
+ provider: "openrouter.embedding",
+ url: ({ path }) => `${baseURL}${path}`,
+ headers: getHeaders,
+ fetch: options.fetch,
+ extraBody: options.extraBody
+ });
  const createLanguageModel = (modelId, settings) => {
  if (new.target) {
  throw new Error("The OpenRouter model function cannot be called with the new keyword.");
@@ -81625,6 +81705,8 @@ function createOpenRouter(options = {}) {
  provider3.languageModel = createLanguageModel;
  provider3.chat = createChatModel;
  provider3.completion = createCompletionModel;
+ provider3.textEmbeddingModel = createEmbeddingModel;
+ provider3.embedding = createEmbeddingModel;
  return provider3;
  }
  var openrouter = createOpenRouter({
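
With textEmbeddingModel (and the embedding alias) wired into the provider, OpenRouter embeddings plug into the AI SDK's embed helper. A sketch (the model ID is illustrative):

    import { embed } from "ai";

    const { embedding, usage } = await embed({
      model: openrouter.textEmbeddingModel("openai/text-embedding-3-small"),
      value: "sunny day at the beach",
    });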
@@ -83710,6 +83792,7 @@ var googleModelPrices = {
  };
  var prices_default = {
  ["anthropic" /* Anthropic */]: {
+ "claude-opus-4-5-20251101": { inputPrice: 5, outputPrice: 25, cacheWritesPrice: 6.25, cacheReadsPrice: 0.5, supportsThinking: true },
  "claude-sonnet-4-5-20250929": { inputPrice: 3, outputPrice: 15, cacheWritesPrice: 3.75, cacheReadsPrice: 0.3, supportsThinking: true },
  "claude-opus-4-20250514": { inputPrice: 15, outputPrice: 75, cacheWritesPrice: 18.75, cacheReadsPrice: 1.5, supportsThinking: true },
  "claude-opus-4-1-20250805": { inputPrice: 15, outputPrice: 75, cacheWritesPrice: 18.75, cacheReadsPrice: 1.5, supportsThinking: true },
@@ -86784,7 +86867,7 @@ async function findNextTask(tools2) {
  currentTask = subTasks[0];
  }
  }
- async function runImplementationLoop(context, highLevelPlan, saveUsageSnapshot, additionalTools) {
+ async function runImplementationLoop(context, highLevelPlan, saveUsageSnapshot, additionalTools, noReview) {
  const { logger, step, tools: tools2 } = context;
  const commitMessages = [];
  logger.info(`Phase 5: Iterative Implementation Loop...
@@ -86829,14 +86912,20 @@ Focus only on this item, but use the plan for context.`;
  await tools2.executeCommand({ command: "git", args: ["commit", "-m", commitMessage] });
  });
  commitMessages.push(commitMessage);
- const { passed: reviewPassed, commitMessages: fixCommitMessages } = await performReviewAndFixCycle(iterationCount, nextTask, highLevelPlan, context, additionalTools);
- commitMessages.push(...fixCommitMessages);
- const taskElapsed = Date.now() - taskStartTime;
- const taskElapsedTime = formatElapsedTime(taskElapsed);
- if (reviewPassed) {
- logger.info(`Iteration ${iterationCount} completed successfully (${taskElapsedTime})`);
+ if (!noReview) {
+ const { passed: reviewPassed, commitMessages: fixCommitMessages } = await performReviewAndFixCycle(iterationCount, nextTask, highLevelPlan, context, additionalTools);
+ commitMessages.push(...fixCommitMessages);
+ const taskElapsed = Date.now() - taskStartTime;
+ const taskElapsedTime = formatElapsedTime(taskElapsed);
+ if (reviewPassed) {
+ logger.info(`Iteration ${iterationCount} completed successfully (${taskElapsedTime})`);
+ } else {
+ logger.warn(`Warning: Iteration ${iterationCount} completed with potential issues (${taskElapsedTime})`);
+ }
  } else {
- logger.warn(`Warning: Iteration ${iterationCount} completed with potential issues (${taskElapsedTime})`);
+ const taskElapsed = Date.now() - taskStartTime;
+ const taskElapsedTime = formatElapsedTime(taskElapsed);
+ logger.info(`Iteration ${iterationCount} completed (${taskElapsedTime})`);
  }
  await step(`update-task-status-${iterationCount}`, async () => {
  await tools2.updateTodoItem({ operation: "update", id: nextTaskId, status: "completed" });
@@ -86967,7 +87056,7 @@ Max retries (${MAX_REVIEW_RETRIES}) reached for final review. Issues might remai
  }
  var epicWorkflow = async (input2, context) => {
  const { logger, tools: tools2 } = context;
- const { task, saveEpicContext, saveUsageSnapshot, additionalTools } = input2;
+ const { task, saveEpicContext, saveUsageSnapshot, additionalTools, noReview } = input2;
  const workflowStartTime = Date.now();
  if (!task || task.trim() === "") {
  logger.error("Error: Task cannot be empty. Please provide a valid task description.");
@@ -87012,8 +87101,10 @@ var epicWorkflow = async (input2, context) => {
  if (todos.length === 0) {
  await addTodoItemsFromPlan(input2.plan, context);
  }
- const commitMessages = await runImplementationLoop(context, input2.plan, saveUsageSnapshot, additionalTools);
- await performFinalReviewAndFix(context, input2.plan, input2.baseBranch ?? undefined, additionalTools);
+ const commitMessages = await runImplementationLoop(context, input2.plan, saveUsageSnapshot, additionalTools, noReview);
+ if (!noReview) {
+ await performFinalReviewAndFix(context, input2.plan, input2.baseBranch ?? undefined, additionalTools);
+ }
  await saveUsageSnapshot();
  await tools2.executeCommand({ command: "git", args: ["rm", "-f", ".epic.yml"] });
  const statusResult = await tools2.executeCommand({
@@ -87506,9 +87597,10 @@ class EpicTodoItemStore {
  }

  // src/commands/epic.ts
- async function runEpic(task2, _options, command) {
+ async function runEpic(task2, options, command) {
  const globalOpts = (command.parent ?? command).opts();
  const { verbose, yes } = globalOpts;
+ const { review: review3 } = options;
  const logger = createLogger({
  verbose
  });
@@ -87535,6 +87627,7 @@ async function runEpic(task2, _options, command) {
  const workflowInput = {
  ...epicContext,
  interactive: !yes,
+ noReview: review3 === false,
  saveEpicContext: async (context) => {
  if (context.task)
  workflowInput.task = context.task;
@@ -87572,7 +87665,7 @@ async function runEpic(task2, _options, command) {
  }
  });
  }
- var epicCommand = new Command("epic").description("Orchestrates a large feature or epic, breaking it down into smaller tasks.").argument("[task]", "The epic to plan and implement.").action(runEpic);
+ var epicCommand = new Command("epic").description("Orchestrates a large feature or epic, breaking it down into smaller tasks.").argument("[task]", "The epic to plan and implement.").option("--no-review", "Disable the review step").action(runEpic);

  // src/commands/fix.ts
  var fixCommand = new Command("fix").description("Fix issues by running a command and letting an agent fix it.").argument("[command]", "The command to run").option("-p, --prompt <prompt>", "Additional prompt for the agent.").action(async (command, options, cmd) => {
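
The epic command gains a --no-review flag. Under Commander's --no-<name> convention, options.review defaults to true and flips to false when the flag is given, which runEpic maps to noReview above, skipping both the per-iteration review/fix cycle and the final review. Illustrative invocations (binary name assumed):

    polka epic "Add dark mode support"               # review cycles run as before
    polka epic "Add dark mode support" --no-review   # implement without review passes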
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@polka-codes/cli",
- "version": "0.9.70",
+ "version": "0.9.72",
  "license": "AGPL-3.0",
  "author": "github@polka.codes",
  "type": "module",
@@ -19,16 +19,16 @@
  "dependencies": {
  "@ai-sdk/anthropic": "^2.0.53",
  "@ai-sdk/deepseek": "^1.0.31",
- "@ai-sdk/google": "^2.0.44",
- "@ai-sdk/google-vertex": "^3.0.86",
- "@ai-sdk/openai": "^2.0.76",
+ "@ai-sdk/google": "^2.0.45",
+ "@ai-sdk/google-vertex": "^3.0.87",
+ "@ai-sdk/openai": "^2.0.80",
  "@ai-sdk/provider": "^2.0.0",
  "@ai-sdk/provider-utils": "^3.0.18",
  "@inquirer/prompts": "^8.0.2",
- "@openrouter/ai-sdk-provider": "^1.3.0",
- "@polka-codes/cli-shared": "0.9.68",
- "@polka-codes/core": "0.9.68",
- "ai": "^5.0.106",
+ "@openrouter/ai-sdk-provider": "^1.5.0",
+ "@polka-codes/cli-shared": "0.9.71",
+ "@polka-codes/core": "0.9.71",
+ "ai": "^5.0.108",
  "chalk": "^5.6.2",
  "commander": "^14.0.2",
  "dotenv": "^17.2.3",