@polka-codes/cli 0.9.79 → 0.9.81

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/dist/index.js +1205 -576
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -37660,7 +37660,7 @@ var {
37660
37660
  Help
37661
37661
  } = import__.default;
37662
37662
  // package.json
37663
- var version = "0.9.79";
37663
+ var version = "0.9.81";
37664
37664
 
37665
37665
  // src/commands/code.ts
37666
37666
  import { readFile as readFile4 } from "node:fs/promises";
@@ -50382,12 +50382,6 @@ function date4(params) {
50382
50382
  // ../../node_modules/zod/v4/classic/external.js
50383
50383
  config(en_default());
50384
50384
  // ../core/src/config.ts
50385
- var providerModelSchema = exports_external.object({
50386
- provider: exports_external.string().optional(),
50387
- model: exports_external.string().optional(),
50388
- parameters: exports_external.record(exports_external.string(), exports_external.any()).optional(),
50389
- budget: exports_external.number().positive().optional()
50390
- });
50391
50385
  var ruleSchema = exports_external.union([
50392
50386
  exports_external.string(),
50393
50387
  exports_external.object({ path: exports_external.string() }).strict(),
@@ -50400,6 +50394,13 @@ var ruleSchema = exports_external.union([
50400
50394
  branch: exports_external.string().optional()
50401
50395
  }).strict()
50402
50396
  ]);
50397
+ var providerModelSchema = exports_external.object({
50398
+ provider: exports_external.string().optional(),
50399
+ model: exports_external.string().optional(),
50400
+ parameters: exports_external.record(exports_external.string(), exports_external.any()).optional(),
50401
+ budget: exports_external.number().positive().optional(),
50402
+ rules: exports_external.array(ruleSchema).optional().or(exports_external.string()).optional()
50403
+ });
50403
50404
  var configSchema = exports_external.object({
50404
50405
  prices: exports_external.record(exports_external.string(), exports_external.record(exports_external.string(), exports_external.object({
50405
50406
  inputPrice: exports_external.number().optional(),
@@ -50492,7 +50493,7 @@ var toolInfo = {
50492
50493
  var handler = async (provider, args) => {
50493
50494
  if (!provider.askFollowupQuestion) {
50494
50495
  return {
50495
- type: "Error" /* Error */,
50496
+ success: false,
50496
50497
  message: {
50497
50498
  type: "error-text",
50498
50499
  value: "Not possible to ask followup question."
@@ -50502,7 +50503,7 @@ var handler = async (provider, args) => {
50502
50503
  const { questions } = toolInfo.parameters.parse(args);
50503
50504
  if (questions.length === 0) {
50504
50505
  return {
50505
- type: "Error" /* Error */,
50506
+ success: false,
50506
50507
  message: {
50507
50508
  type: "error-text",
50508
50509
  value: "No questions provided"
@@ -50518,7 +50519,7 @@ ${answer}
50518
50519
  </ask_followup_question_answer>`);
50519
50520
  }
50520
50521
  return {
50521
- type: "Reply" /* Reply */,
50522
+ success: true,
50522
50523
  message: {
50523
50524
  type: "text",
50524
50525
  value: answers.join(`
@@ -50561,7 +50562,7 @@ var toolInfo2 = {
50561
50562
  var handler2 = async (provider, args) => {
50562
50563
  if (!provider.executeCommand) {
50563
50564
  return {
50564
- type: "Error" /* Error */,
50565
+ success: false,
50565
50566
  message: {
50566
50567
  type: "error-text",
50567
50568
  value: "Not possible to execute command. Abort."
@@ -50590,7 +50591,7 @@ ${result.stderr}
50590
50591
  }
50591
50592
  if (result.exitCode === 0) {
50592
50593
  return {
50593
- type: "Reply" /* Reply */,
50594
+ success: true,
50594
50595
  message: {
50595
50596
  type: "text",
50596
50597
  value: message
@@ -50598,7 +50599,7 @@ ${result.stderr}
50598
50599
  };
50599
50600
  }
50600
50601
  return {
50601
- type: "Error" /* Error */,
50602
+ success: false,
50602
50603
  message: {
50603
50604
  type: "error-text",
50604
50605
  value: message
@@ -50606,7 +50607,7 @@ ${result.stderr}
50606
50607
  };
50607
50608
  } catch (error46) {
50608
50609
  return {
50609
- type: "Error" /* Error */,
50610
+ success: false,
50610
50611
  message: {
50611
50612
  type: "error-text",
50612
50613
  value: error46 instanceof Error ? error46.message : String(error46)
@@ -50655,7 +50656,7 @@ var toolInfo3 = {
50655
50656
  var handler3 = async (provider, args) => {
50656
50657
  if (!provider.fetchUrl) {
50657
50658
  return {
50658
- type: "Error" /* Error */,
50659
+ success: false,
50659
50660
  message: {
50660
50661
  type: "error-text",
50661
50662
  value: "Not possible to fetch url."
@@ -50665,7 +50666,7 @@ var handler3 = async (provider, args) => {
50665
50666
  const { url: urls } = toolInfo3.parameters.parse(args);
50666
50667
  if (urls.length === 0) {
50667
50668
  return {
50668
- type: "Error" /* Error */,
50669
+ success: false,
50669
50670
  message: {
50670
50671
  type: "error-text",
50671
50672
  value: "No URLs provided. Please provide at least one URL to fetch."
@@ -50684,7 +50685,7 @@ var handler3 = async (provider, args) => {
50684
50685
  }
50685
50686
  const resolvedResults = await Promise.all(results);
50686
50687
  return {
50687
- type: "Reply" /* Reply */,
50688
+ success: true,
50688
50689
  message: {
50689
50690
  type: "text",
50690
50691
  value: resolvedResults.join(`
@@ -50696,40 +50697,8 @@ var fetchUrl_default = {
50696
50697
  ...toolInfo3,
50697
50698
  handler: handler3
50698
50699
  };
50699
- // ../core/src/tools/getTodoItem.ts
50700
- var toolInfo4 = {
50701
- name: "getTodoItem",
50702
- description: "Get a to-do item by its ID.",
50703
- parameters: exports_external.object({
50704
- id: exports_external.string().describe("The ID of the to-do item.")
50705
- })
50706
- };
50707
- var handler4 = async (provider, args) => {
50708
- if (!provider.getTodoItem) {
50709
- return {
50710
- type: "Error" /* Error */,
50711
- message: {
50712
- type: "error-text",
50713
- value: "Not possible to get a to-do item."
50714
- }
50715
- };
50716
- }
50717
- const { id } = toolInfo4.parameters.parse(args);
50718
- const item = await provider.getTodoItem(id);
50719
- return {
50720
- type: "Reply" /* Reply */,
50721
- message: {
50722
- type: "json",
50723
- value: item ?? null
50724
- }
50725
- };
50726
- };
50727
- var getTodoItem_default = {
50728
- ...toolInfo4,
50729
- handler: handler4
50730
- };
50731
50700
  // ../core/src/tools/listFiles.ts
50732
- var toolInfo5 = {
50701
+ var toolInfo4 = {
50733
50702
  name: "listFiles",
50734
50703
  description: "Request to list files and directories within the specified directory. If recursive is true, it will list all files and directories recursively. If recursive is false or not provided, it will only list the top-level contents. Do not use this tool to confirm the existence of files you may have created, as the user will let you know if the files were created successfully or not.",
50735
50704
  parameters: exports_external.object({
@@ -50767,20 +50736,20 @@ var toolInfo5 = {
50767
50736
  ]
50768
50737
  })
50769
50738
  };
50770
- var handler5 = async (provider, args) => {
50739
+ var handler4 = async (provider, args) => {
50771
50740
  if (!provider.listFiles) {
50772
50741
  return {
50773
- type: "Error" /* Error */,
50742
+ success: false,
50774
50743
  message: {
50775
50744
  type: "error-text",
50776
50745
  value: "Not possible to list files."
50777
50746
  }
50778
50747
  };
50779
50748
  }
50780
- const { path, maxCount, recursive, includeIgnored } = toolInfo5.parameters.parse(args);
50749
+ const { path, maxCount, recursive, includeIgnored } = toolInfo4.parameters.parse(args);
50781
50750
  const [files, limitReached] = await provider.listFiles(path, recursive, maxCount, includeIgnored);
50782
50751
  return {
50783
- type: "Reply" /* Reply */,
50752
+ success: true,
50784
50753
  message: {
50785
50754
  type: "text",
50786
50755
  value: `<list_files_path>${path}</list_files_path>
@@ -50793,128 +50762,32 @@ ${files.join(`
50793
50762
  };
50794
50763
  };
50795
50764
  var listFiles_default = {
50796
- ...toolInfo5,
50797
- handler: handler5
50798
- };
50799
- // ../core/src/tools/listMemoryTopics.ts
50800
- var toolInfo6 = {
50801
- name: "listMemoryTopics",
50802
- description: "Lists all topics in memory. Use this to see what information has been stored and which topics are available to read from.",
50803
- parameters: exports_external.object({})
50804
- };
50805
- var handler6 = async (provider, _args) => {
50806
- const topics = await provider.listMemoryTopics();
50807
- if (!topics.length) {
50808
- return { type: "Reply" /* Reply */, message: { type: "text", value: "No topics found." } };
50809
- }
50810
- return {
50811
- type: "Reply" /* Reply */,
50812
- message: {
50813
- type: "text",
50814
- value: `Memory topics:
50815
- ${topics.join(`
50816
- `)}`
50817
- }
50818
- };
50819
- };
50820
- var listMemoryTopics_default = {
50821
- ...toolInfo6,
50822
- handler: handler6
50823
- };
50824
- // ../core/src/tools/todo.ts
50825
- var TodoStatus = exports_external.enum(["open", "completed", "closed"]);
50826
- var TodoItemSchema = exports_external.object({
50827
- id: exports_external.string(),
50828
- title: exports_external.string(),
50829
- description: exports_external.string(),
50830
- status: TodoStatus
50831
- });
50832
- var UpdateTodoItemInputSchema = exports_external.object({
50833
- operation: exports_external.enum(["add", "update"]),
50834
- id: exports_external.string().nullish(),
50835
- parentId: exports_external.string().nullish(),
50836
- title: exports_external.string().nullish(),
50837
- description: exports_external.string().nullish(),
50838
- status: TodoStatus.nullish()
50839
- }).superRefine((data, ctx) => {
50840
- if (data.operation === "add") {
50841
- if (!data.title) {
50842
- ctx.addIssue({
50843
- code: "custom",
50844
- message: 'Title is required for "add" operation',
50845
- path: ["title"]
50846
- });
50847
- }
50848
- } else if (data.operation === "update") {
50849
- if (!data.id) {
50850
- ctx.addIssue({
50851
- code: "custom",
50852
- message: 'ID is required for "update" operation',
50853
- path: ["id"]
50854
- });
50855
- }
50856
- }
50857
- });
50858
- var UpdateTodoItemOutputSchema = exports_external.object({
50859
- id: exports_external.string()
50860
- });
50861
-
50862
- // ../core/src/tools/listTodoItems.ts
50863
- var toolInfo7 = {
50864
- name: "listTodoItems",
50865
- description: "List all to-do items, sorted by id. If an id is provided, it lists all sub-items for that id. Can be filtered by status.",
50866
- parameters: exports_external.object({
50867
- id: exports_external.string().nullish(),
50868
- status: TodoStatus.nullish()
50869
- })
50870
- };
50871
- var handler7 = async (provider, args) => {
50872
- if (!provider.listTodoItems) {
50873
- return {
50874
- type: "Error" /* Error */,
50875
- message: {
50876
- type: "error-text",
50877
- value: "Not possible to list to-do items."
50878
- }
50879
- };
50880
- }
50881
- const { id, status } = toolInfo7.parameters.parse(args);
50882
- const items = await provider.listTodoItems(id, status);
50883
- return {
50884
- type: "Reply" /* Reply */,
50885
- message: {
50886
- type: "json",
50887
- value: items
50888
- }
50889
- };
50890
- };
50891
- var listTodoItems_default = {
50892
- ...toolInfo7,
50893
- handler: handler7
50765
+ ...toolInfo4,
50766
+ handler: handler4
50894
50767
  };
50895
50768
  // ../core/src/tools/readBinaryFile.ts
50896
- var toolInfo8 = {
50769
+ var toolInfo5 = {
50897
50770
  name: "readBinaryFile",
50898
50771
  description: "Read a binary file from a URL or local path. Use file:// prefix to access local files. This can be used to access non-text files such as PDFs or images.",
50899
50772
  parameters: exports_external.object({
50900
50773
  url: exports_external.string().describe("The URL or local path of the file to read.")
50901
50774
  })
50902
50775
  };
50903
- var handler8 = async (provider, args) => {
50776
+ var handler5 = async (provider, args) => {
50904
50777
  if (!provider.readBinaryFile) {
50905
50778
  return {
50906
- type: "Error" /* Error */,
50779
+ success: false,
50907
50780
  message: {
50908
50781
  type: "error-text",
50909
50782
  value: "Not possible to fetch files. Abort."
50910
50783
  }
50911
50784
  };
50912
50785
  }
50913
- const { url: url2 } = toolInfo8.parameters.parse(args);
50786
+ const { url: url2 } = toolInfo5.parameters.parse(args);
50914
50787
  try {
50915
50788
  const filePart = await provider.readBinaryFile(url2);
50916
50789
  return {
50917
- type: "Reply" /* Reply */,
50790
+ success: true,
50918
50791
  message: {
50919
50792
  type: "content",
50920
50793
  value: [
@@ -50930,7 +50803,7 @@ var handler8 = async (provider, args) => {
50930
50803
  } catch (error46) {
50931
50804
  const errorMessage = error46 instanceof Error ? error46.message : "Unknown error";
50932
50805
  return {
50933
- type: "Error" /* Error */,
50806
+ success: false,
50934
50807
  message: {
50935
50808
  type: "error-text",
50936
50809
  value: `Error fetching file from ${url2}: ${errorMessage}`
@@ -50939,11 +50812,11 @@ var handler8 = async (provider, args) => {
50939
50812
  }
50940
50813
  };
50941
50814
  var readBinaryFile_default = {
50942
- ...toolInfo8,
50943
- handler: handler8
50815
+ ...toolInfo5,
50816
+ handler: handler5
50944
50817
  };
50945
50818
  // ../core/src/tools/readFile.ts
50946
- var toolInfo9 = {
50819
+ var toolInfo6 = {
50947
50820
  name: "readFile",
50948
50821
  description: "Request to read the contents of one or multiple files at the specified paths. Use comma separated paths to read multiple files. Use this when you need to examine the contents of an existing file you do not know the contents of, for example to analyze code, review text files, or extract information from configuration files. May not be suitable for other types of binary files, as it returns the raw content as a string. Try to list all the potential files are relevent to the task, and then use this tool to read all the relevant files.",
50949
50822
  parameters: exports_external.object({
@@ -50980,17 +50853,17 @@ var toolInfo9 = {
50980
50853
  ]
50981
50854
  })
50982
50855
  };
50983
- var handler9 = async (provider, args) => {
50856
+ var handler6 = async (provider, args) => {
50984
50857
  if (!provider.readFile) {
50985
50858
  return {
50986
- type: "Error" /* Error */,
50859
+ success: false,
50987
50860
  message: {
50988
50861
  type: "error-text",
50989
50862
  value: "Not possible to read file."
50990
50863
  }
50991
50864
  };
50992
50865
  }
50993
- const { path: paths, includeIgnored } = toolInfo9.parameters.parse(args);
50866
+ const { path: paths, includeIgnored } = toolInfo6.parameters.parse(args);
50994
50867
  const resp = [];
50995
50868
  for (const path of paths) {
50996
50869
  const fileContent = await provider.readFile(path, includeIgnored ?? false);
@@ -51006,7 +50879,7 @@ var handler9 = async (provider, args) => {
51006
50879
  }
51007
50880
  }
51008
50881
  return {
51009
- type: "Reply" /* Reply */,
50882
+ success: true,
51010
50883
  message: {
51011
50884
  type: "text",
51012
50885
  value: resp.join(`
@@ -51015,45 +50888,11 @@ var handler9 = async (provider, args) => {
51015
50888
  };
51016
50889
  };
51017
50890
  var readFile_default = {
51018
- ...toolInfo9,
51019
- handler: handler9
51020
- };
51021
- // ../core/src/tools/readMemory.ts
51022
- var toolInfo10 = {
51023
- name: "readMemory",
51024
- description: "Reads content from a memory topic. Use this to retrieve information stored in previous steps. If no topic is specified, reads from the default topic.",
51025
- parameters: exports_external.object({
51026
- topic: exports_external.string().nullish().describe('The topic to read from memory. Defaults to ":default:".')
51027
- })
51028
- };
51029
- var handler10 = async (provider, args) => {
51030
- const { topic } = toolInfo10.parameters.parse(args);
51031
- const content = await provider.readMemory(topic ?? undefined);
51032
- if (content) {
51033
- return {
51034
- type: "Reply" /* Reply */,
51035
- message: {
51036
- type: "text",
51037
- value: `<memory${topic ? ` topic="${topic}"` : ""}>
51038
- ${content}
51039
- </memory>`
51040
- }
51041
- };
51042
- }
51043
- return {
51044
- type: "Reply" /* Reply */,
51045
- message: {
51046
- type: "text",
51047
- value: `<memory ${topic ? `topic="${topic}"` : ""} isEmpty="true" />`
51048
- }
51049
- };
51050
- };
51051
- var readMemory_default = {
51052
- ...toolInfo10,
51053
- handler: handler10
50891
+ ...toolInfo6,
50892
+ handler: handler6
51054
50893
  };
51055
50894
  // ../core/src/tools/removeFile.ts
51056
- var toolInfo11 = {
50895
+ var toolInfo7 = {
51057
50896
  name: "removeFile",
51058
50897
  description: "Request to remove a file at the specified path.",
51059
50898
  parameters: exports_external.object({
@@ -51069,20 +50908,20 @@ var toolInfo11 = {
51069
50908
  ]
51070
50909
  })
51071
50910
  };
51072
- var handler11 = async (provider, args) => {
50911
+ var handler7 = async (provider, args) => {
51073
50912
  if (!provider.removeFile) {
51074
50913
  return {
51075
- type: "Error" /* Error */,
50914
+ success: false,
51076
50915
  message: {
51077
50916
  type: "error-text",
51078
50917
  value: "Not possible to remove file."
51079
50918
  }
51080
50919
  };
51081
50920
  }
51082
- const parsed = toolInfo11.parameters.safeParse(args);
50921
+ const parsed = toolInfo7.parameters.safeParse(args);
51083
50922
  if (!parsed.success) {
51084
50923
  return {
51085
- type: "Error" /* Error */,
50924
+ success: false,
51086
50925
  message: {
51087
50926
  type: "error-text",
51088
50927
  value: `Invalid arguments for removeFile: ${parsed.error.message}`
@@ -51092,7 +50931,7 @@ var handler11 = async (provider, args) => {
51092
50931
  const { path } = parsed.data;
51093
50932
  await provider.removeFile(path);
51094
50933
  return {
51095
- type: "Reply" /* Reply */,
50934
+ success: true,
51096
50935
  message: {
51097
50936
  type: "text",
51098
50937
  value: `<remove_file_path>${path}</remove_file_path><status>Success</status>`
@@ -51100,11 +50939,11 @@ var handler11 = async (provider, args) => {
51100
50939
  };
51101
50940
  };
51102
50941
  var removeFile_default = {
51103
- ...toolInfo11,
51104
- handler: handler11
50942
+ ...toolInfo7,
50943
+ handler: handler7
51105
50944
  };
51106
50945
  // ../core/src/tools/renameFile.ts
51107
- var toolInfo12 = {
50946
+ var toolInfo8 = {
51108
50947
  name: "renameFile",
51109
50948
  description: "Request to rename a file from source path to target path.",
51110
50949
  parameters: exports_external.object({
@@ -51122,20 +50961,20 @@ var toolInfo12 = {
51122
50961
  ]
51123
50962
  })
51124
50963
  };
51125
- var handler12 = async (provider, args) => {
50964
+ var handler8 = async (provider, args) => {
51126
50965
  if (!provider.renameFile) {
51127
50966
  return {
51128
- type: "Error" /* Error */,
50967
+ success: false,
51129
50968
  message: {
51130
50969
  type: "error-text",
51131
50970
  value: "Not possible to rename file."
51132
50971
  }
51133
50972
  };
51134
50973
  }
51135
- const { source_path, target_path } = toolInfo12.parameters.parse(args);
50974
+ const { source_path, target_path } = toolInfo8.parameters.parse(args);
51136
50975
  await provider.renameFile(source_path, target_path);
51137
50976
  return {
51138
- type: "Reply" /* Reply */,
50977
+ success: true,
51139
50978
  message: {
51140
50979
  type: "text",
51141
50980
  value: `<rename_file_path>${target_path}</rename_file_path><status>Success</status>`
@@ -51143,8 +50982,8 @@ var handler12 = async (provider, args) => {
51143
50982
  };
51144
50983
  };
51145
50984
  var renameFile_default = {
51146
- ...toolInfo12,
51147
- handler: handler12
50985
+ ...toolInfo8,
50986
+ handler: handler8
51148
50987
  };
51149
50988
  // ../core/src/tools/utils/replaceInFile.ts
51150
50989
  var replaceInFile = (fileContent, diff) => {
@@ -51221,7 +51060,7 @@ var replaceInFile = (fileContent, diff) => {
51221
51060
  };
51222
51061
 
51223
51062
  // ../core/src/tools/replaceInFile.ts
51224
- var toolInfo13 = {
51063
+ var toolInfo9 = {
51225
51064
  name: "replaceInFile",
51226
51065
  description: "Request to replace sections of content in an existing file using SEARCH/REPLACE blocks that define exact changes to specific parts of the file. This tool should be used when you need to make targeted changes to specific parts of a file.",
51227
51066
  parameters: exports_external.object({
@@ -51331,20 +51170,20 @@ function oldFeature() {
51331
51170
  ]
51332
51171
  })
51333
51172
  };
51334
- var handler13 = async (provider, args) => {
51173
+ var handler9 = async (provider, args) => {
51335
51174
  if (!provider.readFile || !provider.writeFile) {
51336
51175
  return {
51337
- type: "Error" /* Error */,
51176
+ success: false,
51338
51177
  message: {
51339
51178
  type: "error-text",
51340
51179
  value: "Not possible to replace in file."
51341
51180
  }
51342
51181
  };
51343
51182
  }
51344
- const parsed = toolInfo13.parameters.safeParse(args);
51183
+ const parsed = toolInfo9.parameters.safeParse(args);
51345
51184
  if (!parsed.success) {
51346
51185
  return {
51347
- type: "Error" /* Error */,
51186
+ success: false,
51348
51187
  message: {
51349
51188
  type: "error-text",
51350
51189
  value: `Invalid arguments for replaceInFile: ${parsed.error.message}`
@@ -51356,7 +51195,7 @@ var handler13 = async (provider, args) => {
51356
51195
  const fileContent = await provider.readFile(path, false);
51357
51196
  if (fileContent == null) {
51358
51197
  return {
51359
- type: "Error" /* Error */,
51198
+ success: false,
51360
51199
  message: {
51361
51200
  type: "error-text",
51362
51201
  value: `<replace_in_file_result path="${path}" status="failed" message="File not found" />`
@@ -51366,7 +51205,7 @@ var handler13 = async (provider, args) => {
51366
51205
  const result = replaceInFile(fileContent, diff);
51367
51206
  if (result.status === "no_diff_applied") {
51368
51207
  return {
51369
- type: "Error" /* Error */,
51208
+ success: false,
51370
51209
  message: {
51371
51210
  type: "error-text",
51372
51211
  value: `<replace_in_file_result path="${path}" status="failed" message="Unable to apply changes">
@@ -51378,7 +51217,7 @@ var handler13 = async (provider, args) => {
51378
51217
  await provider.writeFile(path, result.content);
51379
51218
  if (result.status === "some_diff_applied") {
51380
51219
  return {
51381
- type: "Reply" /* Reply */,
51220
+ success: true,
51382
51221
  message: {
51383
51222
  type: "text",
51384
51223
  value: `<replace_in_file_result path="${path}" status="some_diff_applied" applied_count="${result.appliedCount}" total_count="${result.totalCount}">
@@ -51388,7 +51227,7 @@ var handler13 = async (provider, args) => {
51388
51227
  };
51389
51228
  }
51390
51229
  return {
51391
- type: "Reply" /* Reply */,
51230
+ success: true,
51392
51231
  message: {
51393
51232
  type: "text",
51394
51233
  value: `<replace_in_file_result path="${path}" status="all_diff_applied" />`
@@ -51396,7 +51235,7 @@ var handler13 = async (provider, args) => {
51396
51235
  };
51397
51236
  } catch (error46) {
51398
51237
  return {
51399
- type: "Error" /* Error */,
51238
+ success: false,
51400
51239
  message: {
51401
51240
  type: "error-text",
51402
51241
  value: `Invalid arguments for replaceInFile: ${error46}`
@@ -51405,11 +51244,11 @@ var handler13 = async (provider, args) => {
51405
51244
  }
51406
51245
  };
51407
51246
  var replaceInFile_default = {
51408
- ...toolInfo13,
51409
- handler: handler13
51247
+ ...toolInfo9,
51248
+ handler: handler9
51410
51249
  };
51411
51250
  // ../core/src/tools/search.ts
51412
- var toolInfo14 = {
51251
+ var toolInfo10 = {
51413
51252
  name: "search",
51414
51253
  description: "Search the web for information using Google Search. Use this tool to find current information, facts, news, documentation, or research that is not available in your training data. Returns comprehensive search results with relevant content extracted from the web.",
51415
51254
  parameters: exports_external.object({
@@ -51437,11 +51276,11 @@ var toolInfo14 = {
51437
51276
  ]
51438
51277
  })
51439
51278
  };
51440
- var handler14 = async (provider, args) => {
51441
- const { query } = toolInfo14.parameters.parse(args);
51279
+ var handler10 = async (provider, args) => {
51280
+ const { query } = toolInfo10.parameters.parse(args);
51442
51281
  if (!provider.search) {
51443
51282
  return {
51444
- type: "Error" /* Error */,
51283
+ success: false,
51445
51284
  message: {
51446
51285
  type: "text",
51447
51286
  value: "This tool requires a web provider to be installed."
@@ -51450,7 +51289,7 @@ var handler14 = async (provider, args) => {
51450
51289
  }
51451
51290
  const result = await provider.search(query);
51452
51291
  return {
51453
- type: "Reply" /* Reply */,
51292
+ success: true,
51454
51293
  message: {
51455
51294
  type: "text",
51456
51295
  value: result
@@ -51458,11 +51297,11 @@ var handler14 = async (provider, args) => {
51458
51297
  };
51459
51298
  };
51460
51299
  var search_default = {
51461
- ...toolInfo14,
51462
- handler: handler14
51300
+ ...toolInfo10,
51301
+ handler: handler10
51463
51302
  };
51464
51303
  // ../core/src/tools/searchFiles.ts
51465
- var toolInfo15 = {
51304
+ var toolInfo11 = {
51466
51305
  name: "searchFiles",
51467
51306
  description: "Request to perform a regex search across files in a specified directory, outputting context-rich results that include surrounding lines. This tool searches for patterns or specific content across multiple files, displaying each match with encapsulating context.",
51468
51307
  parameters: exports_external.object({
@@ -51486,20 +51325,20 @@ var toolInfo15 = {
51486
51325
  ]
51487
51326
  })
51488
51327
  };
51489
- var handler15 = async (provider, args) => {
51328
+ var handler11 = async (provider, args) => {
51490
51329
  if (!provider.searchFiles) {
51491
51330
  return {
51492
- type: "Error" /* Error */,
51331
+ success: false,
51493
51332
  message: {
51494
51333
  type: "error-text",
51495
51334
  value: "Not possible to search files."
51496
51335
  }
51497
51336
  };
51498
51337
  }
51499
- const parsed = toolInfo15.parameters.safeParse(args);
51338
+ const parsed = toolInfo11.parameters.safeParse(args);
51500
51339
  if (!parsed.success) {
51501
51340
  return {
51502
- type: "Error" /* Error */,
51341
+ success: false,
51503
51342
  message: {
51504
51343
  type: "error-text",
51505
51344
  value: `Invalid arguments for searchFiles: ${parsed.error.message}`
@@ -51510,7 +51349,7 @@ var handler15 = async (provider, args) => {
51510
51349
  try {
51511
51350
  const files = await provider.searchFiles(path, regex, filePattern ?? "*");
51512
51351
  return {
51513
- type: "Reply" /* Reply */,
51352
+ success: true,
51514
51353
  message: {
51515
51354
  type: "text",
51516
51355
  value: `<search_files_path>${path}</search_files_path>
@@ -51525,7 +51364,7 @@ ${files.join(`
51525
51364
  };
51526
51365
  } catch (error46) {
51527
51366
  return {
51528
- type: "Error" /* Error */,
51367
+ success: false,
51529
51368
  message: {
51530
51369
  type: "error-text",
51531
51370
  value: `Error searching files: ${error46}`
@@ -51534,112 +51373,48 @@ ${files.join(`
51534
51373
  }
51535
51374
  };
51536
51375
  var searchFiles_default = {
51537
- ...toolInfo15,
51538
- handler: handler15
51376
+ ...toolInfo11,
51377
+ handler: handler11
51539
51378
  };
51540
- // ../core/src/tools/updateMemory.ts
51541
- var toolInfo16 = {
51542
- name: "updateMemory",
51543
- description: 'Appends, replaces, or removes content from a memory topic. Use "append" to add to existing content, "replace" to overwrite entirely, or "remove" to delete a topic. Memory persists across tool calls within a workflow.',
51544
- parameters: exports_external.object({
51545
- operation: exports_external.enum(["append", "replace", "remove"]).describe("The operation to perform."),
51546
- topic: exports_external.string().nullish().describe('The topic to update in memory. Defaults to ":default:".'),
51547
- content: exports_external.string().nullish().describe("The content for append or replace operations. Must be omitted for remove operation.")
51548
- }).superRefine((data, ctx) => {
51549
- if (data.operation === "append" || data.operation === "replace") {
51550
- if (data.content === undefined) {
51551
- ctx.addIssue({
51552
- code: "custom",
51553
- message: 'Content is required for "append" and "replace" operations.',
51554
- path: ["content"]
51555
- });
51556
- }
51557
- } else if (data.operation === "remove") {
51558
- if (data.content !== undefined) {
51559
- ctx.addIssue({
51560
- code: "custom",
51561
- message: 'Content must not be provided for "remove" operation.',
51562
- path: ["content"]
51563
- });
51564
- }
51379
+ // ../core/src/tools/todo.ts
51380
+ var TodoStatus = exports_external.enum(["open", "completed", "closed"]);
51381
+ var TodoItemSchema = exports_external.object({
51382
+ id: exports_external.string(),
51383
+ title: exports_external.string(),
51384
+ description: exports_external.string(),
51385
+ status: TodoStatus
51386
+ });
51387
+ var UpdateTodoItemInputSchema = exports_external.object({
51388
+ operation: exports_external.enum(["add", "update"]),
51389
+ id: exports_external.string().nullish(),
51390
+ parentId: exports_external.string().nullish(),
51391
+ title: exports_external.string().nullish(),
51392
+ description: exports_external.string().nullish(),
51393
+ status: TodoStatus.nullish()
51394
+ }).superRefine((data, ctx) => {
51395
+ if (data.operation === "add") {
51396
+ if (!data.title) {
51397
+ ctx.addIssue({
51398
+ code: "custom",
51399
+ message: 'Title is required for "add" operation',
51400
+ path: ["title"]
51401
+ });
51565
51402
  }
51566
- })
51567
- };
51568
- var handler16 = async (provider, args) => {
51569
- if (!provider.updateMemory) {
51570
- return {
51571
- type: "Error" /* Error */,
51572
- message: {
51573
- type: "error-text",
51574
- value: "Memory operations are not supported by the current provider."
51575
- }
51576
- };
51577
- }
51578
- const params = toolInfo16.parameters.parse(args);
51579
- await provider.updateMemory(params.operation, params.topic ?? undefined, params.content ?? undefined);
51580
- switch (params.operation) {
51581
- case "append":
51582
- return {
51583
- type: "Reply" /* Reply */,
51584
- message: {
51585
- type: "text",
51586
- value: `Content appended to memory topic '${params.topic || ":default:"}'.`
51587
- }
51588
- };
51589
- case "replace":
51590
- return {
51591
- type: "Reply" /* Reply */,
51592
- message: {
51593
- type: "text",
51594
- value: `Memory topic '${params.topic || ":default:"}' replaced.`
51595
- }
51596
- };
51597
- case "remove":
51598
- return {
51599
- type: "Reply" /* Reply */,
51600
- message: {
51601
- type: "text",
51602
- value: `Memory topic '${params.topic || ":default:"}' removed.`
51603
- }
51604
- };
51605
- }
51606
- };
51607
- var updateMemory_default = {
51608
- ...toolInfo16,
51609
- handler: handler16
51610
- };
51611
- // ../core/src/tools/updateTodoItem.ts
51612
- var toolInfo17 = {
51613
- name: "updateTodoItem",
51614
- description: "Add or update a to-do item.",
51615
- parameters: UpdateTodoItemInputSchema
51616
- };
51617
- var handler17 = async (provider, args) => {
51618
- if (!provider.updateTodoItem) {
51619
- return {
51620
- type: "Error" /* Error */,
51621
- message: {
51622
- type: "error-text",
51623
- value: "Not possible to update a to-do item."
51624
- }
51625
- };
51626
- }
51627
- const input = toolInfo17.parameters.parse(args);
51628
- const result = await provider.updateTodoItem(input);
51629
- return {
51630
- type: "Reply" /* Reply */,
51631
- message: {
51632
- type: "json",
51633
- value: result
51403
+ } else if (data.operation === "update") {
51404
+ if (!data.id) {
51405
+ ctx.addIssue({
51406
+ code: "custom",
51407
+ message: 'ID is required for "update" operation',
51408
+ path: ["id"]
51409
+ });
51634
51410
  }
51635
- };
51636
- };
51637
- var updateTodoItem_default = {
51638
- ...toolInfo17,
51639
- handler: handler17
51640
- };
51411
+ }
51412
+ });
51413
+ var UpdateTodoItemOutputSchema = exports_external.object({
51414
+ id: exports_external.string()
51415
+ });
51641
51416
  // ../core/src/tools/writeToFile.ts
51642
- var toolInfo18 = {
51417
+ var toolInfo12 = {
51643
51418
  name: "writeToFile",
51644
51419
  description: "Request to write content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Ensure that the output content does not include incorrect escaped character patterns such as `&lt;`, `&gt;`, or `&amp;`. Also ensure there is no unwanted CDATA tags in the content.",
51645
51420
  parameters: exports_external.object({
@@ -51668,20 +51443,20 @@ export default App;
51668
51443
  ]
51669
51444
  })
51670
51445
  };
51671
- var handler18 = async (provider, args) => {
51446
+ var handler12 = async (provider, args) => {
51672
51447
  if (!provider.writeFile) {
51673
51448
  return {
51674
- type: "Error" /* Error */,
51449
+ success: false,
51675
51450
  message: {
51676
51451
  type: "error-text",
51677
51452
  value: "Not possible to write file."
51678
51453
  }
51679
51454
  };
51680
51455
  }
51681
- const parsed = toolInfo18.parameters.safeParse(args);
51456
+ const parsed = toolInfo12.parameters.safeParse(args);
51682
51457
  if (!parsed.success) {
51683
51458
  return {
51684
- type: "Error" /* Error */,
51459
+ success: false,
51685
51460
  message: {
51686
51461
  type: "error-text",
51687
51462
  value: `Invalid arguments for writeToFile: ${parsed.error.message}`
@@ -51694,7 +51469,7 @@ var handler18 = async (provider, args) => {
51694
51469
  content = trimmedContent.slice(9, -3);
51695
51470
  await provider.writeFile(path, content);
51696
51471
  return {
51697
- type: "Reply" /* Reply */,
51472
+ success: true,
51698
51473
  message: {
51699
51474
  type: "text",
51700
51475
  value: `<write_to_file_path>${path}</write_to_file_path><status>Success</status>`
@@ -51702,8 +51477,8 @@ var handler18 = async (provider, args) => {
51702
51477
  };
51703
51478
  };
51704
51479
  var writeToFile_default = {
51705
- ...toolInfo18,
51706
- handler: handler18
51480
+ ...toolInfo12,
51481
+ handler: handler12
51707
51482
  };
51708
51483
  // ../core/src/UsageMeter.ts
51709
51484
  class UsageMeter {
@@ -63981,11 +63756,11 @@ var uiMessagesSchema = lazyValidator(() => zodSchema(exports_external.array(expo
63981
63756
  // ../core/src/workflow/agent.workflow.ts
63982
63757
  var agentWorkflow = async (input, { step, tools, logger }) => {
63983
63758
  const event = (name17, event2) => step(name17, () => tools.taskEvent(event2));
63984
- const { tools: toolInfo19, maxToolRoundTrips = 200 } = input;
63759
+ const { tools: toolInfo13, maxToolRoundTrips = 200 } = input;
63985
63760
  const messages = "systemPrompt" in input ? [{ role: "system", content: input.systemPrompt }] : input.messages;
63986
63761
  await event("start-task", { kind: "StartTask" /* StartTask */, systemPrompt: "systemPrompt" in input ? input.systemPrompt : "" });
63987
63762
  const toolSet = {};
63988
- for (const tool2 of toolInfo19) {
63763
+ for (const tool2 of toolInfo13) {
63989
63764
  toolSet[tool2.name] = {
63990
63765
  description: tool2.description,
63991
63766
  inputSchema: jsonSchema(toJSONSchema(tool2.parameters))
@@ -64087,50 +63862,28 @@ var agentWorkflow = async (input, { step, tools, logger }) => {
64087
63862
  input: toolCall.input
64088
63863
  });
64089
63864
  });
64090
- switch (toolResponse.type) {
64091
- case "Reply" /* Reply */:
64092
- await event(`event-tool-reply-${toolCall.toolName}-${toolCall.toolCallId}`, {
64093
- kind: "ToolReply" /* ToolReply */,
64094
- tool: toolCall.toolName,
64095
- content: toolResponse.message
64096
- });
64097
- toolResults.push({
64098
- toolCallId: toolCall.toolCallId,
64099
- toolName: toolCall.toolName,
64100
- output: toolResponse.message
64101
- });
64102
- break;
64103
- case "Error" /* Error */:
64104
- await event(`event-tool-error-${toolCall.toolName}-${toolCall.toolCallId}`, {
64105
- kind: "ToolError" /* ToolError */,
64106
- tool: toolCall.toolName,
64107
- error: toolResponse.message ?? "Unknown error"
64108
- });
64109
- toolResults.push({
64110
- toolCallId: toolCall.toolCallId,
64111
- toolName: toolCall.toolName,
64112
- output: toolResponse.message
64113
- });
64114
- break;
64115
- case "Exit": {
64116
- if (toolCalls.length > 1) {
64117
- toolResults.push({
64118
- toolCallId: toolCall.toolCallId,
64119
- toolName: toolCall.toolName,
64120
- output: {
64121
- type: "error-text",
64122
- value: `Error: The tool '${toolCall.toolName}' must be called alone, but it was called with other tools.`
64123
- }
64124
- });
64125
- break;
64126
- }
64127
- if (toolResults.length > 0) {
64128
- break;
64129
- }
64130
- const exitReason = { ...toolResponse, messages };
64131
- await event("end-task", { kind: "EndTask" /* EndTask */, exitReason });
64132
- return exitReason;
64133
- }
63865
+ if (toolResponse.success) {
63866
+ await event(`event-tool-reply-${toolCall.toolName}-${toolCall.toolCallId}`, {
63867
+ kind: "ToolReply" /* ToolReply */,
63868
+ tool: toolCall.toolName,
63869
+ content: toolResponse.message
63870
+ });
63871
+ toolResults.push({
63872
+ toolCallId: toolCall.toolCallId,
63873
+ toolName: toolCall.toolName,
63874
+ output: toolResponse.message
63875
+ });
63876
+ } else {
63877
+ await event(`event-tool-error-${toolCall.toolName}-${toolCall.toolCallId}`, {
63878
+ kind: "ToolError" /* ToolError */,
63879
+ tool: toolCall.toolName,
63880
+ error: toolResponse.message ?? "Unknown error"
63881
+ });
63882
+ toolResults.push({
63883
+ toolCallId: toolCall.toolCallId,
63884
+ toolName: toolCall.toolName,
63885
+ output: toolResponse.message
63886
+ });
64134
63887
  }
64135
63888
  }
64136
63889
  nextMessage = [
@@ -64205,7 +63958,8 @@ var WorkflowStepDefinitionSchema = exports_external.object({
64205
63958
  output: exports_external.string().nullish(),
64206
63959
  expected_outcome: exports_external.string().nullish(),
64207
63960
  code: exports_external.string().nullish(),
64208
- outputSchema: exports_external.any().nullish()
63961
+ outputSchema: exports_external.any().nullish(),
63962
+ timeout: exports_external.number().positive().nullish()
64209
63963
  });
64210
63964
  var WorkflowDefinitionSchema = exports_external.object({
64211
63965
  task: exports_external.string(),
@@ -64218,6 +63972,11 @@ var WorkflowFileSchema = exports_external.object({
64218
63972
  });
64219
63973
 
64220
63974
  // ../core/src/workflow/dynamic.ts
63975
+ var TOOL_GROUPS = {
63976
+ readonly: ["readFile", "readBinaryFile", "listFiles", "searchFiles"],
63977
+ readwrite: ["readFile", "readBinaryFile", "listFiles", "searchFiles", "writeToFile", "replaceInFile", "removeFile", "renameFile"],
63978
+ internet: ["fetchUrl", "search"]
63979
+ };
64221
63980
  function parseDynamicWorkflowDefinition(source) {
64222
63981
  try {
64223
63982
  const raw = $parse(source);
@@ -64231,6 +63990,29 @@ function parseDynamicWorkflowDefinition(source) {
64231
63990
  }
64232
63991
  }
64233
63992
  var AsyncFunction = Object.getPrototypeOf(async () => {}).constructor;
63993
+ function validateAndApplyDefaults(workflowId, workflow, input) {
63994
+ if (!workflow.inputs || workflow.inputs.length === 0) {
63995
+ return input;
63996
+ }
63997
+ const validatedInput = {};
63998
+ const errors4 = [];
63999
+ for (const inputDef of workflow.inputs) {
64000
+ const providedValue = input[inputDef.id];
64001
+ if (providedValue !== undefined && providedValue !== null) {
64002
+ validatedInput[inputDef.id] = providedValue;
64003
+ } else if (inputDef.default !== undefined && inputDef.default !== null) {
64004
+ validatedInput[inputDef.id] = inputDef.default;
64005
+ } else {
64006
+ errors4.push(`Missing required input '${inputDef.id}'${inputDef.description ? `: ${inputDef.description}` : ""}`);
64007
+ }
64008
+ }
64009
+ if (errors4.length > 0) {
64010
+ throw new Error(`Workflow '${workflowId}' input validation failed:
64011
+ ${errors4.map((e) => ` - ${e}`).join(`
64012
+ `)}`);
64013
+ }
64014
+ return validatedInput;
64015
+ }
64234
64016
  function createRunWorkflowFn(args) {
64235
64017
  return async (subWorkflowId, subInput) => {
64236
64018
  const mergedInput = { ...args.input, ...args.state, ...subInput ?? {} };
@@ -64251,7 +64033,14 @@ function compileStep(stepDef, workflowId, compiledSteps) {
64251
64033
  compiledSteps.set(key, fn);
64252
64034
  return fn;
64253
64035
  } catch (error46) {
64254
- throw new Error(`Failed to compile code for step '${stepDef.id}' in workflow '${workflowId}': ${error46 instanceof Error ? error46.message : String(error46)}`);
64036
+ const errorMsg = error46 instanceof Error ? error46.message : String(error46);
64037
+ const codePreview = stepDef.code.length > 200 ? `${stepDef.code.substring(0, 200)}...` : stepDef.code;
64038
+ throw new Error(`Failed to compile code for step '${stepDef.id}' in workflow '${workflowId}':
64039
+ ` + ` Error: ${errorMsg}
64040
+ ` + ` Code:
64041
+ ${codePreview.split(`
64042
+ `).map((line) => ` ${line}`).join(`
64043
+ `)}`);
64255
64044
  }
64256
64045
  }
64257
64046
  async function executeStepWithAgent(stepDef, workflowId, input, state, context, options, runInternal) {
@@ -64262,9 +64051,33 @@ async function executeStepWithAgent(stepDef, workflowId, input, state, context,
64262
64051
  if (!options.toolInfo) {
64263
64052
  throw new Error(`Step '${stepDef.id}' in workflow '${workflowId}' requires agent execution, but no toolInfo was provided to DynamicWorkflowRunner.`);
64264
64053
  }
64265
- const allowedToolNames = stepDef.tools;
64266
- const toolsForAgent = allowedToolNames ? options.toolInfo.filter((t) => allowedToolNames.includes(t.name)) : [...options.toolInfo];
64267
- if (!allowedToolNames || allowedToolNames.includes("runWorkflow")) {
64054
+ const rawAllowedToolNames = stepDef.tools;
64055
+ let toolsForAgent;
64056
+ if (rawAllowedToolNames) {
64057
+ const expandedToolNames = new Set;
64058
+ let includeAll = false;
64059
+ for (const name17 of rawAllowedToolNames) {
64060
+ if (name17 === "all") {
64061
+ includeAll = true;
64062
+ break;
64063
+ }
64064
+ if (Object.hasOwn(TOOL_GROUPS, name17)) {
64065
+ for (const tool2 of TOOL_GROUPS[name17]) {
64066
+ expandedToolNames.add(tool2);
64067
+ }
64068
+ } else {
64069
+ expandedToolNames.add(name17);
64070
+ }
64071
+ }
64072
+ if (includeAll) {
64073
+ toolsForAgent = [...options.toolInfo];
64074
+ } else {
64075
+ toolsForAgent = options.toolInfo.filter((t) => expandedToolNames.has(t.name));
64076
+ }
64077
+ } else {
64078
+ toolsForAgent = [...options.toolInfo];
64079
+ }
64080
+ if (!rawAllowedToolNames || rawAllowedToolNames.includes("all") || rawAllowedToolNames.includes("runWorkflow")) {
64268
64081
  toolsForAgent.push({
64269
64082
  name: "runWorkflow",
64270
64083
  description: "Run a named sub-workflow defined in the current workflow file.",
@@ -64273,11 +64086,12 @@ async function executeStepWithAgent(stepDef, workflowId, input, state, context,
64273
64086
  input: exports_external.any().nullish().describe("Optional input object for the sub-workflow")
64274
64087
  }),
64275
64088
  handler: async () => {
64276
- return { type: "Error" /* Error */, message: { type: "error-text", value: "runWorkflow is virtual." } };
64089
+ return { success: false, message: { type: "error-text", value: "runWorkflow is virtual." } };
64277
64090
  }
64278
64091
  });
64279
64092
  }
64280
64093
  const allowedToolNameSet = new Set(toolsForAgent.map((t) => t.name));
64094
+ context.logger.debug(`[Agent] Available tools for step '${stepDef.id}': ${toolsForAgent.map((t) => t.name).join(", ")}`);
64281
64095
  const systemPrompt = options.stepSystemPrompt?.({ workflowId, step: stepDef, input, state }) ?? [
64282
64096
  `You are an AI assistant executing a workflow step.`,
64283
64097
  "",
@@ -64304,7 +64118,7 @@ async function executeStepWithAgent(stepDef, workflowId, input, state, context,
64304
64118
  invokeTool: async ({ toolName, input: toolInput }) => {
64305
64119
  if (!allowedToolNameSet.has(toolName)) {
64306
64120
  return {
64307
- type: "Error" /* Error */,
64121
+ success: false,
64308
64122
  message: { type: "error-text", value: `Tool '${toolName}' is not allowed in this step.` }
64309
64123
  };
64310
64124
  }
@@ -64313,17 +64127,17 @@ async function executeStepWithAgent(stepDef, workflowId, input, state, context,
64313
64127
  const subInput = toolInput?.input;
64314
64128
  if (typeof subWorkflowId !== "string") {
64315
64129
  return {
64316
- type: "Error" /* Error */,
64130
+ success: false,
64317
64131
  message: { type: "error-text", value: "runWorkflow.workflowId must be a string." }
64318
64132
  };
64319
64133
  }
64320
64134
  try {
64321
64135
  const output = await runWorkflow(subWorkflowId, subInput);
64322
64136
  const jsonResult = { type: "json", value: output };
64323
- return { type: "Reply" /* Reply */, message: jsonResult };
64137
+ return { success: true, message: jsonResult };
64324
64138
  } catch (error46) {
64325
64139
  return {
64326
- type: "Error" /* Error */,
64140
+ success: false,
64327
64141
  message: { type: "error-text", value: error46 instanceof Error ? error46.message : String(error46) }
64328
64142
  };
64329
64143
  }
@@ -64346,28 +64160,91 @@ async function executeStepWithAgent(stepDef, workflowId, input, state, context,
64346
64160
  if (parsed.success) {
64347
64161
  return parsed.data;
64348
64162
  }
64163
+ if (options.wrapAgentResultInObject) {
64164
+ context.logger.warn(`[Agent] Step '${stepDef.id}' returned plain text instead of JSON. Wrapping in {result: ...}`);
64165
+ return { result: result.message };
64166
+ }
64349
64167
  return result.message;
64350
64168
  }
64351
- throw new Error(`Agent execution for step '${stepDef.id}' in workflow '${workflowId}' did not exit cleanly.`);
64169
+ if (result.type === "Error") {
64170
+ throw new Error(`Agent step '${stepDef.id}' in workflow '${workflowId}' failed: ${result.error?.message || "Unknown error"}`);
64171
+ }
64172
+ if (result.type === "UsageExceeded") {
64173
+ throw new Error(`Agent step '${stepDef.id}' in workflow '${workflowId}' exceeded usage limits (tokens or rounds)`);
64174
+ }
64175
+ throw new Error(`Agent step '${stepDef.id}' in workflow '${workflowId}' exited unexpectedly with type: ${result.type}`);
64176
+ }
64177
+ async function executeStepWithTimeout(stepDef, workflowId, input, state, context, options, compiledSteps, runInternal) {
64178
+ const executeStepLogic = async () => {
64179
+ if (stepDef.code && options.allowUnsafeCodeExecution) {
64180
+ context.logger.debug(`[Step] Executing step '${stepDef.id}' with compiled code`);
64181
+ const fn = compileStep(stepDef, workflowId, compiledSteps);
64182
+ const runWorkflow = createRunWorkflowFn({ input, state, context, runInternal });
64183
+ const agentTools = {};
64184
+ if (options.toolInfo) {
64185
+ for (const tool2 of options.toolInfo) {
64186
+ if (typeof context.tools[tool2.name] === "function") {
64187
+ agentTools[tool2.name] = context.tools[tool2.name];
64188
+ }
64189
+ }
64190
+ }
64191
+ const runtimeCtx = {
64192
+ workflowId,
64193
+ stepId: stepDef.id,
64194
+ input,
64195
+ state,
64196
+ tools: context.tools,
64197
+ logger: context.logger,
64198
+ step: context.step,
64199
+ runWorkflow,
64200
+ toolInfo: options.toolInfo,
64201
+ agentTools
64202
+ };
64203
+ const result2 = await fn(runtimeCtx);
64204
+ context.logger.debug(`[Step] Compiled code execution completed for step '${stepDef.id}'`);
64205
+ return result2;
64206
+ }
64207
+ context.logger.debug(`[Step] Executing step '${stepDef.id}' with agent`);
64208
+ const result = await executeStepWithAgent(stepDef, workflowId, input, state, context, options, runInternal);
64209
+ context.logger.debug(`[Step] Agent execution completed for step '${stepDef.id}'`);
64210
+ return result;
64211
+ };
64212
+ if (stepDef.timeout && stepDef.timeout > 0) {
64213
+ context.logger.debug(`[Step] Step '${stepDef.id}' has timeout of ${stepDef.timeout}ms`);
64214
+ let timeoutId;
64215
+ const timeoutPromise = new Promise((_, reject) => {
64216
+ timeoutId = setTimeout(() => reject(new Error(`Step '${stepDef.id}' in workflow '${workflowId}' timed out after ${stepDef.timeout}ms`)), stepDef.timeout);
64217
+ });
64218
+ try {
64219
+ return await Promise.race([executeStepLogic(), timeoutPromise]);
64220
+ } finally {
64221
+ if (timeoutId)
64222
+ clearTimeout(timeoutId);
64223
+ }
64224
+ }
64225
+ return await executeStepLogic();
64352
64226
  }
64353
64227
  async function executeStep(stepDef, workflowId, input, state, context, options, compiledSteps, runInternal) {
64354
- if (stepDef.code && options.allowUnsafeCodeExecution) {
64355
- const fn = compileStep(stepDef, workflowId, compiledSteps);
64356
- const runWorkflow = createRunWorkflowFn({ input, state, context, runInternal });
64357
- const runtimeCtx = {
64358
- workflowId,
64359
- stepId: stepDef.id,
64360
- input,
64361
- state,
64362
- tools: context.tools,
64363
- logger: context.logger,
64364
- step: context.step,
64365
- runWorkflow,
64366
- toolInfo: options.toolInfo
64367
- };
64368
- return await fn(runtimeCtx);
64228
+ const result = await executeStepWithTimeout(stepDef, workflowId, input, state, context, options, compiledSteps, runInternal);
64229
+ if (stepDef.outputSchema) {
64230
+ try {
64231
+ const _schema = exports_external.any();
64232
+ if (typeof stepDef.outputSchema === "object") {
64233
+ context.logger.debug(`[Step] Validating output for step '${stepDef.id}' against schema`);
64234
+ if (stepDef.outputSchema.type === "object") {
64235
+ if (typeof result !== "object" || result === null || Array.isArray(result)) {
64236
+ throw new Error(`Expected object output, got ${Array.isArray(result) ? "array" : result === null ? "null" : typeof result}`);
64237
+ }
64238
+ }
64239
+ if (stepDef.outputSchema.type === "array" && !Array.isArray(result)) {
64240
+ throw new Error(`Expected array output, got ${typeof result}`);
64241
+ }
64242
+ }
64243
+ } catch (error46) {
64244
+ throw new Error(`Step '${stepDef.id}' in workflow '${workflowId}' output validation failed: ${error46 instanceof Error ? error46.message : String(error46)}`);
64245
+ }
64369
64246
  }
64370
- return await executeStepWithAgent(stepDef, workflowId, input, state, context, options, runInternal);
64247
+ return result;
64371
64248
  }
64372
64249
  function createDynamicWorkflow(definition, options = {}) {
64373
64250
  if (typeof definition === "string") {
@@ -64383,36 +64260,393 @@ function createDynamicWorkflow(definition, options = {}) {
64383
64260
  if (!workflow) {
64384
64261
  throw new Error(`Workflow '${workflowId}' not found`);
64385
64262
  }
64263
+ const validatedInput = validateAndApplyDefaults(workflowId, workflow, input);
64264
+ context.logger.info(`[Workflow] Starting workflow '${workflowId}'`);
64265
+ context.logger.debug(`[Workflow] Input: ${JSON.stringify(validatedInput)}`);
64266
+ context.logger.debug(`[Workflow] Inherited state: ${JSON.stringify(inheritedState)}`);
64267
+ context.logger.debug(`[Workflow] Steps: ${workflow.steps.map((s) => s.id).join(", ")}`);
64386
64268
  const state = { ...inheritedState };
64387
64269
  let lastOutput;
64388
- for (const stepDef of workflow.steps) {
64270
+ for (let i = 0;i < workflow.steps.length; i++) {
64271
+ const stepDef = workflow.steps[i];
64389
64272
  const stepName = `${workflowId}.${stepDef.id}`;
64273
+ context.logger.info(`[Workflow] Step ${i + 1}/${workflow.steps.length}: ${stepDef.id}`);
64274
+ context.logger.debug(`[Workflow] Step task: ${stepDef.task}`);
64275
+ if (stepDef.expected_outcome) {
64276
+ context.logger.debug(`[Workflow] Expected outcome: ${stepDef.expected_outcome}`);
64277
+ }
64278
+ context.logger.debug(`[Workflow] Current state keys: ${Object.keys(state).join(", ")}`);
64390
64279
  lastOutput = await context.step(stepName, async () => {
64391
- return await executeStep(stepDef, workflowId, input, state, context, options, compiledSteps, runInternal);
64280
+ return await executeStep(stepDef, workflowId, validatedInput, state, context, options, compiledSteps, runInternal);
64392
64281
  });
64393
64282
  const outputKey = stepDef.output ?? stepDef.id;
64394
64283
  state[outputKey] = lastOutput;
64284
+ context.logger.debug(`[Workflow] Step output stored as '${outputKey}': ${typeof lastOutput === "object" ? JSON.stringify(lastOutput).substring(0, 200) : lastOutput}`);
64395
64285
  }
64286
+ context.logger.info(`[Workflow] Completed workflow '${workflowId}'`);
64396
64287
  if (workflow.output) {
64288
+ context.logger.debug(`[Workflow] Returning output field: ${workflow.output}`);
64397
64289
  return state[workflow.output];
64398
64290
  }
64291
+ context.logger.debug(`[Workflow] Returning full state with keys: ${Object.keys(state).join(", ")}`);
64399
64292
  return state;
64400
64293
  };
64401
64294
  return async (workflowId, input, context) => {
64402
64295
  return await runInternal(workflowId, input, context, {});
64403
64296
  };
64404
64297
  }
64298
+ // ../core/src/workflow/prompts/dynamic-generator-prompts.ts
64299
+ var RUNTIME_CONTEXT_TYPES = `## Runtime context (ctx)
64300
+ \`\`\`ts
64301
+ // Runtime types (for reference)
64302
+ type Logger = {
64303
+ debug: (...args: any[]) => void
64304
+ info: (...args: any[]) => void
64305
+ warn: (...args: any[]) => void
64306
+ error: (...args: any[]) => void
64307
+ }
64308
+
64309
+ type StepFn = {
64310
+ <T>(name: string, fn: () => Promise<T>): Promise<T>
64311
+ <T>(name: string, options: { retry?: number }, fn: () => Promise<T>): Promise<T>
64312
+ }
64313
+
64314
+ type JsonModelMessage = { role: 'system' | 'user' | 'assistant' | 'tool'; content: any }
64315
+ type JsonResponseMessage = { role: 'assistant' | 'tool'; content: any }
64316
+ type ToolSet = Record<string, any>
64317
+
64318
+ type ToolResponseResult =
64319
+ | { type: 'text'; value: string }
64320
+ | { type: 'json'; value: any }
64321
+ | { type: 'error-text'; value: string }
64322
+ | { type: 'error-json'; value: any }
64323
+ | { type: 'content'; value: any[] }
64324
+
64325
+ type ToolResponse =
64326
+ | { type: 'Reply'; message: ToolResponseResult }
64327
+ | { type: 'Exit'; message: string; object?: any }
64328
+ | { type: 'Error'; message: ToolResponseResult }
64329
+
64330
+ type ExitReason =
64331
+ | { type: 'UsageExceeded' }
64332
+ | { type: 'Exit'; message: string; object?: any }
64333
+ | { type: 'Error'; error: { message: string; stack?: string } }
64334
+
64335
+ type FullToolInfo = { name: string; description: string; parameters: any; handler: any }
64336
+
64337
+ type AgentTools = {
64338
+ readFile: (input: { path: string }) => Promise<string | null>
64339
+ writeToFile: (input: { path: string; content: string }) => Promise<void>
64340
+ executeCommand: (input: { command: string; pipe?: boolean; requiresApproval?: boolean } & ({ args: string[]; shell?: false } | { shell: true })) => Promise<{
64341
+ exitCode: number
64342
+ stdout: string
64343
+ stderr: string
64344
+ }>
64345
+ searchFiles: (input: { path: string; regex: string; filePattern?: string }) => Promise<string>
64346
+ listFiles: (input: { path: string; recursive?: boolean; maxCount?: number; includeIgnored?: boolean }) => Promise<string>
64347
+ fetchUrl: (input: { url: string[] }) => Promise<string>
64348
+ askFollowupQuestion: (input: { questions: { prompt: string; options?: string[] }[] }) => Promise<any>
64349
+ // ... and other tools available in the environment
64350
+ }
64351
+
64352
+ // Tools available on ctx.tools in dynamic steps
64353
+ type DynamicWorkflowTools = {
64354
+ // LLM + agent helpers
64355
+ runAgent: (input: {
64356
+ tools: Readonly<FullToolInfo[]>
64357
+ maxToolRoundTrips?: number
64358
+ userMessage: readonly JsonModelMessage[]
64359
+ } & ({ messages: JsonModelMessage[] } | { systemPrompt: string })) => Promise<ExitReason>
64360
+
64361
+ // CLI UX helpers
64362
+ confirm: (input: { message: string }) => Promise<boolean>
64363
+ input: (input: { message: string; default?: string }) => Promise<string>
64364
+ select: (input: { message: string; choices: { name: string; value: string }[] }) => Promise<string>
64365
+ }
64366
+
64367
+ type DynamicStepRuntimeContext = {
64368
+ workflowId: string
64369
+ stepId: string
64370
+ input: Record<string, any>
64371
+ state: Record<string, any>
64372
+ tools: DynamicWorkflowTools
64373
+ agentTools: AgentTools
64374
+ logger: Logger
64375
+ step: StepFn
64376
+ runWorkflow: (workflowId: string, input?: Record<string, any>) => Promise<any>
64377
+ toolInfo?: ReadonlyArray<FullToolInfo>
64378
+ }
64379
+ \`\`\`
64380
+
64381
+ - \`ctx.input\`: workflow inputs (read-only).
64382
+ - \`ctx.state\`: shared state between steps (previous step outputs are stored here).
64383
+ - \`ctx.agentTools\`: standard tools (readFile, executeCommand, etc.). Call as \`await ctx.agentTools.someTool({ ... })\`.
64384
+ - \`ctx.tools\`: workflow helpers (runAgent, confirm, input, select).
64385
+ - \`ctx.runWorkflow\`: run a sub-workflow by id.`;
64386
+ var CONTEXT_USAGE_GUIDELINES = `## Guidelines
64387
+ - Use \`await\` for all async operations.
64388
+ - Return the output value for the step (this becomes the step output).
64389
+ - Access inputs via \`ctx.input.<inputId>\`.
64390
+ - Access previous step outputs via \`ctx.state.<stepOutputKey>\` (defaults to the step \`output\` or \`id\`).`;
64391
+ var QUALITY_GUIDELINES = `## Quality Guidelines for Code Implementation
64392
+
64393
+ ### Error Handling
64394
+ - ALWAYS validate inputs at the start of steps
64395
+ - Use try-catch for operations that might fail (file I/O, parsing, API calls)
64396
+ - Preserve stack traces: re-throw original errors rather than creating new ones
64397
+ - Use error type guards: \`const err = error instanceof Error ? error : new Error(String(error))\`
64398
+ - Check for null/undefined before using values
64399
+ - Handle edge cases (empty arrays, missing files, invalid data)
64400
+
64401
+ ### Logging
64402
+ - Use \`ctx.logger.info()\` for important progress updates
64403
+ - Use \`ctx.logger.debug()\` for detailed information
64404
+ - Use \`ctx.logger.warn()\` for recoverable issues
64405
+ - Use \`ctx.logger.error()\` before throwing errors
64406
+ - Log when starting and completing significant operations
64407
+ - Use template literals for readability: \`ctx.logger.info(\\\`Processing \${items.length} items...\\\`)\`
64408
+
64409
+ ### User Experience
64410
+ - Provide progress feedback for long operations
64411
+ - Return structured data (objects/arrays), not strings when possible
64412
+ - Include helpful metadata in results (counts, timestamps, status)
64413
+ - For batch operations, report progress: \`Processed 5/10 items\`
64414
+
64415
+ ### Data Validation
64416
+ - Validate required fields exist before accessing
64417
+ - Check data types match expectations
64418
+ - Validate array lengths before iteration
64419
+ - Example: \`if (!data?.users || !Array.isArray(data.users)) throw new Error('Invalid data format')\`
64420
+
64421
+ ### Best Practices
64422
+ - Use meaningful variable names
64423
+ - Avoid nested callbacks - use async/await
64424
+ - Clean up resources (close files, clear timeouts)
64425
+ - Return consistent data structures across similar steps
64426
+ - For iteration, consider batching or rate limiting
64427
+
64428
+ ### When to Simplify
64429
+ - Simple transformation steps (e.g., formatting strings) need only basic error handling
64430
+ - Internal sub-workflow steps with validated inputs from parent can skip redundant validation
64431
+ - Minimal logging is fine for fast steps (<100ms) that don't perform I/O or external calls
64432
+ - Use judgment: match error handling complexity to the step's failure risk and impact`;
64433
+ var TOOL_CALLING_EXAMPLES = `## Tool calling examples
64434
+
64435
+ ### Standard tools (ctx.agentTools)
64436
+ \`\`\`ts
64437
+ // readFile
64438
+ const readme = await ctx.agentTools.readFile({ path: 'README.md' })
64439
+ if (readme == null) throw new Error('README.md not found')
64440
+
64441
+ // writeToFile
64442
+ await ctx.agentTools.writeToFile({ path: 'notes.txt', content: 'hello\\n' })
64443
+
64444
+ // executeCommand (args form)
64445
+ const rg = await ctx.agentTools.executeCommand({ command: 'rg', args: ['-n', 'TODO', '.'] })
64446
+ if (rg.exitCode !== 0) throw new Error(rg.stderr)
64447
+
64448
+ // executeCommand (shell form)
64449
+ await ctx.agentTools.executeCommand({ command: 'ls -la', shell: true, pipe: true })
64450
+ \`\`\`
64451
+
64452
+ ### Workflow helpers (ctx.tools)
64453
+ \`\`\`ts
64454
+ // runAgent (nested agent; use ctx.toolInfo as the tool list)
64455
+ const agentRes = await ctx.tools.runAgent({
64456
+ systemPrompt: 'You are a helpful assistant.',
64457
+ userMessage: [{ role: 'user', content: 'Summarize README.md in 3 bullets.' }],
64458
+ tools: (ctx.toolInfo ?? []) as any,
64459
+ })
64460
+ if (agentRes.type !== 'Exit') throw new Error('runAgent failed')
64461
+
64462
+ // confirm / input / select (interactive)
64463
+ const ok = await ctx.tools.confirm({ message: 'Proceed?' })
64464
+ const name = await ctx.tools.input({ message: 'Name?', default: 'main' })
64465
+ const flavor = await ctx.tools.select({
64466
+ message: 'Pick one',
64467
+ choices: [
64468
+ { name: 'A', value: 'a' },
64469
+ { name: 'B', value: 'b' },
64470
+ ],
64471
+ })
64472
+ \`\`\`
64473
+
64474
+ ### Sub-workflow example (ctx.runWorkflow)
64475
+ \`\`\`ts
64476
+ const results: any[] = []
64477
+ for (const pr of ctx.state.prs ?? []) {
64478
+ results.push(await ctx.runWorkflow('reviewPR', { prId: pr.id }))
64479
+ }
64480
+ return results
64481
+ \`\`\``;
64482
+ var COMPLETE_STEP_EXAMPLE = `## Complete Example: High-Quality Step Implementation
64483
+
64484
+ This example demonstrates all quality guidelines in a single step:
64485
+
64486
+ \`\`\`ts
64487
+ // Step: processUserData
64488
+ // Task: Read, validate, and process user data from a file
64489
+
64490
+ // Input validation
64491
+ if (!ctx.input.dataFile) {
64492
+ throw new Error('Missing required input: dataFile')
64493
+ }
64494
+
64495
+ ctx.logger.info(\`Starting user data processing for: \${ctx.input.dataFile}\`)
64496
+
64497
+ // Read file with error handling
64498
+ let rawData
64499
+ try {
64500
+ ctx.logger.debug(\`Reading file: \${ctx.input.dataFile}\`)
64501
+ rawData = await ctx.agentTools.readFile({ path: ctx.input.dataFile })
64502
+
64503
+ if (!rawData) {
64504
+ throw new Error(\`File not found or empty: \${ctx.input.dataFile}\`)
64505
+ }
64506
+ } catch (error) {
64507
+ const err = error instanceof Error ? error : new Error(String(error))
64508
+ ctx.logger.error(\`Failed to read file: \${err.message}\`)
64509
+ throw err // Preserve original stack trace
64510
+ }
64511
+
64512
+ // Parse and validate data
64513
+ let users
64514
+ try {
64515
+ ctx.logger.debug('Parsing JSON data')
64516
+ const parsed = JSON.parse(rawData)
64517
+
64518
+ if (!parsed?.users || !Array.isArray(parsed.users)) {
64519
+ throw new Error('Invalid data format: expected {users: [...]}')
64520
+ }
64521
+
64522
+ users = parsed.users
64523
+ ctx.logger.info(\`Found \${users.length} users to process\`)
64524
+ } catch (error) {
64525
+ const err = error instanceof Error ? error : new Error(String(error))
64526
+ ctx.logger.error(\`Data parsing failed: \${err.message}\`)
64527
+ throw err // Preserve original stack trace
64528
+ }
64529
+
64530
+ // Process each user with progress reporting
64531
+ const results = []
64532
+ for (let i = 0; i < users.length; i++) {
64533
+ const user = users[i]
64534
+
64535
+ // Validate each user object
64536
+ if (!user?.id || !user?.email) {
64537
+ ctx.logger.warn(\`Skipping invalid user at index \${i}: missing id or email\`)
64538
+ continue
64539
+ }
64540
+
64541
+ // Process user
64542
+ const processed = {
64543
+ id: user.id,
64544
+ email: user.email.toLowerCase().trim(),
64545
+ name: user.name?.trim() || 'Unknown',
64546
+ processedAt: new Date().toISOString(),
64547
+ status: 'active'
64548
+ }
64549
+
64550
+ results.push(processed)
64551
+
64552
+ // Progress feedback every 10 items
64553
+ if ((i + 1) % 10 === 0) {
64554
+ ctx.logger.info(\`Processed \${i + 1}/\${users.length} users\`)
64555
+ }
64556
+ }
64557
+
64558
+ ctx.logger.info(\`Successfully processed \${results.length}/\${users.length} users\`)
64559
+
64560
+ // Return structured result with metadata
64561
+ return {
64562
+ users: results,
64563
+ metadata: {
64564
+ totalInput: users.length,
64565
+ totalProcessed: results.length,
64566
+ skipped: users.length - results.length,
64567
+ processedAt: new Date().toISOString()
64568
+ }
64569
+ }
64570
+ \`\`\`
64571
+
64572
+ Key features demonstrated:
64573
+ - Input validation at start
64574
+ - Comprehensive error handling with try-catch that preserves stack traces
64575
+ - Logging at info, debug, warn, and error levels
64576
+ - Progress reporting for long operations (every 10 items)
64577
+ - Data validation throughout (null checks, type checks, array validation)
64578
+ - Structured return value with metadata for observability
64579
+ - Descriptive error messages with context
64580
+ - Meaningful variable names (rawData, users, processed)
64581
+ - Clean async/await usage
64582
+ - Template literals for readable string interpolation
64583
+ - Proper error type guards (error instanceof Error)`;
64584
+ var CODE_FIELD_CONSTRAINTS = `REMEMBER: The "code" field must be ONLY the function body statements.
64585
+ - DO NOT wrap code in arrow functions: \`(ctx) => { ... }\`
64586
+ - DO NOT wrap code in async functions: \`async (ctx) => { ... }\`
64587
+ - DO NOT include outer curly braces
64588
+ - DO include a return statement if the step should produce output
64589
+ - Each "code" field should be a string containing multiple statements separated by newlines`;
64590
+ function composeImplementationGuidelines() {
64591
+ return [
64592
+ RUNTIME_CONTEXT_TYPES,
64593
+ "",
64594
+ CONTEXT_USAGE_GUIDELINES,
64595
+ "",
64596
+ QUALITY_GUIDELINES,
64597
+ "",
64598
+ TOOL_CALLING_EXAMPLES,
64599
+ "",
64600
+ COMPLETE_STEP_EXAMPLE
64601
+ ].join(`
64602
+ `);
64603
+ }
64604
+
64405
64605
  // ../core/src/workflow/dynamic-generator.workflow.ts
64406
64606
  var GenerateWorkflowDefinitionInputSchema = exports_external.object({
64407
64607
  prompt: exports_external.string(),
64408
64608
  availableTools: exports_external.array(exports_external.object({
64409
64609
  name: exports_external.string(),
64410
64610
  description: exports_external.string()
64411
- })).optional()
64611
+ })).nullish()
64412
64612
  });
64413
64613
  var GenerateWorkflowCodeInputSchema = exports_external.object({
64414
- workflow: WorkflowFileSchema
64614
+ workflow: WorkflowFileSchema,
64615
+ skipReview: exports_external.boolean().nullish()
64415
64616
  });
64617
+ var AsyncFunction2 = Object.getPrototypeOf(async () => {}).constructor;
64618
+ function validateWorkflowDefinition(workflow) {
64619
+ const errors4 = [];
64620
+ if (!workflow.workflows.main) {
64621
+ errors4.push("Missing required 'main' workflow");
64622
+ }
64623
+ for (const [wfId, wf] of Object.entries(workflow.workflows)) {
64624
+ const stepIds = new Set;
64625
+ for (const step of wf.steps) {
64626
+ if (stepIds.has(step.id)) {
64627
+ errors4.push(`Duplicate step ID '${step.id}' in workflow '${wfId}'`);
64628
+ }
64629
+ stepIds.add(step.id);
64630
+ }
64631
+ }
64632
+ return { valid: errors4.length === 0, errors: errors4 };
64633
+ }
64634
+ function validateWorkflowCodeSyntax(workflow) {
64635
+ const errors4 = [];
64636
+ for (const [wfId, wf] of Object.entries(workflow.workflows)) {
64637
+ for (const step of wf.steps) {
64638
+ if (step.code) {
64639
+ try {
64640
+ new AsyncFunction2("ctx", step.code);
64641
+ } catch (e) {
64642
+ const errorMsg = e instanceof Error ? e.message : String(e);
64643
+ errors4.push(`Syntax error in ${wfId}.${step.id}: ${errorMsg}`);
64644
+ }
64645
+ }
64646
+ }
64647
+ }
64648
+ return { valid: errors4.length === 0, errors: errors4 };
64649
+ }
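The two validators above run before any generated step code is executed: `validateWorkflowDefinition` rejects definitions that lack a `main` workflow or reuse a step ID, and `validateWorkflowCodeSyntax` compiles each step's `code` string with the `AsyncFunction` constructor so syntax errors surface as validation errors rather than runtime failures. A minimal standalone sketch of the same compile-only check (names here are illustrative, not exports of the package):

```ts
// Sketch: parse a step body the way the validator above does, without executing it.
// "ctx" is the single parameter, matching the documented async (ctx) => { ... } wrapping.
const AsyncFunction = Object.getPrototypeOf(async () => {}).constructor as new (
  ...args: string[]
) => unknown

function checkStepBody(body: string): string | null {
  try {
    new AsyncFunction("ctx", body) // construction parses the body; nothing runs
    return null
  } catch (e) {
    return e instanceof Error ? e.message : String(e)
  }
}

checkStepBody("const x = await ctx.agentTools.readFile({ path: 'README.md' })\nreturn x") // null
checkStepBody("return {") // a SyntaxError message (exact wording is engine-dependent)
```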
64416
64650
  var WORKFLOW_DEFINITION_SYSTEM_PROMPT = `You are an expert workflow architect.
64417
64651
  Your task is to create a JSON workflow definition based on the user's request.
64418
64652
 
@@ -64428,8 +64662,11 @@ The workflow definition must follow this structure:
64428
64662
  {
64429
64663
  "id": "stepId",
64430
64664
  "task": "Description of the step",
64431
- "tools": ["toolName1", "toolName2"], // Optional list of tools needed
64432
- "output": "outputVariableName", // Optional
64665
+ "tools": ["toolName1", "toolName2"], // Optional: restrict which tools can be used. Can use groups: "readonly", "readwrite", "internet", "all".
64666
+ "output": "outputVariableName", // Optional: defaults to step id
64667
+ "timeout": 30000, // Optional: timeout in milliseconds
64668
+ "expected_outcome": "What this step produces", // Optional: documentation for expected results
64669
+ "outputSchema": { "type": "object" } // Optional: validation schema
64433
64670
  }
64434
64671
  ],
64435
64672
  "output": "outputVariableName" // Optional
@@ -64443,7 +64680,24 @@ Constraints:
64443
64680
  - Break down complex tasks into logical steps.
64444
64681
  - Define clear inputs and outputs.
64445
64682
 
64446
- Example 1:
64683
+ Quality Guidelines:
64684
+ - Add "timeout" field (in milliseconds) for steps that might take long (file I/O, API calls, searches)
64685
+ - Use "expected_outcome" field to document what each step should produce and its format
64686
+ - Use descriptive step IDs (e.g., "validateInput", "fetchUserData", not "step1", "step2")
64687
+ - Design steps to be focused - one responsibility per step
64688
+ - For steps that process multiple items, consider creating a sub-workflow
64689
+ - Add "outputSchema" with type information for validation-critical steps
64690
+ - Order steps logically with clear data flow
64691
+
64692
+ ### Using expected_outcome Effectively
64693
+
64694
+ The "expected_outcome" field helps document what each step produces. Best practices:
64695
+ - Describe the data structure: "Returns an array of { id, name, status } objects"
64696
+ - Mention important constraints: "Returns at most 10 results, sorted by date"
64697
+ - Note failure modes: "Returns null if file not found"
64698
+ - Document side effects: "Creates output directory if it doesn't exist"
64699
+
64700
+ Example 1 - Research workflow:
64447
64701
  User: "Research a topic and summarize it."
64448
64702
  Output:
64449
64703
  \`\`\`json
@@ -64459,13 +64713,16 @@ Output:
64459
64713
  "id": "search",
64460
64714
  "task": "Search for information about the topic",
64461
64715
  "tools": ["search"],
64462
- "output": "searchResults"
64716
+ "output": "searchResults",
64717
+ "timeout": 30000,
64718
+ "expected_outcome": "Returns search results with titles, URLs, and snippets related to the topic"
64463
64719
  },
64464
64720
  {
64465
64721
  "id": "summarize",
64466
64722
  "task": "Summarize the search results",
64467
- "tools": ["generateText"],
64468
- "output": "summary"
64723
+ "tools": ["runAgent"],
64724
+ "output": "summary",
64725
+ "expected_outcome": "Returns a concise summary string (2-3 paragraphs) of the key findings"
64469
64726
  }
64470
64727
  ],
64471
64728
  "output": "summary"
@@ -64474,7 +64731,7 @@ Output:
64474
64731
  }
64475
64732
  \`\`\`
64476
64733
 
64477
- Example 2:
64734
+ Example 2 - PR review workflow with sub-workflow:
64478
64735
  User: "Review urgent PRs. For each PR, run the review workflow."
64479
64736
  Output:
64480
64737
  \`\`\`json
@@ -64488,13 +64745,16 @@ Output:
64488
64745
  "id": "fetchPRs",
64489
64746
  "task": "Fetch list of urgent PRs",
64490
64747
  "tools": ["github_list_prs"],
64491
- "output": "prs"
64748
+ "output": "prs",
64749
+ "timeout": 15000,
64750
+ "expected_outcome": "Returns array of PR objects with { id, title, author, url }"
64492
64751
  },
64493
64752
  {
64494
64753
  "id": "reviewEachPR",
64495
64754
  "task": "Run review workflow for each PR",
64496
64755
  "tools": [],
64497
- "output": "reviews"
64756
+ "output": "reviews",
64757
+ "expected_outcome": "Returns array of review results, one per PR"
64498
64758
  }
64499
64759
  ],
64500
64760
  "output": "reviews"
@@ -64509,13 +64769,16 @@ Output:
64509
64769
  "id": "getDiff",
64510
64770
  "task": "Get PR diff",
64511
64771
  "tools": ["github_get_diff"],
64512
- "output": "diff"
64772
+ "output": "diff",
64773
+ "timeout": 10000,
64774
+ "expected_outcome": "Returns the unified diff string for the PR"
64513
64775
  },
64514
64776
  {
64515
64777
  "id": "analyze",
64516
- "task": "Analyze the diff",
64517
- "tools": ["generateText"],
64518
- "output": "analysis"
64778
+ "task": "Analyze the diff and provide feedback",
64779
+ "tools": ["runAgent"],
64780
+ "output": "analysis",
64781
+ "expected_outcome": "Returns { summary, issues, suggestions } object with review feedback"
64519
64782
  }
64520
64783
  ],
64521
64784
  "output": "analysis"
@@ -64523,77 +64786,130 @@ Output:
64523
64786
  }
64524
64787
  }
64525
64788
  \`\`\`
64789
+
64790
+ Example 3 - File processing with conditional logic:
64791
+ User: "Process all JSON files in a directory, validate them, and generate a report."
64792
+ Output:
64793
+ \`\`\`json
64794
+ {
64795
+ "workflows": {
64796
+ "main": {
64797
+ "task": "Process and validate JSON files, then generate a report",
64798
+ "inputs": [
64799
+ { "id": "directory", "description": "Directory containing JSON files", "default": "data" }
64800
+ ],
64801
+ "steps": [
64802
+ {
64803
+ "id": "listJsonFiles",
64804
+ "task": "List all JSON files in the directory",
64805
+ "tools": ["listFiles"],
64806
+ "output": "jsonFiles",
64807
+ "timeout": 5000,
64808
+ "expected_outcome": "Returns array of file paths ending in .json"
64809
+ },
64810
+ {
64811
+ "id": "processFiles",
64812
+ "task": "Process each JSON file using the processFile sub-workflow",
64813
+ "tools": [],
64814
+ "output": "processedResults",
64815
+ "expected_outcome": "Returns array of { file, valid, errors?, data? } for each file"
64816
+ },
64817
+ {
64818
+ "id": "generateReport",
64819
+ "task": "Generate a summary report of all processed files",
64820
+ "tools": ["writeToFile"],
64821
+ "output": "reportPath",
64822
+ "expected_outcome": "Writes report to 'report.md' and returns the file path"
64823
+ }
64824
+ ],
64825
+ "output": "reportPath"
64826
+ },
64827
+ "processFile": {
64828
+ "task": "Process and validate a single JSON file",
64829
+ "inputs": [
64830
+ { "id": "filePath", "description": "Path to the JSON file" }
64831
+ ],
64832
+ "steps": [
64833
+ {
64834
+ "id": "readFile",
64835
+ "task": "Read the JSON file content",
64836
+ "tools": ["readFile"],
64837
+ "output": "content",
64838
+ "timeout": 3000,
64839
+ "expected_outcome": "Returns file content as string, or null if not found"
64840
+ },
64841
+ {
64842
+ "id": "validateJson",
64843
+ "task": "Parse and validate the JSON structure",
64844
+ "tools": [],
64845
+ "output": "validationResult",
64846
+ "expected_outcome": "Returns { valid: boolean, data?: object, errors?: string[] }"
64847
+ }
64848
+ ],
64849
+ "output": "validationResult"
64850
+ }
64851
+ }
64852
+ }
64853
+ \`\`\`
64526
64854
  `;
64855
+ var WORKFLOW_IMPLEMENTATION_GUIDELINES = composeImplementationGuidelines();
64527
64856
  var WORKFLOW_CODE_SYSTEM_PROMPT = `You are an expert TypeScript developer.
64528
64857
  Your task is to implement the TypeScript code for the steps in the provided workflow definition.
64529
64858
 
64530
64859
  You will receive a JSON workflow definition where the "code" field is null.
64531
64860
  You must fill in the "code" field for each step with valid TypeScript code.
64532
64861
 
64533
- The code will be executed in an async function with the following signature:
64534
- async (ctx) => {
64535
- // Your code here
64536
- }
64537
-
64538
- The \`ctx\` object provides access to:
64539
- - \`ctx.input\`: The workflow inputs.
64540
- - \`ctx.state\`: A shared state object for passing data between steps.
64541
- - \`ctx.tools\`: An object containing available tools.
64542
- - \`ctx.runWorkflow\`: (workflowId: string, input?: any) => Promise<any>. Use this to run other workflows.
64543
-
64544
- Guidelines:
64545
- - Use \`await\` for asynchronous operations.
64546
- - Return the output value of the step.
64547
- - Access inputs via \`ctx.input.inputName\`.
64548
- - Access previous step outputs via \`ctx.state.stepOutputName\`.
64549
- - Use \`ctx.tools.invokeTool({ toolName: 'name', input: { ... } })\` to call tools.
64550
- - Use \`ctx.tools.generateText({ messages: [...] })\` for LLM calls.
64551
- - Use \`ctx.tools.invokeTool({ toolName: 'runAgent', input: { prompt: '...' } })\` for complex sub-tasks that require multiple steps or tools. Prefer this over \`generateText\` for advanced tasks.
64552
-
64553
- Example Code for a step:
64554
- \`\`\`typescript
64555
- const searchResults = await ctx.tools.invokeTool({
64556
- toolName: 'search',
64557
- input: { query: ctx.input.topic }
64558
- });
64559
- return searchResults;
64560
- \`\`\`
64862
+ CRITICAL: Each step "code" field must contain ONLY the function body statements (the code inside the curly braces).
64863
+ DO NOT include function declaration, arrow function syntax, async keyword, parameter list, or outer curly braces.
64561
64864
 
64562
- Example Code for LLM step:
64563
- \`\`\`typescript
64564
- const summary = await ctx.tools.generateText({
64565
- messages: [
64566
- { role: 'system', content: 'Summarize the following text.' },
64567
- { role: 'user', content: ctx.state.searchResults }
64568
- ]
64569
- });
64570
- return summary;
64571
- \`\`\`
64865
+ Prefer using \`ctx.tools.runAgent\` for complex tasks or when multiple steps/tools are needed. Use \`ctx.agentTools\` for direct tool usage (e.g. \`ctx.agentTools.readFile\`).
64572
64866
 
64573
- Example Code for runAgent:
64574
- \`\`\`typescript
64575
- const result = await ctx.tools.invokeTool({
64576
- toolName: 'runAgent',
64577
- input: {
64578
- prompt: 'Research the history of the internet and write a summary.',
64579
- tools: ['search', 'generateText']
64580
- }
64581
- });
64582
- return result;
64867
+ The code will be wrapped automatically in: \`async (ctx) => { YOUR_CODE_HERE }\`
64868
+
64869
+ Example of CORRECT code field:
64870
+ \`\`\`ts
64871
+ const result = await ctx.agentTools.readFile({ path: 'README.md' })
64872
+ if (!result) throw new Error('File not found')
64873
+ return result
64583
64874
  \`\`\`
64584
64875
 
64585
- Example Code for invoking a sub-workflow:
64586
- \`\`\`typescript
64587
- const results = [];
64588
- for (const pr of ctx.state.prs) {
64589
- const review = await ctx.runWorkflow('reviewPR', { prId: pr.id });
64590
- results.push(review);
64876
+ Example of INCORRECT code field (DO NOT DO THIS):
64877
+ \`\`\`ts
64878
+ async (ctx) => {
64879
+ const result = await ctx.agentTools.readFile({ path: 'README.md' })
64880
+ return result
64591
64881
  }
64592
- return results;
64593
64882
  \`\`\`
64594
64883
 
64884
+ ${WORKFLOW_IMPLEMENTATION_GUIDELINES}
64885
+
64886
+ ## Final Instructions
64887
+
64888
+ ${CODE_FIELD_CONSTRAINTS}
64889
+
64595
64890
  Return the complete workflow JSON with the "code" fields populated.
64596
64891
  `;
64892
+ var WORKFLOW_REVIEW_SYSTEM_PROMPT = `You are an expert TypeScript Code Reviewer.
64893
+ Your task is to review the provided workflow definition and its implemented code, and improve it to meet the highest quality standards.
64894
+
64895
+ You will receive a JSON workflow definition where the "code" fields are already populated.
64896
+ You must review each step's code and improve it if necessary.
64897
+
64898
+ Check for:
64899
+ - Correct usage of \`ctx.agentTools\` (for standard tools) and \`ctx.tools\` (for workflow helpers).
64900
+ - Proper error handling (try-catch, input validation).
64901
+ - Meaningful logging.
64902
+ - Adherence to the Quality Guidelines.
64903
+ - Correct syntax (no outer function wrappers).
64904
+
64905
+ ${QUALITY_GUIDELINES}
64906
+
64907
+ ## Final Instructions
64908
+
64909
+ Return the complete workflow JSON with the "code" fields improved where necessary.
64910
+ Ensure the "code" field still contains ONLY the function body statements.
64911
+ `;
64912
+ var MAX_GENERATION_ATTEMPTS = 3;
64597
64913
  var generateWorkflowDefinitionWorkflow = async (input, ctx) => {
64598
64914
  let systemPrompt = WORKFLOW_DEFINITION_SYSTEM_PROMPT;
64599
64915
  if (input.availableTools && input.availableTools.length > 0) {
@@ -64614,24 +64930,75 @@ Use these tools when appropriate.`;
64614
64930
  outputSchema: WorkflowFileSchema
64615
64931
  }, ctx);
64616
64932
  });
64617
- if (result.type === "Exit" && result.object) {
64618
- return result.object;
64933
+ if (result.type !== "Exit" || !result.object) {
64934
+ throw new Error("Failed to generate workflow definition");
64619
64935
  }
64620
- throw new Error("Failed to generate workflow definition");
64936
+ const workflow = result.object;
64937
+ await ctx.step("validate-workflow-definition", async () => {
64938
+ const validation = validateWorkflowDefinition(workflow);
64939
+ if (!validation.valid) {
64940
+ ctx.logger.warn(`Workflow definition validation warnings: ${validation.errors.join("; ")}`);
64941
+ }
64942
+ return validation;
64943
+ });
64944
+ return workflow;
64621
64945
  };
64622
64946
  var generateWorkflowCodeWorkflow = async (input, ctx) => {
64623
- const result = await ctx.step("generate-workflow-code", async () => {
64624
- return agentWorkflow({
64625
- systemPrompt: WORKFLOW_CODE_SYSTEM_PROMPT,
64626
- userMessage: [{ role: "user", content: JSON.stringify(input.workflow, null, 2) }],
64627
- tools: [],
64628
- outputSchema: WorkflowFileSchema
64629
- }, ctx);
64630
- });
64631
- if (result.type === "Exit" && result.object) {
64632
- return result.object;
64947
+ let lastError = null;
64948
+ let currentWorkflow = input.workflow;
64949
+ for (let attempt = 0;attempt < MAX_GENERATION_ATTEMPTS; attempt++) {
64950
+ const stepName = attempt === 0 ? "generate-workflow-code" : `retry-workflow-code-${attempt}`;
64951
+ const userMessage = lastError ? `Previous attempt had issues: ${lastError}
64952
+
64953
+ Please fix the problems in this workflow:
64954
+ ${JSON.stringify(currentWorkflow, null, 2)}` : JSON.stringify(currentWorkflow, null, 2);
64955
+ const generated = await ctx.step(stepName, async () => {
64956
+ return agentWorkflow({
64957
+ systemPrompt: WORKFLOW_CODE_SYSTEM_PROMPT,
64958
+ userMessage: [{ role: "user", content: userMessage }],
64959
+ tools: [],
64960
+ outputSchema: WorkflowFileSchema
64961
+ }, ctx);
64962
+ });
64963
+ if (generated.type !== "Exit" || !generated.object) {
64964
+ lastError = "Failed to generate workflow code";
64965
+ continue;
64966
+ }
64967
+ const generatedWorkflow = generated.object;
64968
+ const syntaxValidation = await ctx.step(`validate-code-syntax-${attempt}`, async () => {
64969
+ return validateWorkflowCodeSyntax(generatedWorkflow);
64970
+ });
64971
+ if (!syntaxValidation.valid) {
64972
+ lastError = syntaxValidation.errors.join("; ");
64973
+ currentWorkflow = generatedWorkflow;
64974
+ ctx.logger.warn(`Code syntax validation failed (attempt ${attempt + 1}): ${lastError}`);
64975
+ continue;
64976
+ }
64977
+ if (input.skipReview) {
64978
+ return generatedWorkflow;
64979
+ }
64980
+ const reviewed = await ctx.step("review-workflow-code", async () => {
64981
+ return agentWorkflow({
64982
+ systemPrompt: WORKFLOW_REVIEW_SYSTEM_PROMPT,
64983
+ userMessage: [{ role: "user", content: JSON.stringify(generatedWorkflow, null, 2) }],
64984
+ tools: [],
64985
+ outputSchema: WorkflowFileSchema
64986
+ }, ctx);
64987
+ });
64988
+ if (reviewed.type !== "Exit" || !reviewed.object) {
64989
+ throw new Error("Failed to review workflow code");
64990
+ }
64991
+ const reviewedWorkflow = reviewed.object;
64992
+ const reviewSyntaxValidation = await ctx.step("validate-reviewed-code-syntax", async () => {
64993
+ return validateWorkflowCodeSyntax(reviewedWorkflow);
64994
+ });
64995
+ if (!reviewSyntaxValidation.valid) {
64996
+ ctx.logger.warn(`Reviewed code has syntax issues: ${reviewSyntaxValidation.errors.join("; ")}`);
64997
+ return generatedWorkflow;
64998
+ }
64999
+ return reviewedWorkflow;
64633
65000
  }
64634
- throw new Error("Failed to generate workflow code");
65001
+ throw new Error(`Failed to generate valid workflow code after ${MAX_GENERATION_ATTEMPTS} attempts: ${lastError}`);
64635
65002
  };
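The new code-generation workflow above is a bounded retry loop: generate the step code, syntax-check it, optionally pass it through the review prompt unless `skipReview` is set, and keep the reviewed version only if it still passes the syntax check; after `MAX_GENERATION_ATTEMPTS` failures it throws with the last error. A simplified, hedged sketch of that control flow (the real stages run LLM agents inside `ctx.step`; here they are plain callbacks):

```ts
// Control-flow sketch only, not the package implementation.
type Check = { valid: boolean; errors: string[] }

async function generateWithRetrySketch<W>(
  workflow: W,
  opts: { skipReview?: boolean },
  generate: (wf: W, lastError: string | null) => Promise<W>,
  review: (wf: W) => Promise<W>,
  validate: (wf: W) => Check,
  maxAttempts = 3,
): Promise<W> {
  let lastError: string | null = null
  let current = workflow
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const generated = await generate(current, lastError)
    const check = validate(generated)
    if (!check.valid) {
      lastError = check.errors.join("; ")
      current = generated // feed the broken version back so the next attempt can repair it
      continue
    }
    if (opts.skipReview) return generated
    const reviewed = await review(generated)
    // keep the reviewed version only if it still passes the syntax check
    return validate(reviewed).valid ? reviewed : generated
  }
  throw new Error(`Failed after ${maxAttempts} attempts: ${lastError}`)
}
```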
64636
65003
  // ../core/src/workflow/json-ai-types.ts
64637
65004
  var toJsonDataContent = (data) => {
@@ -68230,15 +68597,15 @@ function useKeypress(userHandler) {
68230
68597
  signal.current = userHandler;
68231
68598
  useEffect((rl) => {
68232
68599
  let ignore = false;
68233
- const handler19 = withUpdates((_input, event) => {
68600
+ const handler13 = withUpdates((_input, event) => {
68234
68601
  if (ignore)
68235
68602
  return;
68236
68603
  signal.current(event, rl);
68237
68604
  });
68238
- rl.input.on("keypress", handler19);
68605
+ rl.input.on("keypress", handler13);
68239
68606
  return () => {
68240
68607
  ignore = true;
68241
- rl.input.removeListener("keypress", handler19);
68608
+ rl.input.removeListener("keypress", handler13);
68242
68609
  };
68243
68610
  }, []);
68244
68611
  }
@@ -68829,16 +69196,16 @@ class Emitter {
68829
69196
 
68830
69197
  class SignalExitBase {
68831
69198
  }
68832
- var signalExitWrap = (handler19) => {
69199
+ var signalExitWrap = (handler13) => {
68833
69200
  return {
68834
69201
  onExit(cb, opts) {
68835
- return handler19.onExit(cb, opts);
69202
+ return handler13.onExit(cb, opts);
68836
69203
  },
68837
69204
  load() {
68838
- return handler19.load();
69205
+ return handler13.load();
68839
69206
  },
68840
69207
  unload() {
68841
- return handler19.unload();
69208
+ return handler13.unload();
68842
69209
  }
68843
69210
  };
68844
69211
  };
@@ -70884,11 +71251,6 @@ Tool error:`, event.tool));
70884
71251
  }
70885
71252
  break;
70886
71253
  }
70887
- case "Exit" /* Exit */:
70888
- if (verbose > 0) {
70889
- customConsole.log("Exit Message:", event.exitReason.message);
70890
- }
70891
- break;
70892
71254
  }
70893
71255
  for (const [tool3, taskStats] of taskToolCallStats.entries()) {
70894
71256
  const globalStats = globalToolCallStats.get(tool3) ?? { calls: 0, success: 0, errors: 0 };
@@ -84384,7 +84746,7 @@ class ApiProviderConfig {
84384
84746
  return this.resolveModelConfig(mergedConfig);
84385
84747
  }
84386
84748
  resolveModelConfig(config4) {
84387
- const { provider: provider3, model, parameters, budget } = config4;
84749
+ const { provider: provider3, model, parameters, budget, rules } = config4;
84388
84750
  const finalProvider = provider3 ?? this.defaultProvider;
84389
84751
  if (!finalProvider) {
84390
84752
  return;
@@ -84405,7 +84767,8 @@ class ApiProviderConfig {
84405
84767
  keyFile,
84406
84768
  baseUrl,
84407
84769
  parameters: finalParameters,
84408
- budget
84770
+ budget,
84771
+ rules
84409
84772
  };
84410
84773
  }
84411
84774
  }
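The resolver above now destructures the new `rules` entry together with provider, model, parameters, and budget, and copies it onto the resolved configuration unchanged; nothing is interpreted at this stage. A rough sketch of the pass-through (the types are illustrative only):

```ts
// Illustrative shapes only; the real config schema lives in ../core/src/config.ts.
type ModelConfigSketch = {
  provider?: string
  model?: string
  parameters?: Record<string, unknown>
  budget?: number
  rules?: unknown // a string or an array of rule entries
}

function resolveSketch(config: ModelConfigSketch, defaultProvider?: string) {
  const { provider = defaultProvider, model, parameters, budget, rules } = config
  if (!provider) return undefined
  // `rules` rides along untouched; later context assembly decides how to render it
  return { provider, model, parameters, budget, rules }
}
```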
@@ -84584,9 +84947,41 @@ function getProviderOptions(options) {
84584
84947
 
84585
84948
  // src/tool-implementations.ts
84586
84949
  import { spawn as spawn3, spawnSync as spawnSync2 } from "node:child_process";
84587
- import fs3, { mkdir as mkdir2 } from "node:fs/promises";
84950
+ import fs4, { mkdir as mkdir2 } from "node:fs/promises";
84588
84951
  import { dirname as dirname2 } from "node:path";
84589
84952
 
84953
+ // src/tools/getTodoItem.ts
84954
+ var toolInfo13 = {
84955
+ name: "getTodoItem",
84956
+ description: "Get a to-do item by its ID.",
84957
+ parameters: exports_external.object({
84958
+ id: exports_external.string().describe("The ID of the to-do item.")
84959
+ })
84960
+ };
84961
+ var handler13 = async (provider3, args) => {
84962
+ if (!provider3.getTodoItem) {
84963
+ return {
84964
+ success: false,
84965
+ message: {
84966
+ type: "error-text",
84967
+ value: "Not possible to get a to-do item."
84968
+ }
84969
+ };
84970
+ }
84971
+ const { id } = toolInfo13.parameters.parse(args);
84972
+ const item = await provider3.getTodoItem(id);
84973
+ return {
84974
+ success: true,
84975
+ message: {
84976
+ type: "json",
84977
+ value: item ?? null
84978
+ }
84979
+ };
84980
+ };
84981
+ var getTodoItem_default = {
84982
+ ...toolInfo13,
84983
+ handler: handler13
84984
+ };
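This release moves tool handlers to a `{ success, message }` result (visible in the `success: false` / `success: true` branches above) in place of the earlier `type: "Reply"` / `type: "Error"` variants. A hedged sketch of calling one of the new handlers directly, using a stand-in provider (the provider object and the to-do item shape are hypothetical, for illustration only):

```ts
// Declared here only so the sketch stands alone; in the bundle this is the
// getTodoItem_default object defined above.
declare const getTodoItem_default: {
  handler: (provider: unknown, args: unknown) => Promise<{ success: boolean; message: { type: string; value: unknown } }>
}

// Illustrative only: a fake provider used to exercise the { success, message } shape.
const fakeProvider = {
  getTodoItem: async (id: string) => ({ id, title: "Write release notes" }),
}

async function demo() {
  // The handler parses args against its zod schema and forwards to provider.getTodoItem.
  const result = await getTodoItem_default.handler(fakeProvider, { id: "todo-1" })
  if (result.success) {
    console.log(result.message.value) // message.type === "json"; value is the item or null
  } else {
    console.error(result.message.value) // message.type === "error-text"
  }
}
```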
84590
84985
  // src/tools/utils/diffLineNumbers.ts
84591
84986
  function parseHunkHeader(header) {
84592
84987
  const match = header.match(/^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
@@ -84646,7 +85041,7 @@ function annotateDiffWithLineNumbers(diff) {
84646
85041
  }
84647
85042
 
84648
85043
  // src/tools/gitDiff.ts
84649
- var toolInfo19 = {
85044
+ var toolInfo14 = {
84650
85045
  name: "git_diff",
84651
85046
  description: "Get the git diff for the current repository. Can be used to get staged changes, unstaged changes, or changes between commits. By default, it returns unstaged changes.",
84652
85047
  parameters: exports_external.object({
@@ -84675,17 +85070,17 @@ var toolInfo19 = {
84675
85070
  }, exports_external.boolean().optional().default(true)).describe("Annotate the diff with line numbers for additions and deletions.")
84676
85071
  })
84677
85072
  };
84678
- var handler19 = async (provider3, args) => {
85073
+ var handler14 = async (provider3, args) => {
84679
85074
  if (!provider3.executeCommand) {
84680
85075
  return {
84681
- type: "Error" /* Error */,
85076
+ success: false,
84682
85077
  message: {
84683
85078
  type: "error-text",
84684
85079
  value: "Not possible to execute command. Abort."
84685
85080
  }
84686
85081
  };
84687
85082
  }
84688
- const { staged, file: file2, commitRange, contextLines, includeLineNumbers } = toolInfo19.parameters.parse(args);
85083
+ const { staged, file: file2, commitRange, contextLines, includeLineNumbers } = toolInfo14.parameters.parse(args);
84689
85084
  const commandParts = ["git", "diff", "--no-color", `-U${contextLines}`];
84690
85085
  if (staged) {
84691
85086
  commandParts.push("--staged");
@@ -84702,7 +85097,7 @@ var handler19 = async (provider3, args) => {
84702
85097
  if (result.exitCode === 0) {
84703
85098
  if (!result.stdout.trim()) {
84704
85099
  return {
84705
- type: "Reply" /* Reply */,
85100
+ success: true,
84706
85101
  message: {
84707
85102
  type: "text",
84708
85103
  value: "No diff found."
@@ -84714,7 +85109,7 @@ var handler19 = async (provider3, args) => {
84714
85109
  diffOutput = annotateDiffWithLineNumbers(diffOutput);
84715
85110
  }
84716
85111
  return {
84717
- type: "Reply" /* Reply */,
85112
+ success: true,
84718
85113
  message: {
84719
85114
  type: "text",
84720
85115
  value: `<diff file="${file2 ?? "all"}">
@@ -84724,7 +85119,7 @@ ${diffOutput}
84724
85119
  };
84725
85120
  }
84726
85121
  return {
84727
- type: "Error" /* Error */,
85122
+ success: false,
84728
85123
  message: {
84729
85124
  type: "error-text",
84730
85125
  value: `\`${command}\` exited with code ${result.exitCode}:
@@ -84733,7 +85128,7 @@ ${result.stderr}`
84733
85128
  };
84734
85129
  } catch (error46) {
84735
85130
  return {
84736
- type: "Error" /* Error */,
85131
+ success: false,
84737
85132
  message: {
84738
85133
  type: "error-text",
84739
85134
  value: error46 instanceof Error ? error46.message : String(error46)
@@ -84742,6 +85137,199 @@ ${result.stderr}`
84742
85137
  }
84743
85138
  };
84744
85139
  var gitDiff_default = {
85140
+ ...toolInfo14,
85141
+ handler: handler14
85142
+ };
85143
+ // src/tools/listMemoryTopics.ts
85144
+ var toolInfo15 = {
85145
+ name: "listMemoryTopics",
85146
+ description: "Lists all topics in memory. Use this to see what information has been stored and which topics are available to read from.",
85147
+ parameters: exports_external.object({})
85148
+ };
85149
+ var handler15 = async (provider3, _args) => {
85150
+ const topics = await provider3.listMemoryTopics();
85151
+ if (!topics.length) {
85152
+ return { success: true, message: { type: "text", value: "No topics found." } };
85153
+ }
85154
+ return {
85155
+ success: true,
85156
+ message: {
85157
+ type: "text",
85158
+ value: `Memory topics:
85159
+ ${topics.join(`
85160
+ `)}`
85161
+ }
85162
+ };
85163
+ };
85164
+ var listMemoryTopics_default = {
85165
+ ...toolInfo15,
85166
+ handler: handler15
85167
+ };
85168
+ // src/tools/listTodoItems.ts
85169
+ var toolInfo16 = {
85170
+ name: "listTodoItems",
85171
+ description: "List all to-do items, sorted by id. If an id is provided, it lists all sub-items for that id. Can be filtered by status.",
85172
+ parameters: exports_external.object({
85173
+ id: exports_external.string().nullish(),
85174
+ status: TodoStatus.nullish()
85175
+ })
85176
+ };
85177
+ var handler16 = async (provider3, args) => {
85178
+ if (!provider3.listTodoItems) {
85179
+ return {
85180
+ success: false,
85181
+ message: {
85182
+ type: "error-text",
85183
+ value: "Not possible to list to-do items."
85184
+ }
85185
+ };
85186
+ }
85187
+ const { id, status } = toolInfo16.parameters.parse(args);
85188
+ const items = await provider3.listTodoItems(id, status);
85189
+ return {
85190
+ success: true,
85191
+ message: {
85192
+ type: "json",
85193
+ value: items
85194
+ }
85195
+ };
85196
+ };
85197
+ var listTodoItems_default = {
85198
+ ...toolInfo16,
85199
+ handler: handler16
85200
+ };
85201
+ // src/tools/readMemory.ts
85202
+ var toolInfo17 = {
85203
+ name: "readMemory",
85204
+ description: "Reads content from a memory topic. Use this to retrieve information stored in previous steps. If no topic is specified, reads from the default topic.",
85205
+ parameters: exports_external.object({
85206
+ topic: exports_external.string().nullish().describe('The topic to read from memory. Defaults to ":default:".')
85207
+ })
85208
+ };
85209
+ var handler17 = async (provider3, args) => {
85210
+ const { topic } = toolInfo17.parameters.parse(args);
85211
+ const content = await provider3.readMemory(topic ?? undefined);
85212
+ if (content) {
85213
+ return {
85214
+ success: true,
85215
+ message: {
85216
+ type: "text",
85217
+ value: `<memory${topic ? ` topic="${topic}"` : ""}>
85218
+ ${content}
85219
+ </memory>`
85220
+ }
85221
+ };
85222
+ }
85223
+ return {
85224
+ success: true,
85225
+ message: {
85226
+ type: "text",
85227
+ value: `<memory ${topic ? `topic="${topic}"` : ""} isEmpty="true" />`
85228
+ }
85229
+ };
85230
+ };
85231
+ var readMemory_default = {
85232
+ ...toolInfo17,
85233
+ handler: handler17
85234
+ };
85235
+ // src/tools/updateMemory.ts
85236
+ var toolInfo18 = {
85237
+ name: "updateMemory",
85238
+ description: 'Appends, replaces, or removes content from a memory topic. Use "append" to add to existing content, "replace" to overwrite entirely, or "remove" to delete a topic. Memory persists across tool calls within a workflow.',
85239
+ parameters: exports_external.object({
85240
+ operation: exports_external.enum(["append", "replace", "remove"]).describe("The operation to perform."),
85241
+ topic: exports_external.string().nullish().describe('The topic to update in memory. Defaults to ":default:".'),
85242
+ content: exports_external.string().nullish().describe("The content for append or replace operations. Must be omitted for remove operation.")
85243
+ }).superRefine((data, ctx) => {
85244
+ if (data.operation === "append" || data.operation === "replace") {
85245
+ if (data.content === undefined) {
85246
+ ctx.addIssue({
85247
+ code: "custom",
85248
+ message: 'Content is required for "append" and "replace" operations.',
85249
+ path: ["content"]
85250
+ });
85251
+ }
85252
+ } else if (data.operation === "remove") {
85253
+ if (data.content !== undefined) {
85254
+ ctx.addIssue({
85255
+ code: "custom",
85256
+ message: 'Content must not be provided for "remove" operation.',
85257
+ path: ["content"]
85258
+ });
85259
+ }
85260
+ }
85261
+ })
85262
+ };
85263
+ var handler18 = async (provider3, args) => {
85264
+ if (!provider3.updateMemory) {
85265
+ return {
85266
+ success: false,
85267
+ message: {
85268
+ type: "error-text",
85269
+ value: "Memory operations are not supported by the current provider."
85270
+ }
85271
+ };
85272
+ }
85273
+ const params = toolInfo18.parameters.parse(args);
85274
+ await provider3.updateMemory(params.operation, params.topic ?? undefined, params.content ?? undefined);
85275
+ switch (params.operation) {
85276
+ case "append":
85277
+ return {
85278
+ success: true,
85279
+ message: {
85280
+ type: "text",
85281
+ value: `Content appended to memory topic '${params.topic || ":default:"}'.`
85282
+ }
85283
+ };
85284
+ case "replace":
85285
+ return {
85286
+ success: true,
85287
+ message: {
85288
+ type: "text",
85289
+ value: `Memory topic '${params.topic || ":default:"}' replaced.`
85290
+ }
85291
+ };
85292
+ case "remove":
85293
+ return {
85294
+ success: true,
85295
+ message: {
85296
+ type: "text",
85297
+ value: `Memory topic '${params.topic || ":default:"}' removed.`
85298
+ }
85299
+ };
85300
+ }
85301
+ };
85302
+ var updateMemory_default = {
85303
+ ...toolInfo18,
85304
+ handler: handler18
85305
+ };
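Together, `listMemoryTopics`, `readMemory`, and `updateMemory` form a small topic-keyed memory API, and the `superRefine` above enforces that `content` accompanies `append`/`replace` and is omitted for `remove`. The argument shapes below illustrate what that validation accepts and rejects (nothing is executed; the topic falls back to ":default:" when omitted):

```ts
// Accepted by the schema above
const appendArgs = { operation: "append", topic: "research", content: "Found 3 relevant PRs." }
const replaceArgs = { operation: "replace", topic: "plan", content: "Step 1: ..." }
const removeArgs = { operation: "remove", topic: "scratch" } // no content for remove

// Rejected by the superRefine check above
const removeWithContent = { operation: "remove", topic: "scratch", content: "oops" }
const appendWithoutContent = { operation: "append", topic: "notes" }
```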
85306
+ // src/tools/updateTodoItem.ts
85307
+ var toolInfo19 = {
85308
+ name: "updateTodoItem",
85309
+ description: "Add or update a to-do item.",
85310
+ parameters: UpdateTodoItemInputSchema
85311
+ };
85312
+ var handler19 = async (provider3, args) => {
85313
+ if (!provider3.updateTodoItem) {
85314
+ return {
85315
+ success: false,
85316
+ message: {
85317
+ type: "error-text",
85318
+ value: "Not possible to update a to-do item."
85319
+ }
85320
+ };
85321
+ }
85322
+ const input = toolInfo19.parameters.parse(args);
85323
+ const result = await provider3.updateTodoItem(input);
85324
+ return {
85325
+ success: true,
85326
+ message: {
85327
+ type: "json",
85328
+ value: result
85329
+ }
85330
+ };
85331
+ };
85332
+ var updateTodoItem_default = {
84745
85333
  ...toolInfo19,
84746
85334
  handler: handler19
84747
85335
  };
@@ -84817,12 +85405,14 @@ async function getUserInput(message, options = {}) {
84817
85405
 
84818
85406
  // src/workflows/workflow.utils.ts
84819
85407
  import { execSync } from "node:child_process";
85408
+ import { promises as fs3 } from "node:fs";
85409
+ import path from "node:path";
84820
85410
  function parseGitDiffNameStatus(diffOutput) {
84821
85411
  const lines = diffOutput.split(`
84822
85412
  `).filter((line) => line.trim());
84823
85413
  return lines.map((line) => {
84824
85414
  const [status, ...pathParts] = line.split("\t");
84825
- const path = pathParts.join("\t");
85415
+ const path2 = pathParts.join("\t");
84826
85416
  let statusDescription;
84827
85417
  switch (status[0]) {
84828
85418
  case "A":
@@ -84846,7 +85436,7 @@ function parseGitDiffNameStatus(diffOutput) {
84846
85436
  default:
84847
85437
  statusDescription = "Unknown";
84848
85438
  }
84849
- return { path, status: statusDescription };
85439
+ return { path: path2, status: statusDescription };
84850
85440
  });
84851
85441
  }
84852
85442
  function printChangedFiles(logger, changedFiles) {
@@ -84873,21 +85463,21 @@ function parseGitDiffNumStat(output) {
84873
85463
  if (parts.length >= 3) {
84874
85464
  const insertions = parts[0] === "-" ? 0 : Number.parseInt(parts[0], 10);
84875
85465
  const deletions = parts[1] === "-" ? 0 : Number.parseInt(parts[1], 10);
84876
- const path = unquotePath(parts.slice(2).join("\t"));
84877
- stats[path] = { insertions, deletions };
85466
+ const path2 = unquotePath(parts.slice(2).join("\t"));
85467
+ stats[path2] = { insertions, deletions };
84878
85468
  }
84879
85469
  }
84880
85470
  return stats;
84881
85471
  }
84882
- var unquotePath = (path) => {
84883
- if (path.startsWith('"') && path.endsWith('"')) {
85472
+ var unquotePath = (path2) => {
85473
+ if (path2.startsWith('"') && path2.endsWith('"')) {
84884
85474
  try {
84885
- return JSON.parse(path);
85475
+ return JSON.parse(path2);
84886
85476
  } catch {
84887
- return path;
85477
+ return path2;
84888
85478
  }
84889
85479
  }
84890
- return path;
85480
+ return path2;
84891
85481
  };
84892
85482
  function parseGitStatus(statusOutput) {
84893
85483
  const statusLines = statusOutput.split(`
@@ -84896,7 +85486,7 @@ function parseGitStatus(statusOutput) {
84896
85486
  for (const line of statusLines) {
84897
85487
  const indexStatus = line[0];
84898
85488
  const workingTreeStatus = line[1];
84899
- const path = line.length > 3 ? unquotePath(line.slice(3)) : line;
85489
+ const path2 = line.length > 3 ? unquotePath(line.slice(3)) : line;
84900
85490
  const statuses = [];
84901
85491
  if (indexStatus !== " " && indexStatus !== "?") {
84902
85492
  switch (indexStatus) {
@@ -84935,7 +85525,7 @@ function parseGitStatus(statusOutput) {
84935
85525
  }
84936
85526
  }
84937
85527
  if (statuses.length > 0) {
84938
- files.push({ path, status: statuses.join(", ") });
85528
+ files.push({ path: path2, status: statuses.join(", ") });
84939
85529
  }
84940
85530
  }
84941
85531
  return files;
@@ -85069,21 +85659,42 @@ function formatElapsedTime(ms) {
85069
85659
  const remainingMinutes = minutes % 60;
85070
85660
  return remainingMinutes > 0 ? `${hours}h ${remainingMinutes}m` : `${hours}h`;
85071
85661
  }
85072
- async function getDefaultContext() {
85662
+ async function getDefaultContext(commandName) {
85073
85663
  const config4 = await loadConfig();
85074
85664
  const cwd = process.cwd();
85075
85665
  const [files, truncated] = await listFiles(cwd, true, 2000, cwd, config4?.excludeFiles ?? []);
85076
85666
  const fileList = files.join(`
85077
85667
  `);
85668
+ const now2 = new Date;
85669
+ const formattedDate = `${now2.getUTCFullYear()}-${String(now2.getUTCMonth() + 1).padStart(2, "0")}-${String(now2.getUTCDate()).padStart(2, "0")}`;
85078
85670
  const contextParts = [
85079
85671
  `<file_list truncated="${truncated}">
85080
85672
  ${fileList}
85081
85673
  </file_list>`,
85082
- `<now_date>${new Date().toISOString()}</now_date>`
85674
+ `<now_date>${formattedDate}</now_date>`
85083
85675
  ];
85084
- if (config4?.rules) {
85676
+ try {
85677
+ const agentsMdContent = await fs3.readFile(path.join(cwd, "AGENTS.md"), "utf-8");
85678
+ contextParts.push(`<agents_instructions>
85679
+ ${agentsMdContent}
85680
+ </agents_instructions>`);
85681
+ } catch {}
85682
+ let rules = await resolveRules(config4?.rules);
85683
+ if (commandName && config4) {
85684
+ const apiConfig = new ApiProviderConfig(config4);
85685
+ const commandConfig = apiConfig.getConfigForCommand(commandName);
85686
+ if (commandConfig?.rules) {
85687
+ const commandRules = await resolveRules(commandConfig.rules);
85688
+ if (commandRules) {
85689
+ rules = rules ? `${rules}
85690
+
85691
+ ${commandRules}` : commandRules;
85692
+ }
85693
+ }
85694
+ }
85695
+ if (rules) {
85085
85696
  contextParts.push(`<rules>
85086
- ${config4.rules}
85697
+ ${rules}
85087
85698
  </rules>`);
85088
85699
  }
85089
85700
  if (config4?.scripts) {
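`getDefaultContext` now takes the calling command's name: it resolves the global `rules` from the config, appends any command-specific `rules` found via `ApiProviderConfig.getConfigForCommand`, and also inlines an `AGENTS.md` file from the working directory when one exists. A hedged sketch of just the rules merge (assuming, as the code above suggests, that `resolveRules` flattens either form of the `rules` value into a single string; everything else here is illustrative):

```ts
// Sketch of the expected merge order: command-specific rules are appended after global ones.
async function mergeRulesSketch(
  resolveRules: (rules: unknown) => Promise<string | undefined>,
  globalRules: unknown,
  commandRules: unknown,
): Promise<string | undefined> {
  let rules = await resolveRules(globalRules)
  const extra = await resolveRules(commandRules)
  if (extra) {
    rules = rules ? `${rules}\n\n${extra}` : extra
  }
  return rules // wrapped in a <rules> block only when non-empty
}
```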
@@ -85205,12 +85816,12 @@ async function select(input2, context) {
85205
85816
  }
85206
85817
  async function writeToFile(input2) {
85207
85818
  await mkdir2(dirname2(input2.path), { recursive: true });
85208
- await fs3.writeFile(input2.path, input2.content);
85819
+ await fs4.writeFile(input2.path, input2.content);
85209
85820
  return {};
85210
85821
  }
85211
85822
  async function readFile3(input2) {
85212
85823
  try {
85213
- const content = await fs3.readFile(input2.path, "utf8");
85824
+ const content = await fs4.readFile(input2.path, "utf8");
85214
85825
  return content;
85215
85826
  } catch {}
85216
85827
  return null;
@@ -85367,7 +85978,7 @@ async function invokeTool(input2, context) {
85367
85978
  const tool3 = toolHandlers.get(input2.toolName);
85368
85979
  if (!tool3) {
85369
85980
  return {
85370
- type: "Error" /* Error */,
85981
+ success: false,
85371
85982
  message: {
85372
85983
  type: "error-text",
85373
85984
  value: `Tool not found: ${input2.toolName}`
@@ -85379,7 +85990,7 @@ async function invokeTool(input2, context) {
85379
85990
  return result;
85380
85991
  } catch (error46) {
85381
85992
  return {
85382
- type: "Error" /* Error */,
85993
+ success: false,
85383
85994
  message: {
85384
85995
  type: "error-text",
85385
85996
  value: error46?.message ?? `${error46}`
@@ -85620,6 +86231,9 @@ Memory is organized using topics, which are like named containers for different
85620
86231
  - Use the default topic for simple, single-context scenarios
85621
86232
  - Memory persists across all tool calls within the current workflow
85622
86233
  `;
86234
+ var AGENTS_INSTRUCTION = `## AGENTS.md Instructions
86235
+
86236
+ If you are working in a subdirectory, check if there is an AGENTS.md file in that directory or parent directories for specific instructions. These files contain project-specific guidelines and conventions that you must follow.`;
85623
86237
 
85624
86238
  // src/workflows/prompts/coder.ts
85625
86239
  var CODER_SYSTEM_PROMPT = `Role: AI developer.
@@ -85631,6 +86245,8 @@ ${MEMORY_USAGE_SECTION}
85631
86245
 
85632
86246
  ${TOOL_USAGE_INSTRUCTION}
85633
86247
 
86248
+ ${AGENTS_INSTRUCTION}
86249
+
85634
86250
  ## Implementation Guidelines
85635
86251
 
85636
86252
  ### 1. Plan Analysis
@@ -86187,6 +86803,8 @@ ${MEMORY_USAGE_SECTION}
86187
86803
 
86188
86804
  ${TOOL_USAGE_INSTRUCTION}
86189
86805
 
86806
+ ${AGENTS_INSTRUCTION}
86807
+
86190
86808
  ## Your Role
86191
86809
 
86192
86810
  As a planner, your expertise lies in:
@@ -86587,7 +87205,7 @@ var fixWorkflow = async (input2, context) => {
86587
87205
  }
86588
87206
  logger.info(`Command failed with exit code ${exitCode}. Asking agent to fix it...`);
86589
87207
  const result = await step(`fix-${i2}`, async () => {
86590
- const defaultContext = await getDefaultContext();
87208
+ const defaultContext = await getDefaultContext("fix");
86591
87209
  const memoryContext = await tools2.getMemoryContext();
86592
87210
  const userPrompt = getFixUserPrompt(command, exitCode, stdout, stderr, task, prompt);
86593
87211
  const agentTools = [
@@ -86653,7 +87271,7 @@ async function createPlan(input2, context) {
86653
87271
  userMessage: [{ role: "user", content: userFeedback ?? task }]
86654
87272
  };
86655
87273
  } else {
86656
- const defaultContext = await getDefaultContext();
87274
+ const defaultContext = await getDefaultContext("plan");
86657
87275
  const memoryContext = await tools2.getMemoryContext();
86658
87276
  const prompt = `${memoryContext}
86659
87277
  ${getPlanPrompt(task, inputPlan)}
@@ -86709,10 +87327,10 @@ ${defaultContext}`;
86709
87327
  }
86710
87328
  const outputFiles = [];
86711
87329
  if (filePaths) {
86712
- for (const path of filePaths) {
86713
- const content = await tools2.readFile({ path });
87330
+ for (const path2 of filePaths) {
87331
+ const content = await tools2.readFile({ path: path2 });
86714
87332
  if (content) {
86715
- outputFiles.push({ path, content });
87333
+ outputFiles.push({ path: path2, content });
86716
87334
  }
86717
87335
  }
86718
87336
  }
@@ -86944,7 +87562,7 @@ ${fileContentString}`;
86944
87562
  agentTools.push(additionalTools.search);
86945
87563
  }
86946
87564
  const res = await step("implement", async () => {
86947
- const defaultContext = await getDefaultContext();
87565
+ const defaultContext = await getDefaultContext("code");
86948
87566
  const memoryContext = await tools2.getMemoryContext();
86949
87567
  const textContent = userContent.find((c) => c.type === "text");
86950
87568
  if (textContent && textContent.type === "text") {
@@ -87264,7 +87882,7 @@ async function createPlan2(input2, context) {
87264
87882
  outputSchema: EpicPlanSchema
87265
87883
  }, context);
87266
87884
  }
87267
- const defaultContext = await getDefaultContext();
87885
+ const defaultContext = await getDefaultContext("epic");
87268
87886
  const memoryContext = await tools2.getMemoryContext();
87269
87887
  const prompt = `${memoryContext}
87270
87888
  ${getPlanPrompt(task, plan2)}
@@ -87309,7 +87927,7 @@ async function createAndApprovePlan(task, context, saveUsageSnapshot, interactiv
87309
87927
  const planAgentResult = await step(`plan-${planAttempt}`, () => createPlan2({ task, feedback, messages, additionalTools }, context));
87310
87928
  messages = planAgentResult.messages;
87311
87929
  planAttempt++;
87312
- if (planAgentResult.type !== "Exit" /* Exit */) {
87930
+ if (planAgentResult.type !== "Exit") {
87313
87931
  logger.error(`Plan creation failed. Agent exited with status: ${planAgentResult.type}`);
87314
87932
  return null;
87315
87933
  }
@@ -87473,7 +88091,7 @@ async function performReviewAndFixCycle(iterationCount, taskItem, highLevelPlan,
87473
88091
  const commitMessages = [];
87474
88092
  for (let i2 = 0;i2 < MAX_REVIEW_RETRIES; i2++) {
87475
88093
  const diffResult = await tools2.executeCommand({ command: "git", args: ["diff", "--name-status", "HEAD~1", "HEAD"] });
87476
- const changedFiles = parseGitDiffNameStatus(diffResult.stdout).filter(({ path }) => path !== ".epic.yml");
88094
+ const changedFiles = parseGitDiffNameStatus(diffResult.stdout).filter(({ path: path2 }) => path2 !== ".epic.yml");
87477
88095
  if (changedFiles.length === 0) {
87478
88096
  logger.info(`No files were changed. Skipping review.
87479
88097
  `);
@@ -87487,7 +88105,7 @@ Review iteration ${i2 + 1}/${MAX_REVIEW_RETRIES}`);
87487
88105
  changedFiles
87488
88106
  };
87489
88107
  const reviewAgentResult = await step(`review-${iterationCount}-${i2}`, { retry: 1 }, async () => {
87490
- const defaultContext = await getDefaultContext();
88108
+ const defaultContext = await getDefaultContext("review");
87491
88109
  const memoryContext = await tools2.getMemoryContext();
87492
88110
  const userMessage = `${defaultContext}
87493
88111
  ${memoryContext}
@@ -87500,7 +88118,7 @@ ${formatReviewToolInput(changeInfo)}`;
87500
88118
  outputSchema: reviewOutputSchema
87501
88119
  }, context);
87502
88120
  });
87503
- if (reviewAgentResult.type !== "Exit" /* Exit */) {
88121
+ if (reviewAgentResult.type !== "Exit") {
87504
88122
  logger.error(`Review agent failed with status: ${reviewAgentResult.type}.`);
87505
88123
  break;
87506
88124
  }
@@ -87682,7 +88300,7 @@ Phase 6: Final Review and Fixup...
87682
88300
  const commitRange = `${baseBranch}...${currentBranch}`;
87683
88301
  for (let i2 = 0;i2 < MAX_REVIEW_RETRIES; i2++) {
87684
88302
  const diffResult = await tools2.executeCommand({ command: "git", args: ["diff", "--name-status", commitRange] });
87685
- const changedFiles = parseGitDiffNameStatus(diffResult.stdout).filter(({ path }) => path !== ".epic.yml");
88303
+ const changedFiles = parseGitDiffNameStatus(diffResult.stdout).filter(({ path: path2 }) => path2 !== ".epic.yml");
87686
88304
  if (changedFiles.length === 0) {
87687
88305
  logger.info(`No files have been changed in this branch. Skipping final review.
87688
88306
  `);
@@ -87696,7 +88314,7 @@ Final review iteration ${i2 + 1}/${MAX_REVIEW_RETRIES}`);
87696
88314
  changedFiles
87697
88315
  };
87698
88316
  const reviewAgentResult = await step(`final-review-${i2}`, async () => {
87699
- const defaultContext = await getDefaultContext();
88317
+ const defaultContext = await getDefaultContext("review");
87700
88318
  const memoryContext = await tools2.getMemoryContext();
87701
88319
  const userMessage = `${defaultContext}
87702
88320
  ${memoryContext}
@@ -87709,7 +88327,7 @@ ${formatReviewToolInput(changeInfo)}`;
87709
88327
  outputSchema: reviewOutputSchema
87710
88328
  }, context);
87711
88329
  });
87712
- if (reviewAgentResult.type !== "Exit" /* Exit */) {
88330
+ if (reviewAgentResult.type !== "Exit") {
87713
88331
  logger.error(`Review agent failed with status: ${reviewAgentResult.type}.`);
87714
88332
  break;
87715
88333
  }
@@ -87907,7 +88525,7 @@ Running generic agent...
87907
88525
  agentTools.push(additionalTools.search);
87908
88526
  }
87909
88527
  await step("agent", async () => {
87910
- const defaultContext = await getDefaultContext();
88528
+ const defaultContext = await getDefaultContext("task");
87911
88529
  const userMessage = `${task}
87912
88530
 
87913
88531
  ${defaultContext}`;
@@ -87948,7 +88566,7 @@ Deciding which workflow to use for task...
87948
88566
  tools: [],
87949
88567
  outputSchema: DecisionSchema
87950
88568
  }, context);
87951
- if (result.type !== "Exit" /* Exit */ || !result.object) {
88569
+ if (result.type !== "Exit" || !result.object) {
87952
88570
  throw new Error(`Could not decide which workflow to run. Agent exited with reason: ${result.type}`);
87953
88571
  }
87954
88572
  const decision = result.object;
@@ -88018,7 +88636,7 @@ var prWorkflow = async (input2, context) => {
  tools: [],
  outputSchema: prDetailsSchema
  }, context);
- if (agentResult.type !== "Exit" /* Exit */) {
+ if (agentResult.type !== "Exit") {
  throw new Error(`Workflow exited unexpectedly with type: ${agentResult.type}`);
  }
  const prDetails = agentResult.object;
@@ -88180,7 +88798,7 @@ var reviewWorkflow = async (input2, context) => {
  return { overview: "No changes to review.", specificReviews: [] };
  }
  const result = await step("review", async () => {
- const defaultContext = await getDefaultContext();
+ const defaultContext = await getDefaultContext("review");
  const memoryContext = await tools2.getMemoryContext();
  const reviewInput = formatReviewToolInput(changeInfo);
  const fullContent = `${reviewInput}
@@ -88228,7 +88846,7 @@ var commitCommand = new Command("commit").description("Create a commit with AI-g
  });
 
  // src/workflows/epic-context.ts
- import { promises as fs4 } from "node:fs";
+ import { promises as fs5 } from "node:fs";
  var EPIC_CONTEXT_FILE = ".epic.yml";
  var EpicUsageSchema = exports_external.object({
  timestamp: exports_external.number(),
@@ -88257,12 +88875,12 @@ var saveEpicContext = async (context) => {
  memory: context.memory,
  usages: context.usages
  });
- await fs4.writeFile(EPIC_CONTEXT_FILE, yamlString, "utf-8");
+ await fs5.writeFile(EPIC_CONTEXT_FILE, yamlString, "utf-8");
  };
  var loadEpicContext = async () => {
  let fileContent;
  try {
- fileContent = await fs4.readFile(EPIC_CONTEXT_FILE, "utf-8");
+ fileContent = await fs5.readFile(EPIC_CONTEXT_FILE, "utf-8");
  } catch {
  return {};
  }
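These two hunks only rename the bundled `fs4` binding to `fs5`; the epic-context behaviour is unchanged: workflow state is persisted to `.epic.yml`, and a missing file is treated as an empty context. A standalone sketch of that round-trip; the `yaml` package import is an assumption (the serializer behind `yamlString` is not named in this hunk), and only the `memory`/`usages` fields visible above are kept.

```ts
// Minimal sketch of the .epic.yml round-trip; assumes the `yaml` npm package.
import { promises as fs } from "node:fs";
import { parse, stringify } from "yaml";

const EPIC_CONTEXT_FILE = ".epic.yml";

interface EpicContext {
  memory?: unknown;
  usages?: unknown[];
}

async function saveEpicContextSketch(context: EpicContext): Promise<void> {
  const yamlString = stringify({ memory: context.memory, usages: context.usages });
  await fs.writeFile(EPIC_CONTEXT_FILE, yamlString, "utf-8");
}

async function loadEpicContextSketch(): Promise<EpicContext> {
  try {
    return parse(await fs.readFile(EPIC_CONTEXT_FILE, "utf-8")) ?? {};
  } catch {
    return {}; // no .epic.yml yet: start from an empty context, as the bundled code does
  }
}
```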
@@ -88792,9 +89410,9 @@ ${formattedReview}`;
  // src/commands/workflow.ts
  import { mkdir as mkdir3, readFile as readFile5, writeFile as writeFile2 } from "node:fs/promises";
  import { dirname as dirname3 } from "node:path";
- async function saveWorkflowFile(path, workflow3) {
- await mkdir3(dirname3(path), { recursive: true });
- await writeFile2(path, $stringify(workflow3));
+ async function saveWorkflowFile(path2, workflow3) {
+ await mkdir3(dirname3(path2), { recursive: true });
+ await writeFile2(path2, $stringify(workflow3));
  }
  function clearWorkflowCode(workflowDef) {
  for (const wf of Object.values(workflowDef.workflows)) {
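`saveWorkflowFile` likewise only renames its `path` parameter to `path2`; the behaviour (create the parent directory, then write the serialized workflow) is unchanged. A trivial standalone sketch of the same pattern, with a hypothetical file path and JSON standing in for the bundle's `$stringify` YAML serializer.

```ts
// Same mkdir-then-write pattern as the bundled saveWorkflowFile; path and serializer are stand-ins.
import { mkdir, writeFile } from "node:fs/promises";
import { dirname } from "node:path";

async function saveWorkflowFileSketch(filePath: string, workflow: unknown): Promise<void> {
  await mkdir(dirname(filePath), { recursive: true }); // ensure the parent directory exists
  await writeFile(filePath, JSON.stringify(workflow, null, 2));
}

// e.g. await saveWorkflowFileSketch("workflows/example.json", { workflows: { main: { steps: [] } } });
```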
@@ -88906,12 +89524,15 @@ Return the updated workflow definition.`;
  }
  const workflowDef = parsedResult.definition;
  const workflowNames = Object.keys(workflowDef.workflows);
+ logger.info(`Available workflows: ${workflowNames.join(", ")}`);
  let workflowId = workflowName;
  if (!workflowId) {
  if (workflowNames.includes("main")) {
  workflowId = "main";
+ logger.info(`Using 'main' workflow`);
  } else if (workflowNames.length === 1) {
  workflowId = workflowNames[0];
+ logger.info(`Using workflow '${workflowId}'`);
  } else if (workflowNames.length > 1) {
  logger.error(`Multiple workflows found in file and no 'main' workflow. Please specify one using --workflow <name>. Available workflows: ${workflowNames.join(", ")}`);
  return;
@@ -88924,6 +89545,7 @@ Return the updated workflow definition.`;
  logger.error(`Workflow '${workflowId}' not found in file. Available workflows: ${workflowNames.join(", ")}`);
  return;
  }
+ logger.info(`Using workflow '${workflowId}'`);
  }
  let dynamicRunner;
  try {
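Taken together, the two hunks above add progress logging around workflow selection; the selection rules themselves are unchanged: prefer a workflow named 'main', fall back to the only workflow in the file, and otherwise require `--workflow <name>` (erroring if the requested name is missing). The same rules, condensed into a sketch with logging omitted.

```ts
// Condensed restatement of the selection logic shown in the diff.
function pickWorkflowId(workflowNames: string[], requested?: string): string | undefined {
  if (requested) {
    return workflowNames.includes(requested) ? requested : undefined; // undefined -> "not found" error
  }
  if (workflowNames.includes("main")) return "main";
  if (workflowNames.length === 1) return workflowNames[0];
  return undefined; // multiple workflows and no 'main': the CLI asks for --workflow <name>
}
```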
@@ -88943,7 +89565,14 @@ Return the updated workflow definition.`;
  if (selectedWorkflow.inputs && selectedWorkflow.inputs.length > 0 && task2) {
  const firstInput = selectedWorkflow.inputs[0];
  workflowInput[firstInput.id] = task2;
+ logger.info(`Workflow input '${firstInput.id}': ${task2}`);
+ } else if (selectedWorkflow.inputs && selectedWorkflow.inputs.length > 0) {
+ logger.info(`Workflow expects inputs: ${selectedWorkflow.inputs.map((i2) => i2.id).join(", ")}`);
+ } else {
+ logger.info("Workflow has no inputs");
  }
+ logger.info(`Workflow has ${selectedWorkflow.steps.length} step(s)`);
+ logger.debug(`Steps: ${selectedWorkflow.steps.map((s2) => `${s2.id} (${s2.task})`).join(", ")}`);
  await runWorkflow(workflowFn, workflowInput, { commandName: "workflow", command, logger, yes });
  }
  var workflowCommand = new Command("workflow").description("Generate, manage, and run custom workflows.").argument("[task]", "The task description for generating the workflow.").option("-f, --file <path>", "Path to the workflow file").option("-w, --workflow <name>", "The name of the workflow to run").option("--create", "Create a new workflow").option("--regenerate", "Regenerate the code for the workflow").action(runWorkflowCommand);
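For readability, the single-line `workflowCommand` definition at the end of this hunk is the standard commander chain laid out below. The explicit `import { Command } from "commander"` and the handler stub are only for the sketch (the bundle destructures `Command` from its vendored commander module, and the real handler is the `runWorkflowCommand` shown in the hunks above); commander invokes the action handler with the declared argument, then the options object, then the command instance.

```ts
// Same definition as the last line above, just laid out; behaviour is identical.
import { Command } from "commander";

// Stub standing in for the real handler defined earlier in src/commands/workflow.ts.
async function runWorkflowCommand(_task: string | undefined, _options: Record<string, unknown>, _command: Command) {
  // real implementation shown in the hunks above
}

const workflowCommand = new Command("workflow")
  .description("Generate, manage, and run custom workflows.")
  .argument("[task]", "The task description for generating the workflow.")
  .option("-f, --file <path>", "Path to the workflow file")
  .option("-w, --workflow <name>", "The name of the workflow to run")
  .option("--create", "Create a new workflow")
  .option("--regenerate", "Regenerate the code for the workflow")
  .action(runWorkflowCommand); // commander calls this with (task, options, command)
```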