wcgw 2.6.3__py3-none-any.whl → 2.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of wcgw might be problematic. Click here for more details.

@@ -0,0 +1,119 @@
1
+ import re
2
+ from typing import Callable
3
+
4
+ from .diff_edit import FileEditInput, FileEditOutput
5
+
6
+
7
def search_replace_edit(
    lines: list[str], original_content: str, logger: Callable[[str], object]
) -> tuple[str, str]:
    """Apply git-conflict-style SEARCH/REPLACE blocks to ``original_content``.

    ``lines`` is the edit script split into lines; each block has the form::

        <<<<<<< SEARCH
        ...lines to find...
        =======
        ...replacement lines...
        >>>>>>> REPLACE

    Marker lines are matched loosely (6+ ``<``/``>``, 6+ ``=``, optional
    trailing whitespace). Lines outside any block are ignored.

    Returns a tuple of (edited file content, human-readable status message);
    the message carries any warnings produced while matching search blocks.
    Raises Exception when the input is empty, a SEARCH block is empty, or no
    valid block is found.
    """
    if not lines:
        raise Exception("Error: No input to search replace edit")
    original_lines = original_content.split("\n")
    n_lines = len(lines)
    i = 0
    search_replace_blocks = list[tuple[list[str], list[str]]]()
    while i < n_lines:
        if re.match(r"^<<<<<<+\s*SEARCH\s*$", lines[i]):
            # Collect search lines until the ======= divider.
            search_block = []
            i += 1
            while i < n_lines and not re.match(r"^======*\s*$", lines[i]):
                search_block.append(lines[i])
                i += 1
            i += 1  # skip the ======= divider
            if not search_block:
                raise Exception("SEARCH block can not be empty")
            # Collect replacement lines until the >>>>>>> REPLACE terminator.
            replace_block = []
            while i < n_lines and not re.match(r"^>>>>>>+\s*REPLACE\s*$", lines[i]):
                replace_block.append(lines[i])
                i += 1
            i += 1  # skip the >>>>>>> REPLACE terminator

            # Echo the parsed block through the caller-supplied logger.
            for line in search_block:
                logger("> " + line)
            logger("=======")
            for line in replace_block:
                logger("< " + line)
            logger("\n\n\n\n")

            search_replace_blocks.append((search_block, replace_block))
        else:
            # Not a block opener: skip stray line.
            i += 1

    if not search_replace_blocks:
        raise Exception(
            "No valid search replace blocks found, ensure your SEARCH/REPLACE blocks are formatted correctly"
        )

    # Each block starts as its own single-element group; greedy_context_replace
    # may merge groups when a search block matches ambiguously.
    edited_content, comments_ = greedy_context_replace(
        original_lines, [[x] for x in search_replace_blocks], original_lines, set(), 0
    )
    edited_file = "\n".join(edited_content)
    if not comments_:
        comments = "Edited successfully"
    else:
        comments = (
            "Edited successfully. However, following warnings were generated while matching search blocks.\n"
            + "\n".join(comments_)
        )
    return edited_file, comments
60
+
61
+
62
def greedy_context_replace(
    original_lines: list[str],
    search_replace_blocks: list[list[tuple[list[str], list[str]]]],
    running_lines: list[str],
    running_comments: set[str],
    current_block_offset: int,
) -> tuple[list[str], set[str]]:
    """Recursively apply grouped search/replace blocks to ``running_lines``.

    Each entry of ``search_replace_blocks`` is a group of (search, replace)
    pairs that is matched as one unit by FileEditInput. When a group matches
    more than once, it is merged into the previous group and matching restarts
    from scratch on ``original_lines`` — the extra preceding context is what
    disambiguates ("grounds") the duplicate match.

    Args:
        original_lines: unedited file content, used when matching restarts.
        search_replace_blocks: groups of (search_lines, replace_lines) pairs.
        running_lines: current, partially edited file content.
        running_comments: warnings accumulated from earlier replacements.
        current_block_offset: index of the group to apply next.

    Returns:
        The fully edited lines and the accumulated warning set.

    Raises:
        Exception: when a block cannot be matched, or stays ambiguous even
            after being merged with all preceding context.
    """
    # Base case: every group has been applied.
    if current_block_offset >= len(search_replace_blocks):
        return running_lines, running_comments
    current_blocks = search_replace_blocks[current_block_offset]

    outputs = FileEditInput(running_lines, 0, current_blocks, 0).edit_file()
    best_matches, is_error = FileEditOutput.get_best_match(outputs)

    if is_error:
        # replace_or_throw raises with a detailed diagnostic; the raise below
        # only guards against it unexpectedly returning.
        best_matches[0].replace_or_throw(3)
        raise Exception("Shouldn't happen")

    if len(best_matches) > 1:
        # Duplicate found, try to ground using previous blocks.
        # Hoisted out of the f-strings below: a backslash inside an f-string
        # expression is a SyntaxError before Python 3.12 (PEP 701).
        duplicated_search = "\n".join(current_blocks[-1][0])
        if current_block_offset == 0:
            # No previous context available to disambiguate with.
            raise Exception(f"""
The following block matched more than once:
---
```
{duplicated_search}
```
""")

        else:
            # Merge this group into the previous one and retry from the
            # original content with the larger, more specific group.
            search_replace_blocks = (
                search_replace_blocks[: current_block_offset - 1]
                + [search_replace_blocks[current_block_offset - 1] + current_blocks]
                + search_replace_blocks[current_block_offset + 1 :]
            )
            try:
                return greedy_context_replace(
                    original_lines, search_replace_blocks, original_lines, set(), 0
                )
            except Exception:
                raise Exception(f"""
The following block matched more than once:
---
```
{duplicated_search}
```
""")

    best_match = best_matches[0]
    running_lines, comments = best_match.replace_or_throw(3)
    running_comments = running_comments | comments
    return greedy_context_replace(
        original_lines,
        search_replace_blocks,
        running_lines,
        running_comments,
        current_block_offset + 1,
    )
@@ -82,6 +82,7 @@ async def handle_list_tools() -> list[types.Tool]:
82
82
  - If the user has mentioned a folder or file with unclear project root, use the file or folder as `any_workspace_path`.
83
83
  - If user has mentioned any files use `initial_files_to_read` to read, use absolute paths only.
84
84
  - If `any_workspace_path` is provided, a tree structure of the workspace will be shown.
85
+ - Leave `any_workspace_path` as empty if no file or folder is mentioned.
85
86
  """,
86
87
  ),
87
88
  ToolParam(
@@ -152,6 +153,24 @@ async def handle_list_tools() -> list[types.Tool]:
152
153
  """
153
154
  + diffinstructions,
154
155
  ),
156
+ # ToolParam(
157
+ # inputSchema=KnowledgeTransfer.model_json_schema(),
158
+ # name="KnowledgeTransfer",
159
+ # description="""
160
+ # Write detailed description in order to do a KT, if the user asks for it.
161
+ # Save all information necessary for a person to understand the task and the problems.
162
+ # - `all_user_instructions` should contain all instructions user shared in the conversation.
163
+ # - `current_status_of_the_task` should contain only what is already achieved, not what's remaining.
164
+ # - `all_issues_snippets` should only contain snippets of error, traceback, file snippets, commands, etc., no comments or solutions (important!).
165
+ # - Be very verbose in `all_issues_snippets` providing as much error context as possible.
166
+ # - Provide an id if the user hasn't provided one.
167
+ # - This tool will return a text file path where the information is saved.
168
+ # - After the tool completes succesfully, tell the user the task id and the generate file path. (important!)
169
+ # - Leave arguments as empty string if they aren't relevant.
170
+ # - This tool marks end of your conversation, do not run any further tools after calling this.
171
+ # - Provide absolute file paths only in `relevant_file_paths` containing all relevant files.
172
+ # """,
173
+ # ),
155
174
  ]
156
175
  if COMPUTER_USE_ON_DOCKER_ENABLED:
157
176
  tools += [
wcgw/client/memory.py ADDED
@@ -0,0 +1,52 @@
1
+ import os
2
+
3
+ from ..types_ import KnowledgeTransfer
4
+
5
+
6
def get_app_dir_xdg() -> str:
    """Return the wcgw application data directory.

    Honors ``XDG_DATA_HOME`` when set, otherwise falls back to the XDG
    default of ``~/.local/share``.
    """
    data_home = os.environ.get("XDG_DATA_HOME")
    if data_home is None:
        data_home = os.path.expanduser("~/.local/share")
    return os.path.join(data_home, "wcgw")
9
+
10
+
11
def format_memory(task_memory: KnowledgeTransfer, relevant_files: str) -> str:
    """Render a KnowledgeTransfer record as a markdown-style text document.

    ``relevant_files`` is the pre-serialized contents of the task's relevant
    files and is appended verbatim as the final section.

    NOTE: the triple-quoted f-string deliberately mixes ``\\n`` escapes with
    literal newlines, so each section is separated by extra blank lines
    exactly as authored — do not "normalize" it.
    """
    memory_data = f"""# Goal: {task_memory.objective}\n\n
# Instructions:\n{task_memory.all_user_instructions}\n\n
# Current Status:\n{task_memory.current_status_of_the_task}\n\n
# Pending Issues:\n{task_memory.all_issues_snippets}\n\n
# Build Instructions:\n{task_memory.build_and_development_instructions}\n"""

    # Append the relevant-files section last.
    memory_data += "\n# Relevant Files:\n" + relevant_files

    return memory_data
21
+
22
+
23
def save_memory(task_memory: KnowledgeTransfer, relevant_files: str) -> str:
    """Persist a KnowledgeTransfer record to the app's memory directory.

    Writes two files keyed by the task id: ``<id>.txt`` with the formatted,
    human-readable memory (including ``relevant_files``), and ``<id>.json``
    with the raw pydantic JSON dump.

    Returns the path of the ``.txt`` file.
    Raises Exception if the task id is empty.
    """
    memory_dir = os.path.join(get_app_dir_xdg(), "memory")
    os.makedirs(memory_dir, exist_ok=True)

    task_id = task_memory.id
    if not task_id:
        raise Exception("Task id can not be empty")
    formatted = format_memory(task_memory, relevant_files)

    json_path = os.path.join(memory_dir, f"{task_id}.json")
    text_path = os.path.join(memory_dir, f"{task_id}.txt")

    with open(text_path, "w") as text_file:
        text_file.write(formatted)

    with open(json_path, "w") as json_file:
        json_file.write(task_memory.model_dump_json())

    return text_path
43
+
44
+
45
def load_memory(task_id: str) -> KnowledgeTransfer:
    """Load a previously saved KnowledgeTransfer record by task id.

    Reads ``<app dir>/memory/<task_id>.json`` and validates it back into a
    KnowledgeTransfer model. Propagates OSError if the file is missing and
    pydantic's validation error if the JSON is malformed.
    """
    memory_file = os.path.join(get_app_dir_xdg(), "memory", f"{task_id}.json")

    with open(memory_file, "r") as f:
        raw_json = f.read()
    return KnowledgeTransfer.model_validate_json(raw_json)
@@ -27,17 +27,20 @@ from ..types_ import (
27
27
  BashCommand,
28
28
  BashInteraction,
29
29
  FileEdit,
30
+ KnowledgeTransfer,
30
31
  ReadFiles,
31
32
  ReadImage,
32
33
  ResetShell,
33
34
  WriteIfEmpty,
34
35
  )
35
36
  from .common import CostData, History, Models, discard_input
37
+ from .memory import load_memory
36
38
  from .openai_utils import get_input_cost, get_output_cost
37
39
  from .tools import (
38
40
  DoneFlag,
39
41
  ImageData,
40
42
  get_tool_output,
43
+ initialize,
41
44
  which_tool,
42
45
  )
43
46
 
@@ -117,19 +120,24 @@ def loop(
117
120
 
118
121
  history: History = []
119
122
  waiting_for_assistant = False
123
+
124
+ memory = None
120
125
  if resume:
121
- if resume == "latest":
122
- resume_path = sorted(Path(".wcgw").iterdir(), key=os.path.getmtime)[-1]
123
- else:
124
- resume_path = Path(resume)
125
- if not resume_path.exists():
126
- raise FileNotFoundError(f"File {resume} not found")
127
- with resume_path.open() as f:
128
- history = json.load(f)
129
- if len(history) <= 2:
130
- raise ValueError("Invalid history file")
131
- first_message = ""
132
- waiting_for_assistant = history[-1]["role"] != "assistant"
126
+ try:
127
+ memory = load_memory(resume)
128
+ except OSError:
129
+ if resume == "latest":
130
+ resume_path = sorted(Path(".wcgw").iterdir(), key=os.path.getmtime)[-1]
131
+ else:
132
+ resume_path = Path(resume)
133
+ if not resume_path.exists():
134
+ raise FileNotFoundError(f"File {resume} not found")
135
+ with resume_path.open() as f:
136
+ history = json.load(f)
137
+ if len(history) <= 2:
138
+ raise ValueError("Invalid history file")
139
+ first_message = ""
140
+ waiting_for_assistant = history[-1]["role"] != "assistant"
133
141
 
134
142
  my_dir = os.path.dirname(__file__)
135
143
 
@@ -202,10 +210,29 @@ def loop(
202
210
  ResetShell,
203
211
  description="Resets the shell. Use only if all interrupts and prompt reset attempts have failed repeatedly.",
204
212
  ),
213
+ openai.pydantic_function_tool(
214
+ KnowledgeTransfer,
215
+ description="""
216
+ Write detailed description in order to do a KT, if the user asks for it.
217
+ Save all information necessary for a person to understand the task and the problems.
218
+
219
+ - `all_user_instructions` should contain all instructions user shared in the conversation.
220
+ - `current_status_of_the_task` should contain only what is already achieved, not what's remaining.
221
+ - `all_issues_snippets` should only contain snippets of error, traceback, file snippets, commands, etc., no comments or solutions (important!).
222
+ - Be very verbose in `all_issues_snippets` providing as much error context as possible.
223
+ - Provide an id if the user hasn't provided one.
224
+ - This tool will return a text file path where the information is saved.
225
+ - After the tool completes succesfully, tell the user the task id and the generate file path. (important!)
226
+ - Leave arguments as empty string if they aren't relevant.
227
+ - This tool marks end of your conversation, do not run any further tools after calling this.
228
+ - Provide absolute file paths only in `relevant_file_paths` containing all relevant files.
229
+ """,
230
+ ),
205
231
  ]
206
- uname_sysname = os.uname().sysname
207
- uname_machine = os.uname().machine
208
232
 
233
+ initial_info = initialize(
234
+ os.getcwd(), [], resume if (memory and resume) else "", 8000
235
+ )
209
236
  system = f"""
210
237
  You're an expert software engineer with shell and code knowledge.
211
238
 
@@ -217,10 +244,7 @@ Instructions:
217
244
  - Do not provide code snippets unless asked by the user, instead directly add/edit the code.
218
245
  - Do not install new tools/packages before ensuring no such tools/package or an alternative already exists.
219
246
 
220
- System information:
221
- - System: {uname_sysname}
222
- - Machine: {uname_machine}
223
- - Current directory: {os.getcwd()}
247
+ {initial_info}
224
248
 
225
249
  """
226
250
 
wcgw/client/tools.py CHANGED
@@ -9,7 +9,7 @@ import shlex
9
9
  import time
10
10
  import traceback
11
11
  import uuid
12
- from difflib import SequenceMatcher
12
+ from os.path import expanduser
13
13
  from pathlib import Path
14
14
  from tempfile import NamedTemporaryFile, TemporaryDirectory
15
15
  from typing import (
@@ -43,6 +43,7 @@ from ..types_ import (
43
43
  GetScreenInfo,
44
44
  Initialize,
45
45
  Keyboard,
46
+ KnowledgeTransfer,
46
47
  Mouse,
47
48
  ReadFiles,
48
49
  ReadImage,
@@ -51,6 +52,8 @@ from ..types_ import (
51
52
  WriteIfEmpty,
52
53
  )
53
54
  from .computer_use import run_computer_tool
55
+ from .file_ops.search_replace import search_replace_edit
56
+ from .memory import format_memory, load_memory, save_memory
54
57
  from .repo_ops.repo_context import get_repo_context
55
58
  from .sys_utils import command_run
56
59
 
@@ -290,12 +293,33 @@ BASH_STATE = BashState()
290
293
 
291
294
 
292
295
  def initialize(
293
- any_workspace_path: str, read_files_: list[str], max_tokens: Optional[int]
296
+ any_workspace_path: str,
297
+ read_files_: list[str],
298
+ task_id_to_resume: str,
299
+ max_tokens: Optional[int],
294
300
  ) -> str:
295
301
  reset_shell()
296
302
 
303
+ # Expand the workspace path
304
+ any_workspace_path = expand_user(any_workspace_path, BASH_STATE.is_in_docker)
297
305
  repo_context = ""
298
306
 
307
+ memory = ""
308
+ if task_id_to_resume:
309
+ try:
310
+ task_mem = load_memory(task_id_to_resume)
311
+ mem_files = task_mem.relevant_file_paths
312
+ mem_files_read = read_files(mem_files, max_tokens)
313
+ memory = "Following is the retrieved task:\n" + format_memory(
314
+ task_mem, mem_files_read
315
+ )
316
+ if (
317
+ not any_workspace_path or not os.path.exists(any_workspace_path)
318
+ ) and os.path.exists(task_mem.project_root_path):
319
+ any_workspace_path = task_mem.project_root_path
320
+ except Exception:
321
+ memory = f'Error: Unable to load task with ID "{task_id_to_resume}" '
322
+
299
323
  if any_workspace_path:
300
324
  if os.path.exists(any_workspace_path):
301
325
  repo_context, folder_to_start = get_repo_context(any_workspace_path, 200)
@@ -306,7 +330,9 @@ def initialize(
306
330
 
307
331
  repo_context = f"---\n# Workspace structure\n{repo_context}\n---\n"
308
332
  else:
309
- return f"\nInfo: Workspace path {any_workspace_path} does not exist\n"
333
+ repo_context = (
334
+ f"\nInfo: Workspace path {any_workspace_path} does not exist\n"
335
+ )
310
336
 
311
337
  initial_files_context = ""
312
338
  if read_files_:
@@ -325,6 +351,10 @@ Current working directory: {BASH_STATE.cwd}
325
351
  {repo_context}
326
352
 
327
353
  {initial_files_context}
354
+
355
+ ---
356
+
357
+ {memory}
328
358
  """
329
359
 
330
360
  return output
@@ -396,6 +426,12 @@ def rstrip(lines: list[str]) -> str:
396
426
  return "\n".join([line.rstrip() for line in lines])
397
427
 
398
428
 
429
+ def expand_user(path: str, docker_id: Optional[str]) -> str:
430
+ if not path or not path.startswith("~") or docker_id:
431
+ return path
432
+ return expanduser(path)
433
+
434
+
399
435
  def _incremental_text(text: str, last_pending_output: str) -> str:
400
436
  # text = render_terminal_output(text[-100_000:])
401
437
  text = text[-100_000:]
@@ -673,6 +709,9 @@ def truncate_if_over(content: str, max_tokens: Optional[int]) -> str:
673
709
 
674
710
 
675
711
  def read_image_from_shell(file_path: str) -> ImageData:
712
+ # Expand the path
713
+ file_path = expand_user(file_path, BASH_STATE.is_in_docker)
714
+
676
715
  if not os.path.isabs(file_path):
677
716
  file_path = os.path.join(BASH_STATE.cwd, file_path)
678
717
 
@@ -722,7 +761,7 @@ def write_file(
722
761
  if not os.path.isabs(writefile.file_path):
723
762
  return f"Failure: file_path should be absolute path, current working directory is {BASH_STATE.cwd}"
724
763
  else:
725
- path_ = writefile.file_path
764
+ path_ = expand_user(writefile.file_path, BASH_STATE.is_in_docker)
726
765
 
727
766
  error_on_exist_ = error_on_exist and path_ not in BASH_STATE.whitelist_for_overwrite
728
767
  add_overwrite_warning = ""
@@ -828,126 +867,6 @@ Syntax errors:
828
867
  return "Success" + "".join(warnings)
829
868
 
830
869
 
831
- def find_least_edit_distance_substring(
832
- orig_content_lines: list[str], find_lines: list[str]
833
- ) -> tuple[list[str], str]:
834
- # Prepare content lines, stripping whitespace and keeping track of original indices
835
- content_lines = [line.strip() for line in orig_content_lines]
836
- new_to_original_indices = {}
837
- new_content_lines = []
838
- for i, line in enumerate(content_lines):
839
- if not line:
840
- continue
841
- new_content_lines.append(line)
842
- new_to_original_indices[len(new_content_lines) - 1] = i
843
- content_lines = new_content_lines
844
-
845
- # Prepare find lines, removing empty lines
846
- find_lines = [line.strip() for line in find_lines if line.strip()]
847
-
848
- # Initialize variables for best match tracking
849
- max_similarity = 0.0
850
- min_edit_distance_lines = []
851
- context_lines = []
852
-
853
- # For each possible starting position in content
854
- for i in range(max(1, len(content_lines) - len(find_lines) + 1)):
855
- # Calculate similarity for the block starting at position i
856
- block_similarity = 0.0
857
- for j in range(len(find_lines)):
858
- if (i + j) < len(content_lines):
859
- # Use SequenceMatcher for more efficient similarity calculation
860
- similarity = SequenceMatcher(
861
- None, content_lines[i + j], find_lines[j]
862
- ).ratio()
863
- block_similarity += similarity
864
-
865
- # If this block is more similar than previous best
866
- if block_similarity > max_similarity:
867
- max_similarity = block_similarity
868
- # Map back to original line indices
869
- orig_start_index = new_to_original_indices[i]
870
- orig_end_index = (
871
- new_to_original_indices.get(
872
- i + len(find_lines) - 1, len(orig_content_lines) - 1
873
- )
874
- + 1
875
- )
876
- # Get the original lines
877
- min_edit_distance_lines = orig_content_lines[
878
- orig_start_index:orig_end_index
879
- ]
880
- # Get context (10 lines before and after)
881
- context_lines = orig_content_lines[
882
- max(0, orig_start_index - 10) : (orig_end_index + 10)
883
- ]
884
-
885
- return (
886
- min_edit_distance_lines,
887
- "\n".join(context_lines),
888
- )
889
-
890
-
891
- def lines_replacer(
892
- orig_content_lines: list[str], search_lines: list[str], replace_lines: list[str]
893
- ) -> str:
894
- # Validation for empty search
895
- search_lines = list(filter(None, [x.strip() for x in search_lines]))
896
-
897
- # Create mapping of non-empty lines to original indices
898
- new_to_original_indices = []
899
- new_content_lines = []
900
- for i, line in enumerate(orig_content_lines):
901
- stripped = line.strip()
902
- if not stripped:
903
- continue
904
- new_content_lines.append(stripped)
905
- new_to_original_indices.append(i)
906
-
907
- if not new_content_lines and not search_lines:
908
- return "\n".join(replace_lines)
909
- elif not search_lines:
910
- raise ValueError("Search block is empty")
911
- elif not new_content_lines:
912
- raise ValueError("File content is empty")
913
-
914
- # Search for matching block
915
- for i in range(len(new_content_lines) - len(search_lines) + 1):
916
- if all(
917
- new_content_lines[i + j] == search_lines[j]
918
- for j in range(len(search_lines))
919
- ):
920
- start_idx = new_to_original_indices[i]
921
- end_idx = new_to_original_indices[i + len(search_lines) - 1] + 1
922
- return "\n".join(
923
- orig_content_lines[:start_idx]
924
- + replace_lines
925
- + orig_content_lines[end_idx:]
926
- )
927
-
928
- raise ValueError("Search block not found in content")
929
-
930
-
931
- def edit_content(content: str, find_lines: str, replace_with_lines: str) -> str:
932
- replace_with_lines_ = replace_with_lines.split("\n")
933
- find_lines_ = find_lines.split("\n")
934
- content_lines_ = content.split("\n")
935
- try:
936
- return lines_replacer(content_lines_, find_lines_, replace_with_lines_)
937
- except ValueError:
938
- pass
939
-
940
- _, context_lines = find_least_edit_distance_substring(content_lines_, find_lines_)
941
-
942
- raise Exception(
943
- f"""Error: no match found for the provided search block.
944
- Requested search block: \n```\n{find_lines}\n```
945
- Possible relevant section in the file:\n---\n```\n{context_lines}\n```\n---\nFile not edited
946
- \nPlease retry with exact search. Re-read the file if unsure.
947
- """
948
- )
949
-
950
-
951
870
  def do_diff_edit(fedit: FileEdit, max_tokens: Optional[int]) -> str:
952
871
  try:
953
872
  return _do_diff_edit(fedit, max_tokens)
@@ -974,7 +893,7 @@ def _do_diff_edit(fedit: FileEdit, max_tokens: Optional[int]) -> str:
974
893
  f"Failure: file_path should be absolute path, current working directory is {BASH_STATE.cwd}"
975
894
  )
976
895
  else:
977
- path_ = fedit.file_path
896
+ path_ = expand_user(fedit.file_path, BASH_STATE.is_in_docker)
978
897
 
979
898
  # The LLM is now aware that the file exists
980
899
  BASH_STATE.add_to_whitelist_for_overwrite(path_)
@@ -1002,46 +921,7 @@ def _do_diff_edit(fedit: FileEdit, max_tokens: Optional[int]) -> str:
1002
921
  )
1003
922
  lines = fedit.file_edit_using_search_replace_blocks.split("\n")
1004
923
 
1005
- if not lines or not re.match(r"^<<<<<<+\s*SEARCH\s*$", lines[0]):
1006
- raise Exception(
1007
- "Error: first line should be `<<<<<< SEARCH` to start a search-replace block"
1008
- )
1009
-
1010
- n_lines = len(lines)
1011
- i = 0
1012
- replacement_count = 0
1013
- while i < n_lines:
1014
- if re.match(r"^<<<<<<+\s*SEARCH\s*$", lines[i]):
1015
- search_block = []
1016
- i += 1
1017
- while i < n_lines and not re.match(r"^======*\s*$", lines[i]):
1018
- search_block.append(lines[i])
1019
- i += 1
1020
- i += 1
1021
- replace_block = []
1022
- while i < n_lines and not re.match(r"^>>>>>>+\s*REPLACE\s*$", lines[i]):
1023
- replace_block.append(lines[i])
1024
- i += 1
1025
- i += 1
1026
-
1027
- for line in search_block:
1028
- console.log("> " + line)
1029
- console.log("=======")
1030
- for line in replace_block:
1031
- console.log("< " + line)
1032
- console.log("\n\n\n\n")
1033
- search_block_ = "\n".join(search_block)
1034
- replace_block_ = "\n".join(replace_block)
1035
-
1036
- apply_diff_to = edit_content(apply_diff_to, search_block_, replace_block_)
1037
- replacement_count += 1
1038
- else:
1039
- i += 1
1040
-
1041
- if replacement_count == 0:
1042
- raise Exception(
1043
- "Error: no valid search-replace blocks found, please check your syntax for FileEdit"
1044
- )
924
+ apply_diff_to, comments = search_replace_edit(lines, apply_diff_to, console.log)
1045
925
 
1046
926
  if not BASH_STATE.is_in_docker:
1047
927
  with open(path_, "w") as f:
@@ -1070,9 +950,9 @@ def _do_diff_edit(fedit: FileEdit, max_tokens: Optional[int]) -> str:
1070
950
  )
1071
951
 
1072
952
  console.print(f"W: Syntax errors encountered: {syntax_errors}")
1073
- return f"""Wrote file succesfully.
953
+ return f"""{comments}
1074
954
  ---
1075
- However, tree-sitter reported syntax errors, please re-read the file and fix if there are any errors.
955
+ Tree-sitter reported syntax errors, please re-read the file and fix if there are any errors.
1076
956
  Syntax errors:
1077
957
  {syntax_errors}
1078
958
 
@@ -1081,7 +961,7 @@ Syntax errors:
1081
961
  except Exception:
1082
962
  pass
1083
963
 
1084
- return "Success"
964
+ return comments
1085
965
 
1086
966
 
1087
967
  class DoneFlag(BaseModel):
@@ -1123,6 +1003,7 @@ TOOLS = (
1123
1003
  | Keyboard
1124
1004
  | ScreenShot
1125
1005
  | GetScreenInfo
1006
+ | KnowledgeTransfer
1126
1007
  )
1127
1008
 
1128
1009
 
@@ -1164,6 +1045,8 @@ def which_tool_name(name: str) -> Type[TOOLS]:
1164
1045
  return ScreenShot
1165
1046
  elif name == "GetScreenInfo":
1166
1047
  return GetScreenInfo
1048
+ elif name == "KnowledgeTransfer":
1049
+ return KnowledgeTransfer
1167
1050
  else:
1168
1051
  raise ValueError(f"Unknown tool name: {name}")
1169
1052
 
@@ -1216,7 +1099,12 @@ def get_tool_output(
1216
1099
  elif isinstance(arg, Initialize):
1217
1100
  console.print("Calling initial info tool")
1218
1101
  output = (
1219
- initialize(arg.any_workspace_path, arg.initial_files_to_read, max_tokens),
1102
+ initialize(
1103
+ arg.any_workspace_path,
1104
+ arg.initial_files_to_read,
1105
+ arg.task_id_to_resume,
1106
+ max_tokens,
1107
+ ),
1220
1108
  0.0,
1221
1109
  )
1222
1110
  elif isinstance(arg, (Mouse, Keyboard, ScreenShot, GetScreenInfo)):
@@ -1254,6 +1142,20 @@ def get_tool_output(
1254
1142
  )
1255
1143
  BASH_STATE.set_in_docker(arg.docker_image_id)
1256
1144
  return outputs, outputs_cost[1]
1145
+ elif isinstance(arg, KnowledgeTransfer):
1146
+ console.print("Calling task memory tool")
1147
+ relevant_files = arg.relevant_file_paths
1148
+ for i, fpath in enumerate(relevant_files):
1149
+ if not os.path.isabs(fpath):
1150
+ relpath = os.path.join(arg.project_root_path, fpath)
1151
+ if os.path.exists(relpath):
1152
+ relevant_files[i] = relpath
1153
+ else:
1154
+ raise Exception(f"The file path {fpath} does not exist")
1155
+ elif not os.path.exists(fpath):
1156
+ raise Exception(f"The file path {fpath} does not exist")
1157
+ relevant_files_data = read_files(relevant_files, None)
1158
+ output = save_memory(arg, relevant_files_data), 0.0
1257
1159
  else:
1258
1160
  raise ValueError(f"Unknown tool: {arg}")
1259
1161
  if isinstance(output[0], str):
@@ -1282,6 +1184,7 @@ class Mdata(BaseModel):
1282
1184
  | str
1283
1185
  | ReadFiles
1284
1186
  | Initialize
1187
+ | KnowledgeTransfer
1285
1188
  )
1286
1189
 
1287
1190
 
@@ -1381,6 +1284,9 @@ def read_files(file_paths: list[str], max_tokens: Optional[int]) -> str:
1381
1284
  def read_file(file_path: str, max_tokens: Optional[int]) -> tuple[str, bool, int]:
1382
1285
  console.print(f"Reading file: {file_path}")
1383
1286
 
1287
+ # Expand the path before checking if it's absolute
1288
+ file_path = expand_user(file_path, BASH_STATE.is_in_docker)
1289
+
1384
1290
  if not os.path.isabs(file_path):
1385
1291
  raise ValueError(
1386
1292
  f"Failure: file_path should be absolute path, current working directory is {BASH_STATE.cwd}"