wcgw 2.6.3__py3-none-any.whl → 2.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of wcgw has been flagged as a potentially problematic release.

@@ -0,0 +1,119 @@
+import re
+from typing import Callable
+
+from .diff_edit import FileEditInput, FileEditOutput
+
+
+def search_replace_edit(
+    lines: list[str], original_content: str, logger: Callable[[str], object]
+) -> tuple[str, str]:
+    if not lines:
+        raise Exception("Error: No input to search replace edit")
+    original_lines = original_content.split("\n")
+    n_lines = len(lines)
+    i = 0
+    search_replace_blocks = list[tuple[list[str], list[str]]]()
+    while i < n_lines:
+        if re.match(r"^<<<<<<+\s*SEARCH\s*$", lines[i]):
+            search_block = []
+            i += 1
+            while i < n_lines and not re.match(r"^======*\s*$", lines[i]):
+                search_block.append(lines[i])
+                i += 1
+            i += 1
+            if not search_block:
+                raise Exception("SEARCH block can not be empty")
+            replace_block = []
+            while i < n_lines and not re.match(r"^>>>>>>+\s*REPLACE\s*$", lines[i]):
+                replace_block.append(lines[i])
+                i += 1
+            i += 1
+
+            for line in search_block:
+                logger("> " + line)
+            logger("=======")
+            for line in replace_block:
+                logger("< " + line)
+            logger("\n\n\n\n")
+
+            search_replace_blocks.append((search_block, replace_block))
+        else:
+            i += 1
+
+    if not search_replace_blocks:
+        raise Exception(
+            "No valid search replace blocks found, ensure your SEARCH/REPLACE blocks are formatted correctly"
+        )
+
+    edited_content, comments_ = greedy_context_replace(
+        original_lines, [[x] for x in search_replace_blocks], original_lines, set(), 0
+    )
+    edited_file = "\n".join(edited_content)
+    if not comments_:
+        comments = "Edited successfully"
+    else:
+        comments = (
+            "Edited successfully. However, following warnings were generated while matching search blocks.\n"
+            + "\n".join(comments_)
+        )
+    return edited_file, comments
+
+
+def greedy_context_replace(
+    original_lines: list[str],
+    search_replace_blocks: list[list[tuple[list[str], list[str]]]],
+    running_lines: list[str],
+    running_comments: set[str],
+    current_block_offset: int,
+) -> tuple[list[str], set[str]]:
+    if current_block_offset >= len(search_replace_blocks):
+        return running_lines, running_comments
+    current_blocks = search_replace_blocks[current_block_offset]
+
+    outputs = FileEditInput(running_lines, 0, current_blocks, 0).edit_file()
+    best_matches, is_error = FileEditOutput.get_best_match(outputs)
+
+    if is_error:
+        best_matches[0].replace_or_throw(3)
+        raise Exception("Shouldn't happen")
+
+    if len(best_matches) > 1:
+        # Duplicate found, try to ground using previous blocks.
+        if current_block_offset == 0:
+            raise Exception(f"""
+The following block matched more than once:
+---
+```
+{'\n'.join(current_blocks[-1][0])}
+```
+""")
+
+        else:
+            search_replace_blocks = (
+                search_replace_blocks[: current_block_offset - 1]
+                + [search_replace_blocks[current_block_offset - 1] + current_blocks]
+                + search_replace_blocks[current_block_offset + 1 :]
+            )
+            try:
+                return greedy_context_replace(
+                    original_lines, search_replace_blocks, original_lines, set(), 0
+                )
+            except Exception:
+                raise Exception(f"""
+The following block matched more than once:
+---
+```
+{'\n'.join(current_blocks[-1][0])}
+```
+""")
+
+    best_match = best_matches[0]
+    running_lines, comments = best_match.replace_or_throw(3)
+    running_comments = running_comments | comments
+    return greedy_context_replace(
+        original_lines,
+        search_replace_blocks,
+        running_lines,
+        running_comments,
+        current_block_offset + 1,
+    )
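For readers unfamiliar with the block format this new parser expects, here is a minimal illustrative sketch (not part of the release) of how `search_replace_edit` could be driven; it assumes the `FileEditInput`/`FileEditOutput` machinery imported from `diff_edit` resolves the match as intended:

```python
# Hypothetical input: one SEARCH/REPLACE block, using markers in the
# conventional 7-character form, which the regexes above accept.
edit_lines = [
    "<<<<<<< SEARCH",
    "def greet():",
    '    print("hello")',
    "=======",
    "def greet() -> None:",
    '    print("hello, world")',
    ">>>>>>> REPLACE",
]

original = 'def greet():\n    print("hello")\n'

# Returns the edited file text and a status/warning string; the logger
# callback receives human-readable trace lines for each parsed block.
edited_text, comments = search_replace_edit(edit_lines, original, logger=print)
```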
@@ -14,6 +14,7 @@ from mcp_wcgw.types import Tool as ToolParam
 from ...types_ import (
     BashCommand,
     BashInteraction,
+    ContextSave,
     FileEdit,
     GetScreenInfo,
     Initialize,
@@ -46,14 +47,47 @@ async def handle_read_resource(uri: AnyUrl) -> str:
 
 @server.list_prompts()  # type: ignore
 async def handle_list_prompts() -> list[types.Prompt]:
-    return []
+    return [
+        types.Prompt(
+            name="KnowledgeTransfer",
+            description="Prompt for invoking ContextSave tool in order to do a comprehensive knowledge transfer of a coding task. Prompts to save detailed error log and instructions.",
+        )
+    ]
 
 
 @server.get_prompt()  # type: ignore
 async def handle_get_prompt(
     name: str, arguments: dict[str, str] | None
 ) -> types.GetPromptResult:
-    return types.GetPromptResult(messages=[])
+    messages = []
+    if name == "KnowledgeTransfer":
+        messages = [
+            types.PromptMessage(
+                role="user",
+                content=types.TextContent(
+                    type="text",
+                    text="""Use `ContextSave` tool to do a knowledge transfer of the task in hand.
+Write detailed description in order to do a KT.
+Save all information necessary for a person to understand the task and the problems.
+
+Format the `description` field using Markdown with the following sections.
+- "# Objective" section containing project and task objective.
+- "# All user instructions" section should be provided containing all instructions user shared in the conversation.
+- "# Current status of the task" should be provided containing only what is already achieved, not what's remaining.
+- "# All issues with snippets" section containing snippets of error, traceback, file snippets, commands, etc. But no comments or solutions.
+- Be very verbose in the all issues with snippets section providing as much error context as possible.
+- "# Build and development instructions" section containing instructions to build or run project or run tests, or environment related information. Only include what's known. Leave empty if unknown.
+- After the tool completes successfully, tell me the task id and the file path the tool generated (important!)
+- This tool marks end of your conversation, do not run any further tools after calling this.
+
+Provide all relevant file paths in order to understand and solve the task. Err towards providing more file paths than fewer.
+
+(Note to self: this conversation can then be resumed later asking "Resume `<generated id>`" which should call Initialize tool)
+""",
+                ),
+            )
+        ]
+    return types.GetPromptResult(messages=messages)
 
 
 @server.list_tools()  # type: ignore
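As a rough illustration (assuming the handlers are imported as defined above and run inside an asyncio event loop), the new prompt plumbing can be exercised directly:

```python
import asyncio

async def demo() -> None:
    # handle_list_prompts now advertises the single KnowledgeTransfer prompt.
    prompts = await handle_list_prompts()
    assert prompts[0].name == "KnowledgeTransfer"

    # handle_get_prompt returns one user message carrying the KT instructions.
    result = await handle_get_prompt("KnowledgeTransfer", None)
    print(result.messages[0].content.text[:80])

asyncio.run(demo())
```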
@@ -82,6 +116,7 @@ async def handle_list_tools() -> list[types.Tool]:
 - If the user has mentioned a folder or file with unclear project root, use the file or folder as `any_workspace_path`.
 - If user has mentioned any files use `initial_files_to_read` to read, use absolute paths only.
 - If `any_workspace_path` is provided, a tree structure of the workspace will be shown.
+- Leave `any_workspace_path` as empty if no file or folder is mentioned.
 """,
         ),
         ToolParam(
@@ -152,6 +187,14 @@ async def handle_list_tools() -> list[types.Tool]:
 """
             + diffinstructions,
         ),
+        ToolParam(
+            inputSchema=ContextSave.model_json_schema(),
+            name="ContextSave",
+            description="""
+Saves provided description and file contents of all the relevant file paths or globs in a single text file.
+- Provide random unique id or whatever user provided.
+- Leave project path as empty string if no project path""",
+        ),
     ]
     if COMPUTER_USE_ON_DOCKER_ENABLED:
         tools += [
wcgw/client/memory.py ADDED
@@ -0,0 +1,78 @@
+import os
+import re
+import shlex
+from typing import Callable, Optional
+
+from ..types_ import ContextSave
+
+
+def get_app_dir_xdg() -> str:
+    xdg_data_dir = os.environ.get("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
+    return os.path.join(xdg_data_dir, "wcgw")
+
+
+def format_memory(task_memory: ContextSave, relevant_files: str) -> str:
+    memory_data = ""
+    if task_memory.project_root_path:
+        memory_data += (
+            f"# PROJECT ROOT = {shlex.quote(task_memory.project_root_path)}\n"
+        )
+    memory_data += task_memory.description
+
+    memory_data += (
+        "\n\n"
+        + "# Relevant file paths\n"
+        + ", ".join(map(shlex.quote, task_memory.relevant_file_globs))
+    )
+
+    memory_data += "\n\n# Relevant Files:\n" + relevant_files
+
+    return memory_data
+
+
+def save_memory(task_memory: ContextSave, relevant_files: str) -> str:
+    app_dir = get_app_dir_xdg()
+    memory_dir = os.path.join(app_dir, "memory")
+    os.makedirs(memory_dir, exist_ok=True)
+
+    task_id = task_memory.id
+    if not task_id:
+        raise Exception("Task id can not be empty")
+    memory_data = format_memory(task_memory, relevant_files)
+
+    memory_file_full = os.path.join(memory_dir, f"{task_id}.txt")
+
+    with open(memory_file_full, "w") as f:
+        f.write(memory_data)
+
+    return memory_file_full
+
+
+def load_memory[T](
+    task_id: str,
+    max_tokens: Optional[int],
+    encoder: Callable[[str], list[T]],
+    decoder: Callable[[list[T]], str],
+) -> tuple[str, str]:
+    app_dir = get_app_dir_xdg()
+    memory_dir = os.path.join(app_dir, "memory")
+    memory_file = os.path.join(memory_dir, f"{task_id}.txt")
+
+    with open(memory_file, "r") as f:
+        data = f.read()
+
+    if max_tokens:
+        toks = encoder(data)
+        if len(toks) > max_tokens:
+            toks = toks[: max(0, max_tokens - 10)]
+            data = decoder(toks)
+            data += "\n(... truncated)"
+
+    project_root_match = re.search(r"# PROJECT ROOT = \s*(.*?)\s*$", data, re.MULTILINE)
+    project_root_path = ""
+    if project_root_match:
+        matched_path = project_root_match.group(1)
+        parsed_ = shlex.split(matched_path)
+        if parsed_ and len(parsed_) == 1:
+            project_root_path = parsed_[0]
+    return project_root_path, data
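A hypothetical round trip through these helpers; the `ContextSave` field names are inferred from how `format_memory` and `save_memory` read the model (the actual schema lives in `wcgw/types_.py`, which is not shown in this diff):

```python
ctx = ContextSave(
    id="task-1234",
    project_root_path="/home/me/myproject",
    description="# Objective\nFix the failing parser tests",
    relevant_file_globs=["src/parser.py", "tests/test_parser.py"],
)

# relevant_files is the already-read file content the caller supplies.
path = save_memory(ctx, relevant_files="<file contents gathered by the caller>")
print(path)  # e.g. ~/.local/share/wcgw/memory/task-1234.txt

# Reload later; encoder/decoder let the caller cap the memory at max_tokens.
root, text = load_memory(
    "task-1234",
    max_tokens=None,
    encoder=lambda s: list(s),        # trivial per-character "tokens" for the sketch
    decoder=lambda toks: "".join(toks),
)
assert root == "/home/me/myproject"   # recovered from the "# PROJECT ROOT =" header
```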
@@ -26,6 +26,7 @@ from typer import Typer
 from ..types_ import (
     BashCommand,
     BashInteraction,
+    ContextSave,
     FileEdit,
     ReadFiles,
     ReadImage,
@@ -33,11 +34,14 @@ from ..types_ import (
     WriteIfEmpty,
 )
 from .common import CostData, History, Models, discard_input
+from .memory import load_memory
 from .openai_utils import get_input_cost, get_output_cost
 from .tools import (
     DoneFlag,
     ImageData,
+    default_enc,
     get_tool_output,
+    initialize,
     which_tool,
 )
 
@@ -117,19 +121,29 @@ def loop(
 
     history: History = []
     waiting_for_assistant = False
+
+    memory = None
     if resume:
-        if resume == "latest":
-            resume_path = sorted(Path(".wcgw").iterdir(), key=os.path.getmtime)[-1]
-        else:
-            resume_path = Path(resume)
-        if not resume_path.exists():
-            raise FileNotFoundError(f"File {resume} not found")
-        with resume_path.open() as f:
-            history = json.load(f)
-        if len(history) <= 2:
-            raise ValueError("Invalid history file")
-        first_message = ""
-        waiting_for_assistant = history[-1]["role"] != "assistant"
+        try:
+            _, memory = load_memory(
+                resume,
+                8000,
+                lambda x: default_enc.encode(x).ids,
+                lambda x: default_enc.decode(x),
+            )
+        except OSError:
+            if resume == "latest":
+                resume_path = sorted(Path(".wcgw").iterdir(), key=os.path.getmtime)[-1]
+            else:
+                resume_path = Path(resume)
+            if not resume_path.exists():
+                raise FileNotFoundError(f"File {resume} not found")
+            with resume_path.open() as f:
+                history = json.load(f)
+            if len(history) <= 2:
+                raise ValueError("Invalid history file")
+            first_message = ""
+            waiting_for_assistant = history[-1]["role"] != "assistant"
 
     my_dir = os.path.dirname(__file__)
 
@@ -202,10 +216,19 @@ def loop(
             ResetShell,
             description="Resets the shell. Use only if all interrupts and prompt reset attempts have failed repeatedly.",
         ),
+        openai.pydantic_function_tool(
+            ContextSave,
+            description="""
+
+Saves provided description and file contents of all the relevant file paths or globs in a single text file.
+- Provide random unique id or whatever user provided.
+- Leave project path as empty string if no project path""",
+        ),
     ]
-    uname_sysname = os.uname().sysname
-    uname_machine = os.uname().machine
 
+    initial_info = initialize(
+        os.getcwd(), [], resume if (memory and resume) else "", 8000
+    )
     system = f"""
 You're an expert software engineer with shell and code knowledge.
 
@@ -217,10 +240,7 @@ Instructions:
 - Do not provide code snippets unless asked by the user, instead directly add/edit the code.
 - Do not install new tools/packages before ensuring no such tools/package or an alternative already exists.
 
-System information:
-- System: {uname_sysname}
-- Machine: {uname_machine}
-- Current directory: {os.getcwd()}
+{initial_info}
 
 """