wcgw 5.5.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
wcgw/py.typed ADDED
File without changes
wcgw/types_.py ADDED
@@ -0,0 +1,318 @@
1
+ import os
2
+ import re
3
+ from typing import Any, List, Literal, Optional, Protocol, Sequence, Union
4
+
5
+ from pydantic import BaseModel as PydanticBaseModel
6
+ from pydantic import Field, PrivateAttr
7
+
8
+
9
def normalize_thread_id(thread_id: str) -> str:
    """Normalize a thread id by dropping every non-word character.

    Only alphanumerics and underscores (regex ``\\w``) survive; spaces,
    dashes and punctuation are removed.
    """
    return re.sub(r"\W+", "", thread_id)
12
+
13
+
14
class NoExtraArgs(PydanticBaseModel):
    """Base model that rejects unknown fields instead of silently ignoring them."""

    class Config:
        # Pydantic config: raise a validation error on any extra/unknown field.
        extra = "forbid"


# All tool-input models in this module inherit the strict no-extra-args base.
BaseModel = NoExtraArgs


# The three operating modes supported by the agent.
Modes = Literal["wcgw", "architect", "code_writer"]
23
+
24
+
25
class CodeWriterMode(BaseModel):
    """Restrictions applied in ``code_writer`` mode.

    Each field is either the literal ``"all"`` (unrestricted) or an
    explicit allow-list of glob patterns / commands.
    """

    allowed_globs: Literal["all"] | list[str]
    allowed_commands: Literal["all"] | list[str]

    def model_post_init(self, _: Any) -> None:
        # Patch frequently wrong output trading off accuracy
        # in rare case there's a file named 'all' or a command named 'all':
        # a single-element ["all"] list is treated as the literal "all".
        if len(self.allowed_commands) == 1:
            if self.allowed_commands[0] == "all":
                self.allowed_commands = "all"
        if len(self.allowed_globs) == 1:
            if self.allowed_globs[0] == "all":
                self.allowed_globs = "all"

    def update_relative_globs(self, workspace_root: str) -> None:
        """Update globs if they're relative paths (anchor them at workspace_root)."""
        if self.allowed_globs != "all":
            self.allowed_globs = [
                glob if os.path.isabs(glob) else os.path.join(workspace_root, glob)
                for glob in self.allowed_globs
            ]


# Resolved mode configuration: a plain literal for wcgw/architect,
# a full CodeWriterMode for code_writer.
ModesConfig = Union[Literal["wcgw", "architect"], CodeWriterMode]
49
+
50
+
51
class Initialize(BaseModel):
    """Tool input for (re)initialising the shell / workspace session.

    Raises:
        ValueError: if ``code_writer_config`` is missing while
            ``mode_name`` is ``"code_writer"``, or if ``thread_id`` is
            empty for any ``type`` other than ``"first_call"``.
    """

    type: Literal[
        "first_call",
        "user_asked_mode_change",
        "reset_shell",
        "user_asked_change_workspace",
    ]
    any_workspace_path: str = Field(
        description="Workspace to initialise in. Don't use ~ by default, instead use empty string"
    )
    initial_files_to_read: list[str] = Field(
        description="Array of one or more files to read. Provide [] if no files mentioned."
    )
    task_id_to_resume: str
    mode_name: Literal["wcgw", "architect", "code_writer"]
    thread_id: str = Field(
        description="Use the thread_id created in first_call, leave it as empty string if first_call"
    )
    code_writer_config: Optional[CodeWriterMode] = None

    def model_post_init(self, __context: Any) -> None:
        # Sanitise the LLM-supplied thread id before use.
        self.thread_id = normalize_thread_id(self.thread_id)
        # Raise ValueError (not assert) so validation survives `python -O`,
        # matching the thread_id check below.
        if self.mode_name == "code_writer" and self.code_writer_config is None:
            raise ValueError(
                "code_writer_config can't be null when the mode is code_writer"
            )
        if self.type != "first_call" and not self.thread_id:
            raise ValueError(
                "Thread id should be provided if type != 'first_call', including when resetting"
            )
        return super().model_post_init(__context)

    @property
    def mode(self) -> ModesConfig:
        """Resolved mode config; non-None config was enforced in post-init."""
        if self.mode_name == "wcgw":
            return "wcgw"
        if self.mode_name == "architect":
            return "architect"
        assert self.code_writer_config is not None, (
            "code_writer_config can't be null when the mode is code_writer"
        )
        return self.code_writer_config
93
+
94
+
95
class Command(BaseModel):
    """Action: run a shell command, optionally as a background job."""

    command: str
    type: Literal["command"] = "command"
    # When True the command is started in the background instead of the
    # interactive foreground shell.
    is_background: bool = False
99
+
100
+
101
class StatusCheck(BaseModel):
    """Action: poll the status/output of a running command."""

    status_check: Literal[True] = True
    type: Literal["status_check"] = "status_check"
    # Target a specific background command; None means the foreground shell.
    bg_command_id: str | None = None
105
+
106
+
107
class SendText(BaseModel):
    """Action: type literal text into the running terminal."""

    send_text: str
    type: Literal["send_text"] = "send_text"
    # Target a specific background command; None means the foreground shell.
    bg_command_id: str | None = None
111
+
112
+
113
# Special keys that can be sent to the terminal (arrows, Enter, Ctrl-c/d).
Specials = Literal[
    "Enter", "Key-up", "Key-down", "Key-left", "Key-right", "Ctrl-c", "Ctrl-d"
]


class SendSpecials(BaseModel):
    """Action: send one or more special keys (see ``Specials``) to the terminal."""

    send_specials: Sequence[Specials]
    type: Literal["send_specials"] = "send_specials"
    # Target a specific background command; None means the foreground shell.
    bg_command_id: str | None = None
122
+
123
+
124
class SendAscii(BaseModel):
    """Action: send raw ASCII codes (one int per character) to the terminal."""

    send_ascii: Sequence[int]
    type: Literal["send_ascii"] = "send_ascii"
    # Target a specific background command; None means the foreground shell.
    bg_command_id: str | None = None
128
+
129
+
130
class ActionJsonSchema(BaseModel):
    """Flattened variant of the BashCommand action union, for JSON schema only.

    Mirrors Command/StatusCheck/SendText/SendSpecials/SendAscii as optional
    fields discriminated by ``type`` — presumably because some LLM backends
    handle a flat object better than a tagged union (see the
    ``model_json_schema`` override on ``BashCommand``).
    """

    type: Literal[
        "command", "status_check", "send_text", "send_specials", "send_ascii"
    ] = Field(description="type of action.")
    command: Optional[str] = Field(
        default=None, description='Set only if type="command"'
    )
    status_check: Optional[Literal[True]] = Field(
        default=None, description='Set only if type="status_check"'
    )
    send_text: Optional[str] = Field(
        default=None, description='Set only if type="send_text"'
    )
    send_specials: Optional[Sequence[Specials]] = Field(
        default=None, description='Set only if type="send_specials"'
    )
    send_ascii: Optional[Sequence[int]] = Field(
        default=None, description='Set only if type="send_ascii"'
    )
    is_background: bool = Field(
        default=False,
        description='Set only if type="command" and running the command in background',
    )
    bg_command_id: str | None = Field(
        default=None,
        description='Set only if type!="command" and doing action on a running background command',
    )
157
+
158
+
159
class BashCommandOverride(BaseModel):
    """Schema-only stand-in for ``BashCommand`` using the flat action schema."""

    action_json: ActionJsonSchema
    wait_for_seconds: Optional[float] = None
    thread_id: str
163
+
164
+
165
class BashCommand(BaseModel):
    """Tool input: execute one terminal action within a conversation thread."""

    action_json: Command | StatusCheck | SendText | SendSpecials | SendAscii
    wait_for_seconds: Optional[float] = None
    thread_id: str

    def model_post_init(self, __context: Any) -> None:
        # Sanitise the LLM-supplied thread id before use.
        self.thread_id = normalize_thread_id(self.thread_id)
        return super().model_post_init(__context)

    @staticmethod
    def model_json_schema(*args, **kwargs) -> dict[str, Any]:  # type: ignore
        # Deliberately shadows pydantic's classmethod: expose the flat
        # BashCommandOverride schema to backends instead of the tagged union.
        return BashCommandOverride.model_json_schema(*args, **kwargs)
177
+
178
+
179
class ReadImage(BaseModel):
    """Tool input: read an image file so it can be shown to the model."""

    file_path: str
181
+
182
+
183
class WriteIfEmpty(BaseModel):
    """Tool input: create a new file or write to an existing empty one."""

    file_path: str
    file_content: str
186
+
187
+
188
class ReadFiles(BaseModel):
    """Tool input: read one or more files, with optional line ranges.

    Each entry of ``file_paths`` may carry a trailing line-range suffix:

    - ``file.py:10``    start at line 10
    - ``file.py:10-20`` lines 10 through 20
    - ``file.py:10-``   from line 10 to the end
    - ``file.py:-20``   from the start up to line 20

    After validation ``file_paths`` holds the bare paths and the parsed
    bounds are exposed via ``start_line_nums`` / ``end_line_nums``
    (one entry per path, ``None`` meaning "unbounded on that side").
    """

    file_paths: list[str]
    # Parsed per-path line bounds, parallel to file_paths.
    _start_line_nums: List[Optional[int]] = PrivateAttr(default_factory=lambda: [])
    _end_line_nums: List[Optional[int]] = PrivateAttr(default_factory=lambda: [])

    @property
    def show_line_numbers_reason(self) -> str:
        return "True"

    @property
    def start_line_nums(self) -> List[Optional[int]]:
        """Start line (1-based) per file, or None for 'from the beginning'."""
        return self._start_line_nums

    @property
    def end_line_nums(self) -> List[Optional[int]]:
        """End line (inclusive) per file, or None for 'to the end'."""
        return self._end_line_nums

    @staticmethod
    def _parse_line_range(file_path: str) -> "tuple[str, Optional[int], Optional[int]]":
        """Split a trailing ``:start-end`` spec off *file_path*.

        Returns ``(clean_path, start, end)``; the path is returned
        unchanged (with bounds None) when no valid suffix is present.
        """
        if ":" not in file_path:
            return file_path, None, None
        potential_path, line_spec = file_path.rsplit(":", 1)
        # NOTE: str.isdigit() accepts some unicode digits that int() rejects,
        # so the int() calls stay guarded by try/except.
        if line_spec.isdigit():
            # Format: file.py:10
            try:
                return potential_path, int(line_spec), None
            except ValueError:
                return file_path, None, None
        if "-" in line_spec:
            start_part, end_part = line_spec.split("-", 1)
            if not start_part and end_part.isdigit():
                # Format: file.py:-20
                try:
                    return potential_path, None, int(end_part)
                except ValueError:
                    return file_path, None, None
            if start_part.isdigit():
                # Format: file.py:10-20 or file.py:10-
                try:
                    start = int(start_part)
                except ValueError:
                    return file_path, None, None
                try:
                    end = int(end_part) if end_part.isdigit() else None
                    return potential_path, start, end
                except ValueError:
                    # Start parsed but end failed: keep the original path,
                    # matching the pre-refactor behaviour.
                    return file_path, start, None
        return file_path, None, None

    def model_post_init(self, __context: Any) -> None:
        # Strip line-range suffixes from every path, recording the parsed
        # bounds in the parallel private attribute lists.
        self._start_line_nums = []
        self._end_line_nums = []
        clean_file_paths = []

        for file_path in self.file_paths:
            path_part, start_line_num, end_line_num = self._parse_line_range(file_path)
            clean_file_paths.append(path_part)
            self._start_line_nums.append(start_line_num)
            self._end_line_nums.append(end_line_num)

        # Replace file_paths with the suffix-free paths.
        self.file_paths = clean_file_paths

        return super().model_post_init(__context)
281
+
282
+
283
class FileEdit(BaseModel):
    """Tool input: edit an existing file using search/replace blocks."""

    file_path: str
    file_edit_using_search_replace_blocks: str
286
+
287
+
288
class FileWriteOrEdit(BaseModel):
    """Tool input: write full file content or apply search/replace edits.

    Field numbering in the descriptions fixes the order the LLM must emit
    them in the tool XML.
    """

    # Naming should be in sorted order otherwise it gets changed in LLM backend.
    file_path: str = Field(description="#1: absolute file path")
    percentage_to_change: int = Field(
        description="#2: predict this percentage, calculated as number of existing lines that will have some diff divided by total existing lines."
    )
    text_or_search_replace_blocks: str = Field(
        description="#3: content/edit blocks. Must be after #2 in the tool xml"
    )
    thread_id: str = Field(description="#4: thread_id")

    def model_post_init(self, __context: Any) -> None:
        # Sanitise the LLM-supplied thread id before use.
        self.thread_id = normalize_thread_id(self.thread_id)
        return super().model_post_init(__context)
302
+
303
+
304
class ContextSave(BaseModel):
    """Tool input: save a task checkpoint (description + relevant files)."""

    id: str
    project_root_path: str
    description: str
    relevant_file_globs: list[str]
309
+
310
+
311
class Console(Protocol):
    """Structural interface for a console offering print/log (rich-style)."""

    def print(self, msg: str, *args: Any, **kwargs: Any) -> None: ...

    def log(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
315
+
316
+
317
class Mdata(PydanticBaseModel):
    """Message envelope wrapping any tool payload (or a plain string).

    NOTE(review): intentionally inherits the plain pydantic base, not the
    strict NoExtraArgs base used by the tool models above.
    """

    data: BashCommand | FileWriteOrEdit | str | ReadFiles | Initialize | ContextSave
@@ -0,0 +1,339 @@
1
+ Metadata-Version: 2.4
2
+ Name: wcgw
3
+ Version: 5.5.4
4
+ Summary: Shell and coding agent for Claude and other mcp clients
5
+ Project-URL: Homepage, https://github.com/rusiaaman/wcgw
6
+ Author-email: Aman Rusia <gapypi@arcfu.com>
7
+ License-File: LICENSE
8
+ Requires-Python: >=3.11
9
+ Requires-Dist: anthropic>=0.39.0
10
+ Requires-Dist: fastapi>=0.115.0
11
+ Requires-Dist: mcp>=1.7.0
12
+ Requires-Dist: openai>=1.46.0
13
+ Requires-Dist: petname>=2.6
14
+ Requires-Dist: pexpect>=4.9.0
15
+ Requires-Dist: psutil>=7.0.0
16
+ Requires-Dist: pydantic>=2.9.2
17
+ Requires-Dist: pygit2>=1.16.0
18
+ Requires-Dist: pyte>=0.8.2
19
+ Requires-Dist: python-dotenv>=1.0.1
20
+ Requires-Dist: rich>=13.8.1
21
+ Requires-Dist: semantic-version>=2.10.0
22
+ Requires-Dist: syntax-checker==0.4.0
23
+ Requires-Dist: tokenizers>=0.21.0
24
+ Requires-Dist: toml>=0.10.2
25
+ Requires-Dist: tree-sitter-bash>=0.23.3
26
+ Requires-Dist: tree-sitter>=0.24.0
27
+ Requires-Dist: typer>=0.12.5
28
+ Requires-Dist: uvicorn>=0.31.0
29
+ Requires-Dist: wcmatch>=10.1
30
+ Requires-Dist: websockets>=13.1
31
+ Description-Content-Type: text/markdown
32
+
33
+ # Shell and Coding agent for Claude and other mcp clients
34
+
35
+ Empowering chat applications to code, build and run on your local machine.
36
+
37
+ wcgw is an MCP server with tightly integrated shell and code editing tools.
38
+
39
+ ⚠️ Warning: do not allow BashCommand tool without reviewing the command, it may result in data loss.
40
+
41
+ [![Tests](https://github.com/rusiaaman/wcgw/actions/workflows/python-tests.yml/badge.svg?branch=main)](https://github.com/rusiaaman/wcgw/actions/workflows/python-tests.yml)
42
+ [![Mypy strict](https://github.com/rusiaaman/wcgw/actions/workflows/python-types.yml/badge.svg?branch=main)](https://github.com/rusiaaman/wcgw/actions/workflows/python-types.yml)
43
+ [![Build](https://github.com/rusiaaman/wcgw/actions/workflows/python-publish.yml/badge.svg)](https://github.com/rusiaaman/wcgw/actions/workflows/python-publish.yml)
44
+ [![codecov](https://codecov.io/gh/rusiaaman/wcgw/graph/badge.svg)](https://codecov.io/gh/rusiaaman/wcgw)
45
+
46
+ ## Demo
47
+
48
+ ![Workflow Demo](static/workflow-demo.gif)
49
+
50
+ ## Updates
51
+
52
+ - [6 Oct 2025] Model can now run multiple commands in background. ZSH is now a supported shell. Multiplexing improvements.
53
+
54
+ - [27 Apr 2025] Removed support for GPTs over relay server. Only MCP server is supported in version >= 5.
55
+
56
+ - [24 Mar 2025] Improved writing and editing experience for sonnet 3.7, CLAUDE.md gets loaded automatically.
57
+
58
+ - [16 Feb 2025] You can now attach to the working terminal that the AI uses. See the "attach-to-terminal" section below.
59
+
60
+ - [15 Jan 2025] Modes introduced: architect, code-writer, and all powerful wcgw mode.
61
+
62
+ - [8 Jan 2025] Context saving tool for saving relevant file paths along with a description in a single file. Can be used as a task checkpoint or for knowledge transfer.
63
+
64
+ - [29 Dec 2024] Syntax checking on file writing and edits is now stable. Made `initialize` tool call useful; sending smart repo structure to claude if any repo is referenced. Large file handling is also now improved.
65
+
66
+ - [9 Dec 2024] [Vscode extension to paste context on Claude app](https://marketplace.visualstudio.com/items?itemName=AmanRusia.wcgw)
67
+
68
+ ## 🚀 Highlights
69
+
70
+ - ⚡ **Create, Execute, Iterate**: Ask claude to keep running compiler checks till all errors are fixed, or ask it to keep checking for the status of a long running command till it's done.
71
+ - ⚡ **Large file edit**: Supports large file incremental edits to avoid token limit issues. Smartly selects when to do small edits or large rewrite based on % of change needed.
72
+ - ⚡ **Syntax checking on edits**: Reports feedback to the LLM if its edits have any syntax errors, so that it can redo it.
73
+ - ⚡ **Interactive Command Handling**: Supports interactive commands using arrow keys, interrupt, and ansi escape sequences.
74
+ - ⚡ **File protections**:
75
+ - The AI needs to read a file at least once before it's allowed to edit or rewrite it. This avoids accidental overwrites.
76
+ - Avoids context filling up while reading very large files. Files get chunked based on token length.
77
+ - On initialisation the provided workspace's directory structure is returned after selecting important files (based on .gitignore as well as a statistical approach)
78
+ - File edit based on search-replace tries to find correct search block if it has multiple matches based on previous search blocks. Fails otherwise (for correctness).
79
+ - File edit has spacing tolerant matching, with warning on issues like indentation mismatch. If there's no match, the closest match is returned to the AI to fix its mistakes.
80
+ - Using Aider-like search and replace, which has better performance than tool call based search and replace.
81
+ - ⚡ **Shell optimizations**:
82
+ - Current working directory is always returned after any shell command to prevent AI from getting lost.
83
+ - Command polling exits after a quick timeout to avoid slow feedback. However, status checking has wait tolerance based on fresh output streaming from a command. Both of these approaches combined provide a good shell interaction experience.
84
+ - Supports multiple concurrent background commands alongside the main interactive shell.
85
+ - ⚡ **Saving repo context in a single file**: Task checkpointing using "ContextSave" tool saves detailed context in a single file. Tasks can later be resumed in a new chat asking "Resume `task id`". The saved file can be used to do other kinds of knowledge transfer, such as taking help from another AI.
86
+ - ⚡ **Easily switch between various modes**:
87
+ - Ask it to run in 'architect' mode for planning. Inspired by Aider's architect mode, work with Claude to come up with a plan first. Leads to better accuracy and prevents premature file editing.
88
+ - Ask it to run in 'code-writer' mode for code editing and project building. You can provide specific paths with wild card support to prevent other files getting edited.
89
+ - By default it runs in 'wcgw' mode that has no restrictions and full authorisation.
90
+ - More details in [Modes section](#modes)
91
+ - ⚡ **Runs in multiplex terminal** Use [vscode extension](https://marketplace.visualstudio.com/items?itemName=AmanRusia.wcgw) or run `screen -x` to attach to the terminal that the AI runs commands on. See history or interrupt process or interact with the same terminal that AI uses.
92
+ - ⚡ **Automatically load CLAUDE.md/AGENTS.md** Loads "CLAUDE.md" or "AGENTS.md" file in project root and sends as instructions during initialisation. Instructions in a global "~/.wcgw/CLAUDE.md" or "~/.wcgw/AGENTS.md" file are loaded and added along with project specific CLAUDE.md. The file name is case sensitive. CLAUDE.md is attached if it's present otherwise AGENTS.md is attached.
93
+
94
+ ## Top use cases examples
95
+
96
+ - Solve problem X using python, create and run test cases and fix any issues. Do it in a temporary directory
97
+ - Find instances of code with X behavior in my repository
98
+ - Git clone https://github.com/my/repo in my home directory, then understand the project, set up the environment and build
99
+ - Create a golang htmx tailwind webapp, then open browser to see if it works (use with puppeteer mcp)
100
+ - Edit or update a large file
101
+ - In a separate branch create feature Y, then use github cli to create a PR to original branch
102
+ - Command X is failing in Y directory, please run and fix issues
103
+ - Using X virtual environment run Y command
104
+ - Using cli tools, create build and test an android app. Finally run it using emulator for me to use
105
+ - Fix all mypy issues in my repo at X path.
106
+ - Using 'screen' run my server in background instead, then run another api server in bg, finally run the frontend build. Keep checking logs for any issues in all three
107
+ - Create repo wide unittest cases. Keep iterating through files and creating cases. Also keep running the tests after each update. Do not modify original code.
108
+
109
+ ## Claude setup (using mcp)
110
+
111
+ ### Mac and linux
112
+
113
+ First install `uv` using homebrew `brew install uv`
114
+
115
+ (**Important:** use homebrew to install uv. Otherwise make sure `uv` is present in a global location like /usr/bin/)
116
+
117
+ Then create or update `claude_desktop_config.json` (~/Library/Application Support/Claude/claude_desktop_config.json) with following json.
118
+
119
+ ```json
120
+ {
121
+ "mcpServers": {
122
+ "wcgw": {
123
+ "command": "uvx",
124
+ "args": ["wcgw@latest"]
125
+ }
126
+ }
127
+ }
128
+ ```
129
+
130
+ Then restart claude app.
131
+
132
+ **Optional: Force a specific shell**
133
+
134
+ To use a specific shell (bash or zsh), add the `--shell` argument:
135
+
136
+ ```json
137
+ {
138
+ "mcpServers": {
139
+ "wcgw": {
140
+ "command": "uvx",
141
+ "args": ["wcgw@latest", "--shell", "/bin/bash"]
142
+ }
143
+ }
144
+ }
145
+ ```
146
+
147
+ _If there's an error in setting up_
148
+
149
+ - If there's an error like "uv ENOENT", make sure `uv` is installed. Then run 'which uv' in the terminal, and use its output in place of "uv" in the configuration.
150
+ - If there's still an issue, check that `uv tool run --python 3.12 wcgw` runs in your terminal. It should have no output and shouldn't exit.
151
+ - Try removing ~/.cache/uv folder
152
+ - Try using `uv` version `0.6.0` for which this tool was tested.
153
+ - Debug the mcp server using `npx @modelcontextprotocol/inspector@0.1.7 uv tool run --python 3.12 wcgw`
154
+
155
+ ### Windows on wsl
156
+
157
+ This mcp server works only on wsl on windows.
158
+
159
+ To set it up, [install uv](https://docs.astral.sh/uv/getting-started/installation/)
160
+
161
+ Then add or update the claude config file `%APPDATA%\Claude\claude_desktop_config.json` with the following
162
+
163
+ ```json
164
+ {
165
+ "mcpServers": {
166
+ "wcgw": {
167
+ "command": "wsl.exe",
168
+ "args": ["uvx", "wcgw@latest"]
169
+ }
170
+ }
171
+ }
172
+ ```
173
+ When you encounter an error, execute the command `wsl uv tool run --python 3.12 wcgw` in command prompt. If you get the `error /bin/bash: line 1: uv: command not found`, it means uv was not installed globally and you need to point to the correct path of uv.
174
+ 1. Find where uv is installed:
175
+ ```bash
176
+ whereis uv
177
+ ```
178
+ Example output:
179
+ ```uv: /home/mywsl/.local/bin/uv```
180
+
181
+ 2. Test the full path works:
182
+ ```
183
+ wsl /home/mywsl/.local/bin/uv tool run --python 3.12 wcgw
184
+ ```
185
+
186
+ 3. Update the config with the full path:
187
+ ```
188
+ {
189
+ "mcpServers": {
190
+ "wcgw": {
191
+ "command": "wsl.exe",
192
+ "args": ["/home/mywsl/.local/bin/uv", "tool", "run", "--python", "3.12", "wcgw"]
193
+ }
194
+ }
195
+ }
196
+ ```
197
+ Replace `/home/mywsl/.local/bin/uv` with your actual uv path from step 1.
198
+
199
+ ### Usage
200
+
201
+ Wait for a few seconds. You should be able to see this icon if everything goes right.
202
+
203
+ ![mcp icon](https://github.com/rusiaaman/wcgw/blob/main/static/rocket-icon.png?raw=true)
204
+ over here
205
+
206
+ ![mcp icon](https://github.com/rusiaaman/wcgw/blob/main/static/claude-ss.jpg?raw=true)
207
+
208
+ Then ask claude to execute shell commands, read files, edit files, run your code, etc.
209
+
210
+ #### Task checkpoint or knowledge transfer
211
+
212
+ - You can do a task checkpoint or a knowledge transfer by attaching "KnowledgeTransfer" prompt using "Attach from MCP" button.
213
+ - On running "KnowledgeTransfer" prompt, the "ContextSave" tool will be called saving the task description and all file content together in a single file. An id for the task will be generated.
214
+ - You can in a new chat say "Resume '<task id>'", the AI should then call "Initialize" with the task id and load the context from there.
215
+ - Or you can directly open the file generated and share it with another AI for help.
216
+
217
+ #### Modes
218
+
219
+ There are three built-in modes. You may ask Claude to run in one of the modes, like "Use 'architect' mode"
220
+ | **Mode** | **Description** | **Allows** | **Denies** | **Invoke prompt** |
221
+ |-----------------|-----------------------------------------------------------------------------|---------------------------------------------------------|----------------------------------------------|----------------------------------------------------------------------------------------------------|
222
+ | **Architect** | Designed for you to work with Claude to investigate and understand your repo. | Read-only commands | FileEdit and Write tool | Run in mode='architect' |
223
+ | **Code-writer** | For code writing and development | Specified path globs for editing or writing, specified commands | FileEdit for paths not matching specified glob, Write for paths not matching specified glob | Run in code writer mode, only 'tests/**' allowed, only uv command allowed |
224
+ | **wcgw** | Default mode with everything allowed | Everything | Nothing | No prompt, or "Run in wcgw mode" |
225
+
226
+ Note: in code-writer mode either all commands are allowed or none are allowed for now. If you give a list of allowed commands, Claude is instructed to run only those commands, but no actual check happens. (WIP)
227
+
228
+ #### Attach to the working terminal to investigate
229
+
230
+ NEW: the [vscode extension](https://marketplace.visualstudio.com/items?itemName=AmanRusia.wcgw) now automatically attaches to the running terminal
231
+ if workspace path matches.
232
+
233
+ If you've `screen` command installed, wcgw runs on a screen instance automatically. If you've started wcgw mcp server, you can list the screen sessions:
234
+
235
+ `screen -ls`
236
+
237
+ And note down the wcgw screen name which will be something like `93358.wcgw.235521` where the last number is in the hour-minute-second format.
238
+
239
+ You can then attach to the session using `screen -x 93358.wcgw.235521`
240
+
241
+ You may interrupt any running command safely.
242
+
243
+ You can interact with the terminal safely, for example for entering passwords, or entering some text. (Warning: If you run a new command, any new LLM command will interrupt it.)
244
+
245
+ You shouldn't exit the session using `exit `or Ctrl-d, instead you should use `ctrl+a+d` to safely detach without destroying the screen session.
246
+
247
+ Include the following in ~/.screenrc for better scrolling experience
248
+ ```
249
+ defscrollback 10000
250
+ termcapinfo xterm* ti@:te@
251
+ ```
252
+
253
+ ### [Optional] Vs code extension
254
+
255
+ https://marketplace.visualstudio.com/items?itemName=AmanRusia.wcgw
256
+
257
+ Commands:
258
+
259
+ - Select a text and press `cmd+'` and then enter instructions. This will switch the app to Claude and paste a text containing your instructions, file path, workspace dir, and the selected text.
260
+
261
+ ## Examples
262
+
263
+ ![example](https://github.com/rusiaaman/wcgw/blob/main/static/example.jpg?raw=true)
264
+
265
+ ## Using mcp server over docker
266
+
267
+ First build the docker image `docker build -t wcgw https://github.com/rusiaaman/wcgw.git`
268
+
269
+ Then you can update `/Users/username/Library/Application Support/Claude/claude_desktop_config.json` to have
270
+
271
+ ```
272
+ {
273
+ "mcpServers": {
274
+ "wcgw": {
275
+ "command": "docker",
276
+ "args": [
277
+ "run",
278
+ "-i",
279
+ "--rm",
280
+ "--mount",
281
+ "type=bind,src=/Users/username/Desktop,dst=/workspace/Desktop",
282
+ "wcgw"
283
+ ]
284
+ }
285
+ }
286
+ }
287
+ ```
288
+
289
+ ## [Optional] Local shell access with openai API key or anthropic API key
290
+
291
+ ### Openai
292
+
293
+ Add `OPENAI_API_KEY` and `OPENAI_ORG_ID` env variables.
294
+
295
+ Then run
296
+
297
+ `uvx wcgw wcgw_local --limit 0.1` # Cost limit $0.1
298
+
299
+ You can now directly write messages or press enter key to open vim for multiline message and text pasting.
300
+
301
+ ### Anthropic
302
+
303
+ Add `ANTHROPIC_API_KEY` env variable.
304
+
305
+ Then run
306
+
307
+ `uvx wcgw wcgw_local --claude`
308
+
309
+ You can now directly write messages or press enter key to open vim for multiline message and text pasting.
310
+
311
+ ## Tools
312
+
313
+ The server provides the following MCP tools:
314
+
315
+ **Shell Operations:**
316
+
317
+ - `Initialize`: Reset shell and set up workspace environment
318
+ - Parameters: `any_workspace_path` (string), `initial_files_to_read` (string[]), `mode_name` ("wcgw"|"architect"|"code_writer"), `task_id_to_resume` (string)
319
+ - `BashCommand`: Execute shell commands with timeout control
320
+ - Parameters: `command` (string), `wait_for_seconds` (int, optional)
321
+ - Parameters: `send_text` (string) or `send_specials` (["Enter"|"Key-up"|...]) or `send_ascii` (int[]), `wait_for_seconds` (int, optional)
322
+
323
+ **File Operations:**
324
+
325
+ - `ReadFiles`: Read content from one or more files
326
+ - Parameters: `file_paths` (string[])
327
+ - `WriteIfEmpty`: Create new files or write to empty files
328
+ - Parameters: `file_path` (string), `file_content` (string)
329
+ - `FileEdit`: Edit existing files using search/replace blocks
330
+ - Parameters: `file_path` (string), `file_edit_using_search_replace_blocks` (string)
331
+ - `ReadImage`: Read image files for display/processing
332
+ - Parameters: `file_path` (string)
333
+
334
+ **Project Management:**
335
+
336
+ - `ContextSave`: Save project context and files for Knowledge Transfer or saving task checkpoints to be resumed later
337
+ - Parameters: `id` (string), `project_root_path` (string), `description` (string), `relevant_file_globs` (string[])
338
+
339
+ All tools support absolute paths and include built-in protections against common errors. See the [MCP specification](https://modelcontextprotocol.io/) for detailed protocol information.