wcgw 5.5.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wcgw/__init__.py +4 -0
- wcgw/client/__init__.py +0 -0
- wcgw/client/bash_state/bash_state.py +1426 -0
- wcgw/client/bash_state/parser/__init__.py +7 -0
- wcgw/client/bash_state/parser/bash_statement_parser.py +181 -0
- wcgw/client/common.py +51 -0
- wcgw/client/diff-instructions.txt +73 -0
- wcgw/client/encoder/__init__.py +47 -0
- wcgw/client/file_ops/diff_edit.py +619 -0
- wcgw/client/file_ops/extensions.py +137 -0
- wcgw/client/file_ops/search_replace.py +212 -0
- wcgw/client/mcp_server/Readme.md +3 -0
- wcgw/client/mcp_server/__init__.py +32 -0
- wcgw/client/mcp_server/server.py +184 -0
- wcgw/client/memory.py +103 -0
- wcgw/client/modes.py +240 -0
- wcgw/client/repo_ops/display_tree.py +116 -0
- wcgw/client/repo_ops/file_stats.py +152 -0
- wcgw/client/repo_ops/path_prob.py +58 -0
- wcgw/client/repo_ops/paths_model.vocab +20000 -0
- wcgw/client/repo_ops/paths_tokens.model +80042 -0
- wcgw/client/repo_ops/repo_context.py +289 -0
- wcgw/client/schema_generator.py +63 -0
- wcgw/client/tool_prompts.py +98 -0
- wcgw/client/tools.py +1432 -0
- wcgw/py.typed +0 -0
- wcgw/types_.py +318 -0
- wcgw-5.5.4.dist-info/METADATA +339 -0
- wcgw-5.5.4.dist-info/RECORD +38 -0
- wcgw-5.5.4.dist-info/WHEEL +4 -0
- wcgw-5.5.4.dist-info/entry_points.txt +4 -0
- wcgw-5.5.4.dist-info/licenses/LICENSE +213 -0
- wcgw_cli/__init__.py +1 -0
- wcgw_cli/__main__.py +3 -0
- wcgw_cli/anthropic_client.py +486 -0
- wcgw_cli/cli.py +40 -0
- wcgw_cli/openai_client.py +404 -0
- wcgw_cli/openai_utils.py +67 -0
wcgw/client/tools.py
ADDED
|
@@ -0,0 +1,1432 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
import glob
|
|
3
|
+
import json
|
|
4
|
+
import mimetypes
|
|
5
|
+
import os
|
|
6
|
+
import subprocess
|
|
7
|
+
import traceback
|
|
8
|
+
import uuid
|
|
9
|
+
from dataclasses import dataclass
|
|
10
|
+
from hashlib import sha256
|
|
11
|
+
from os.path import expanduser
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from tempfile import NamedTemporaryFile
|
|
14
|
+
from typing import (
|
|
15
|
+
Any,
|
|
16
|
+
Callable,
|
|
17
|
+
Literal,
|
|
18
|
+
Optional,
|
|
19
|
+
ParamSpec,
|
|
20
|
+
Type,
|
|
21
|
+
TypeVar,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
import rich
|
|
25
|
+
from openai.types.chat import (
|
|
26
|
+
ChatCompletionMessageParam,
|
|
27
|
+
)
|
|
28
|
+
from pydantic import BaseModel, TypeAdapter, ValidationError
|
|
29
|
+
from syntax_checker import Output as SCOutput
|
|
30
|
+
from syntax_checker import check_syntax as raw_check_syntax
|
|
31
|
+
from wcmatch import glob as wcglob
|
|
32
|
+
|
|
33
|
+
from ..client.bash_state.bash_state import (
|
|
34
|
+
BashState,
|
|
35
|
+
execute_bash,
|
|
36
|
+
generate_thread_id,
|
|
37
|
+
get_status,
|
|
38
|
+
get_tmpdir,
|
|
39
|
+
)
|
|
40
|
+
from ..client.repo_ops.file_stats import (
|
|
41
|
+
FileStats,
|
|
42
|
+
load_workspace_stats,
|
|
43
|
+
save_workspace_stats,
|
|
44
|
+
)
|
|
45
|
+
from ..types_ import (
|
|
46
|
+
BashCommand,
|
|
47
|
+
CodeWriterMode,
|
|
48
|
+
Command,
|
|
49
|
+
Console,
|
|
50
|
+
ContextSave,
|
|
51
|
+
FileEdit,
|
|
52
|
+
FileWriteOrEdit,
|
|
53
|
+
Initialize,
|
|
54
|
+
Modes,
|
|
55
|
+
ModesConfig,
|
|
56
|
+
ReadFiles,
|
|
57
|
+
ReadImage,
|
|
58
|
+
WriteIfEmpty,
|
|
59
|
+
)
|
|
60
|
+
from .encoder import EncoderDecoder, get_default_encoder
|
|
61
|
+
from .file_ops.extensions import select_max_tokens
|
|
62
|
+
from .file_ops.search_replace import (
|
|
63
|
+
SEARCH_MARKER,
|
|
64
|
+
search_replace_edit,
|
|
65
|
+
)
|
|
66
|
+
from .memory import load_memory, save_memory
|
|
67
|
+
from .modes import (
|
|
68
|
+
ARCHITECT_PROMPT,
|
|
69
|
+
WCGW_PROMPT,
|
|
70
|
+
code_writer_prompt,
|
|
71
|
+
modes_to_state,
|
|
72
|
+
)
|
|
73
|
+
from .repo_ops.repo_context import get_repo_context
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
@dataclass
class Context:
    """Per-call bundle of the live shell state and an output console.

    Every tool function in this module takes a ``Context`` so it can read or
    mutate the interactive shell session and report progress to the user.
    """

    # Live interactive shell/session state (cwd, mode, overwrite whitelist,
    # thread id) — see client.bash_state.bash_state.BashState.
    bash_state: BashState
    # Output sink for logs and user-visible prints (see types_.Console).
    console: Console
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def check_syntax(ext: str, content: str) -> SCOutput:
    """Run the tree-sitter syntax checker on *content* for extension *ext*.

    HTML is deliberately checked against an empty string: the prevalence of
    templating languages (Jinja, ERB, ...) inside ``.html`` files causes too
    many false-positive errors, so HTML bodies are effectively skipped.
    """
    effective_content = "" if ext == "html" else content
    return raw_check_syntax(ext, effective_content)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def get_mode_prompt(context: Context) -> str:
    """Return the system-prompt fragment matching the current bash-state mode.

    ``code_writer`` builds a prompt from the mode's allowed globs/commands;
    ``architect`` and the default (wcgw) return fixed module-level prompts.
    """
    bash_state = context.bash_state
    if bash_state.mode == "code_writer":
        # The third argument is "all" when any bash commands are permitted,
        # otherwise an empty list (no commands allowed).
        return code_writer_prompt(
            bash_state.file_edit_mode.allowed_globs,
            bash_state.write_if_empty_mode.allowed_globs,
            "all" if bash_state.bash_command_mode.allowed_commands else [],
        )
    if bash_state.mode == "architect":
        return ARCHITECT_PROMPT
    return WCGW_PROMPT
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def initialize(
    type: Literal["user_asked_change_workspace", "first_call"],
    context: Context,
    any_workspace_path: str,
    read_files_: list[str],
    task_id_to_resume: str,
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
    mode: ModesConfig,
    thread_id: str,
) -> tuple[str, Context, dict[str, list[tuple[int, int]]]]:
    """Initialize (or re-initialize) a tool session.

    Resolves the workspace path, optionally resumes a saved task, restores or
    resets the bash state for *thread_id*, reads any requested files, and
    assembles the full environment prompt shown to the model.

    Args:
        type: "first_call" for a brand-new conversation, otherwise a
            workspace/mode change within an existing one.
        context: Current tool context; its bash_state is mutated in place.
        any_workspace_path: File or directory to anchor the session at
            (may be empty; a temp playground dir is created on first call).
        read_files_: Files to read and embed in the returned prompt.
        task_id_to_resume: Saved-task id to restore (first_call only).
        coding_max_tokens / noncoding_max_tokens: Token budgets for file reads.
        mode: Requested mode configuration (string mode or CodeWriterMode).
        thread_id: Session thread id to load state for.

    Returns:
        (prompt_text, context, {file_path: [(start_line, end_line), ...]}).
    """
    # Expand the workspace path
    any_workspace_path = expand_user(any_workspace_path)
    repo_context = ""

    memory = ""
    loaded_state = None

    # For workspace/mode changes, ensure we're using an existing state if possible
    if type != "first_call" and thread_id != context.bash_state.current_thread_id:
        # Try to load state from the thread_id
        if not context.bash_state.load_state_from_thread_id(thread_id):
            return (
                f"Error: No saved bash state found for thread_id `{thread_id}`. Please re-initialize to get a new id or use correct id.",
                context,
                {},
            )
    del (
        thread_id
    )  # No use other than loading correct state before doing actual tool related stuff

    # Handle task resumption - this applies only to first_call
    if type == "first_call" and task_id_to_resume:
        try:
            project_root_path, task_mem, loaded_state = load_memory(
                task_id_to_resume,
                coding_max_tokens,
                noncoding_max_tokens,
                lambda x: default_enc.encoder(x),
                lambda x: default_enc.decoder(x),
            )
            memory = "Following is the retrieved task:\n" + task_mem
            # Prefer the saved task's project root when it still exists.
            if os.path.exists(project_root_path):
                any_workspace_path = project_root_path

        except Exception:
            memory = f'Error: Unable to load task with ID "{task_id_to_resume}" '
    elif task_id_to_resume:
        memory = (
            "Warning: task can only be resumed in a new conversation. No task loaded."
        )

    folder_to_start = None
    if type == "first_call" and not any_workspace_path:
        # No workspace given on a fresh conversation: create a scratch dir.
        tmp_dir = get_tmpdir()
        any_workspace_path = os.path.join(
            tmp_dir, "claude-playground-" + uuid.uuid4().hex[:4]
        )

    if any_workspace_path:
        if os.path.exists(any_workspace_path):
            if os.path.isfile(any_workspace_path):
                # Set any_workspace_path to the directory containing the file
                # Add the file to read_files_ only if empty to avoid duplicates
                if not read_files_:
                    read_files_ = [any_workspace_path]
                any_workspace_path = os.path.dirname(any_workspace_path)
            # Let get_repo_context handle loading the workspace stats
            repo_context, folder_to_start = get_repo_context(any_workspace_path)

            repo_context = f"---\n# Workspace structure\n{repo_context}\n---\n"

            # update modes if they're relative
            if isinstance(mode, CodeWriterMode):
                mode.update_relative_globs(any_workspace_path)
            else:
                assert isinstance(mode, str)
        else:
            # NOTE(review): os.path.abspath returns a non-empty string for any
            # non-empty input, so this condition is effectively always true
            # here — confirm whether os.path.isabs was intended.
            if os.path.abspath(any_workspace_path):
                os.makedirs(any_workspace_path, exist_ok=True)
                repo_context = f"\nInfo: Workspace path {any_workspace_path} did not exist. I've created it for you.\n"
                folder_to_start = Path(any_workspace_path)
            else:
                repo_context = (
                    f"\nInfo: Workspace path {any_workspace_path} does not exist."
                )
    # Restore bash state if available
    if loaded_state is not None:
        try:
            # parse_state tuple layout (inferred from usage below):
            # [0..3] mode objects, [4] overwrite whitelist, [5] workspace
            # root, [6] thread id (optional) — confirm against BashState.
            parsed_state = BashState.parse_state(loaded_state)
            workspace_root = (
                str(folder_to_start) if folder_to_start else parsed_state[5]
            )
            loaded_thread_id = parsed_state[6] if len(parsed_state) > 6 else None

            if not loaded_thread_id:
                loaded_thread_id = context.bash_state.current_thread_id

            if mode == "wcgw":
                # Default mode: restore the saved mode objects verbatim.
                context.bash_state.load_state(
                    parsed_state[0],
                    parsed_state[1],
                    parsed_state[2],
                    parsed_state[3],
                    {**parsed_state[4], **context.bash_state.whitelist_for_overwrite},
                    str(folder_to_start) if folder_to_start else workspace_root,
                    workspace_root,
                    loaded_thread_id,
                )
            else:
                # Explicit mode requested: saved whitelist is kept but mode
                # objects come from the requested mode config.
                state = modes_to_state(mode)
                context.bash_state.load_state(
                    state[0],
                    state[1],
                    state[2],
                    state[3],
                    {**parsed_state[4], **context.bash_state.whitelist_for_overwrite},
                    str(folder_to_start) if folder_to_start else workspace_root,
                    workspace_root,
                    loaded_thread_id,
                )
        except ValueError:
            # Corrupt/incompatible saved state: log and continue with the
            # current bash state rather than failing initialization.
            context.console.print(traceback.format_exc())
            context.console.print("Error: couldn't load bash state")
            pass
        mode_prompt = get_mode_prompt(context)
    else:
        mode_changed = is_mode_change(mode, context.bash_state)
        state = modes_to_state(mode)
        new_thread_id = context.bash_state.current_thread_id
        if type == "first_call":
            # Recreate thread_id
            new_thread_id = generate_thread_id()
        # Use the provided workspace path as the workspace root
        context.bash_state.load_state(
            state[0],
            state[1],
            state[2],
            state[3],
            dict(context.bash_state.whitelist_for_overwrite),
            str(folder_to_start) if folder_to_start else "",
            str(folder_to_start) if folder_to_start else "",
            new_thread_id,
        )
        # Only repeat the (long) mode prompt when it's new information.
        if type == "first_call" or mode_changed:
            mode_prompt = get_mode_prompt(context)
        else:
            mode_prompt = ""

    del mode

    initial_files_context = ""
    initial_paths_with_ranges: dict[str, list[tuple[int, int]]] = {}
    if read_files_:
        if folder_to_start:
            read_files_ = [
                # Expand the path before checking if it's absolute
                os.path.join(folder_to_start, f)
                if not os.path.isabs(expand_user(f))
                else expand_user(f)
                for f in read_files_
            ]
        initial_files, initial_paths_with_ranges, _ = read_files(
            read_files_, coding_max_tokens, noncoding_max_tokens, context
        )
        initial_files_context = f"---\n# Requested files\nHere are the contents of the requested files:\n{initial_files}\n---\n"

    # Check for global CLAUDE.md and workspace CLAUDE.md
    alignment_context = ""

    # Check if ripgrep is available and add instruction if it is
    try:
        subprocess.run(["which", "rg"], timeout=1, capture_output=True, check=True)
        alignment_context += "---\n# Available commands\n\n- Use ripgrep `rg` command instead of `grep` because it's much much faster.\n\n---\n\n"
    except Exception:
        pass

    # Check for global alignment doc in ~/.wcgw: prefer CLAUDE.md, else AGENTS.md
    try:
        global_dir = os.path.join(expanduser("~"), ".wcgw")
        for fname in ("CLAUDE.md", "AGENTS.md"):
            global_alignment_file_path = os.path.join(global_dir, fname)
            if os.path.exists(global_alignment_file_path):
                with open(global_alignment_file_path, "r") as f:
                    global_alignment_content = f.read()
                alignment_context += f"---\n# Important guidelines from the user\n```\n{global_alignment_content}\n```\n---\n\n"
                break
    except Exception as e:
        # Log any errors when reading the global file
        context.console.log(f"Error reading global alignment file: {e}")

    # Then check for workspace-specific alignment doc: prefer CLAUDE.md, else AGENTS.md
    if folder_to_start:
        try:
            base_dir = str(folder_to_start)
            selected_name = ""
            alignment_content = ""
            for fname in ("CLAUDE.md", "AGENTS.md"):
                alignment_file_path = os.path.join(base_dir, fname)
                if os.path.exists(alignment_file_path):
                    with open(alignment_file_path, "r") as f:
                        alignment_content = f.read()
                    selected_name = fname
                    break
            if alignment_content:
                alignment_context += f"---\n# {selected_name} - user shared project guidelines to follow\n```\n{alignment_content}\n```\n---\n\n"
        except Exception as e:
            # Log any errors when reading the workspace file
            context.console.log(f"Error reading workspace alignment file: {e}")

    uname_sysname = os.uname().sysname
    uname_machine = os.uname().machine

    output = f"""
Use thread_id={context.bash_state.current_thread_id} for all wcgw tool calls which take that.
---
{mode_prompt}

# Environment
System: {uname_sysname}
Machine: {uname_machine}
Initialized in directory (also cwd): {context.bash_state.cwd}
User home directory: {expanduser("~")}

{alignment_context}
{repo_context}

---

{memory}
---

{initial_files_context}

"""

    return output, context, initial_paths_with_ranges
|
|
342
|
+
|
|
343
|
+
|
|
344
|
+
def is_mode_change(mode_config: ModesConfig, bash_state: BashState) -> bool:
    """Return True when *mode_config* differs from the live bash state's mode.

    Compares the 4-tuple produced by ``modes_to_state`` against the
    corresponding attributes currently held by *bash_state*.
    """
    requested = modes_to_state(mode_config)
    current = (
        bash_state.bash_command_mode,
        bash_state.file_edit_mode,
        bash_state.write_if_empty_mode,
        bash_state.mode,
    )
    return requested != current
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def reset_wcgw(
    context: Context,
    starting_directory: str,
    mode_name: Optional[Modes],
    change_mode: ModesConfig,
    thread_id: str,
) -> str:
    """Reset the shell for *thread_id*, optionally switching mode.

    Args:
        context: Current tool context; its bash_state is mutated in place.
        starting_directory: New cwd and workspace root for the reset shell.
        mode_name: If set, switch to this mode; if None, keep the current one.
        change_mode: Full mode configuration used when mode_name is set.
        thread_id: Session thread id whose state should be reset.

    Returns:
        A human-readable status message (including the mode prompt on a mode
        change), or an error string when no state exists for *thread_id*.
    """
    # Load state for this thread_id before proceeding with mode/directory changes
    if thread_id != context.bash_state.current_thread_id:
        # Try to load state from the thread_id
        if not context.bash_state.load_state_from_thread_id(thread_id):
            return f"Error: No saved bash state found for thread_id `{thread_id}`. Please re-initialize to get a new id or use correct id."
    if mode_name:
        # update modes if they're relative
        if isinstance(change_mode, CodeWriterMode):
            change_mode.update_relative_globs(starting_directory)
        else:
            assert isinstance(change_mode, str)

        # Get new state configuration
        bash_command_mode, file_edit_mode, write_if_empty_mode, mode = modes_to_state(
            change_mode
        )

        # Reset shell with new mode, using the provided thread_id
        context.bash_state.load_state(
            bash_command_mode,
            file_edit_mode,
            write_if_empty_mode,
            mode,
            dict(context.bash_state.whitelist_for_overwrite),
            starting_directory,
            starting_directory,
            thread_id,
        )
        mode_prompt = get_mode_prompt(context)
        return (
            f"Reset successful with mode change to {mode_name}.\n"
            + mode_prompt
            + "\n"
            + get_status(context.bash_state, is_bg=False)
        )
    else:
        # Regular reset without mode change - keep same mode but update directory
        bash_command_mode = context.bash_state.bash_command_mode
        file_edit_mode = context.bash_state.file_edit_mode
        write_if_empty_mode = context.bash_state.write_if_empty_mode
        mode = context.bash_state.mode

        # Reload state with new directory, using the provided thread_id
        context.bash_state.load_state(
            bash_command_mode,
            file_edit_mode,
            write_if_empty_mode,
            mode,
            dict(context.bash_state.whitelist_for_overwrite),
            starting_directory,
            starting_directory,
            thread_id,
        )
        return "Reset successful" + get_status(context.bash_state, is_bg=False)
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
# Generic type variable — presumably used by typed helpers later in this
# module (its uses are not visible in this chunk).
T = TypeVar("T")
|
|
419
|
+
|
|
420
|
+
|
|
421
|
+
def save_out_of_context(content: str, suffix: str) -> str:
    """Persist *content* to a new temporary file and return its path.

    The file is created with ``delete=False`` so it outlives this call; the
    caller owns (and must eventually remove) the file.

    Args:
        content: Text to write.
        suffix: Filename suffix (e.g. ".txt") for the temporary file.

    Returns:
        Absolute path of the written temporary file.
    """
    # Write through the NamedTemporaryFile handle itself: the previous version
    # took only `.name` and reopened the path, leaking the original open
    # descriptor (and racing on platforms where reopening an open temp file
    # can fail, e.g. Windows).
    with NamedTemporaryFile(mode="w", delete=False, suffix=suffix) as f:
        f.write(content)
        return f.name
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
def expand_user(path: str) -> str:
    """Expand a leading ``~`` in *path*; return anything else unchanged.

    Unlike ``os.path.expanduser``, empty strings and paths without a leading
    tilde are passed through untouched.
    """
    has_tilde_prefix = bool(path) and path.startswith("~")
    return expanduser(path) if has_tilde_prefix else path
|
|
432
|
+
|
|
433
|
+
|
|
434
|
+
def try_open_file(file_path: str) -> None:
    """Try to open a file using the system's default application.

    Best-effort only: if no opener is found or launching it fails, the
    function returns silently.

    Args:
        file_path: Path of the file to open.
    """
    # Determine the appropriate open command based on OS
    open_cmd = None
    sysname = os.uname().sysname
    if sysname == "Darwin":  # macOS
        open_cmd = "open"
    elif sysname == "Linux":
        # Try common Linux open commands
        for cmd in ["xdg-open", "gnome-open", "kde-open"]:
            try:
                # check=True is required: without it `which` never raises on a
                # missing command, so the first candidate was always selected
                # even when absent (the ripgrep probe in initialize() already
                # uses check=True for the same reason).
                subprocess.run(
                    ["which", cmd], timeout=1, capture_output=True, check=True
                )
                open_cmd = cmd
                break
            except Exception:
                # Narrowed from a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit.
                continue

    # Try to open the file if a command is available
    if open_cmd:
        try:
            subprocess.run([open_cmd, file_path], timeout=2)
        except Exception:
            # Opening is optional; ignore launch/timeout failures.
            pass
|
|
456
|
+
|
|
457
|
+
|
|
458
|
+
# Closed set of image MIME types accepted for ImageData.media_type.
MEDIA_TYPES = Literal["image/jpeg", "image/png", "image/gif", "image/webp"]
|
|
459
|
+
|
|
460
|
+
|
|
461
|
+
class ImageData(BaseModel):
    """An image held as base64 text together with its MIME type."""

    # MIME type restricted to the MEDIA_TYPES literal set.
    media_type: MEDIA_TYPES
    # Base64-encoded image bytes (no data-URL prefix).
    data: str

    @property
    def dataurl(self) -> str:
        """Render the image as an RFC 2397 ``data:`` URL."""
        return f"data:{self.media_type};base64,{self.data}"
|
|
468
|
+
|
|
469
|
+
|
|
470
|
+
# ParamSpec — presumably for signature-preserving decorators later in this
# module (its uses are not visible in this chunk).
Param = ParamSpec("Param")
|
|
471
|
+
|
|
472
|
+
|
|
473
|
+
def truncate_if_over(content: str, max_tokens: Optional[int]) -> str:
    """Truncate *content* so it stays within roughly *max_tokens* tokens.

    A budget of None or <= 0 disables truncation. When truncating, 100 tokens
    of headroom are reserved and a "(...truncated)" marker is appended.
    """
    # Guard clause: no (positive) budget means no truncation at all.
    if not max_tokens or max_tokens <= 0:
        return content

    encoded = default_enc.encoder(content)
    if len(encoded) <= max_tokens:
        return content

    keep = max(0, max_tokens - 100)
    return default_enc.decoder(encoded[:keep]) + "\n(...truncated)"
|
|
484
|
+
|
|
485
|
+
|
|
486
|
+
def read_image_from_shell(file_path: str, context: Context) -> ImageData:
    """Read an image file and return it base64-encoded with its MIME type.

    Relative paths are resolved against the shell's current working
    directory after ``~`` expansion.

    Raises:
        ValueError: If the resolved path does not exist.
    """
    # Expand a leading ~ first, then anchor relative paths at the shell cwd.
    resolved = expand_user(file_path)
    if not os.path.isabs(resolved):
        resolved = os.path.join(context.bash_state.cwd, resolved)

    if not os.path.exists(resolved):
        raise ValueError(f"File {resolved} does not exist")

    with open(resolved, "rb") as image_file:
        raw_bytes = image_file.read()

    encoded = base64.b64encode(raw_bytes).decode("utf-8")
    media_type = mimetypes.guess_type(resolved)[0]
    return ImageData(media_type=media_type, data=encoded)  # type: ignore
|
|
502
|
+
|
|
503
|
+
|
|
504
|
+
def get_context_for_errors(
    errors: list[tuple[int, int]],
    file_content: str,
    filename: str,
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
) -> str:
    """Build a source snippet surrounding the reported syntax-error lines.

    The snippet spans from 10 lines before the first error to 10 lines after
    the last. If the snippet would blow the token budget for *filename*, a
    short re-read instruction is returned instead.
    """
    all_lines = file_content.split("\n")
    error_line_nums = [line_num for line_num, _ in errors]
    window_start = max(0, min(error_line_nums) - 10)
    window_end = min(len(all_lines), max(error_line_nums) + 10)
    snippet = "\n".join(all_lines[window_start:window_end])

    token_budget = select_max_tokens(filename, coding_max_tokens, noncoding_max_tokens)
    if token_budget is not None and token_budget > 0:
        if len(default_enc.encoder(snippet)) > token_budget:
            return "Please re-read the file to understand the context"
    return f"Here's relevant snippet from the file where the syntax errors occured:\n<snippet>\n{snippet}\n</snippet>"
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
def write_file(
    writefile: WriteIfEmpty,
    error_on_exist: bool,
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
    context: Context,
) -> tuple[
    str, dict[str, list[tuple[int, int]]]
]:  # Updated to return message and file paths with line ranges
    """Write a file, enforcing the read-before-overwrite protocol.

    A non-empty existing file may only be overwritten when it has been read
    (recorded in ``bash_state.whitelist_for_overwrite``), has been read
    "enough", and has not changed on disk since (sha256 check). Violations
    return the relevant file content so the model can re-read and retry.

    Args:
        writefile: Target path and full content to write.
        error_on_exist: Whether to enforce the overwrite protection at all.
        coding_max_tokens / noncoding_max_tokens: Token budgets for any file
            content echoed back in error messages.
        context: Current tool context (shell state, console).

    Returns:
        (message, {path: [(start_line, end_line), ...]}) — the ranges record
        which lines of which files were surfaced to the model.
    """
    # Expand the path before checking if it's absolute
    path_ = expand_user(writefile.file_path)

    # Record the write attempt in the per-workspace usage statistics.
    workspace_path = context.bash_state.workspace_root
    stats = load_workspace_stats(workspace_path)

    if path_ not in stats.files:
        stats.files[path_] = FileStats()

    stats.files[path_].increment_write()
    save_workspace_stats(workspace_path, stats)

    if not os.path.isabs(path_):
        return (
            f"Failure: file_path should be absolute path, current working directory is {context.bash_state.cwd}",
            {},  # Return empty dict instead of empty list for type consistency
        )

    # error_on_exist_ tracks whether the protection actually fires for this
    # path (it can be re-enabled below if the file changed or was under-read).
    error_on_exist_ = (
        error_on_exist and path_ not in context.bash_state.whitelist_for_overwrite
    )
    curr_hash = ""
    if error_on_exist and path_ in context.bash_state.whitelist_for_overwrite:
        # Ensure hash has not changed
        if os.path.exists(path_):
            with open(path_, "rb") as f:
                file_content = f.read()
            curr_hash = sha256(file_content).hexdigest()

            whitelist_data = context.bash_state.whitelist_for_overwrite[path_]

            # If we haven't fully read the file or hash has changed, require re-reading
            if curr_hash != whitelist_data.file_hash:
                error_on_exist_ = True
            elif not whitelist_data.is_read_enough():
                error_on_exist_ = True

    # Validate using write_if_empty_mode after checking whitelist
    allowed_globs = context.bash_state.write_if_empty_mode.allowed_globs
    if allowed_globs != "all" and not wcglob.globmatch(
        path_, allowed_globs, flags=wcglob.GLOBSTAR
    ):
        return (
            f"Error: updating file {path_} not allowed in current mode. Doesn't match allowed globs: {allowed_globs}",
            {},  # Empty dict instead of empty list
        )

    if (error_on_exist or error_on_exist_) and os.path.exists(path_):
        content = Path(path_).read_text().strip()
        # An existing-but-empty file is always safe to overwrite.
        if content:
            if error_on_exist_:
                file_ranges = []

                if path_ not in context.bash_state.whitelist_for_overwrite:
                    # File hasn't been read at all
                    msg = f"Error: you need to read existing file {path_} at least once before it can be overwritten.\n\n"
                    # Read the entire file
                    file_content_str, truncated, _, _, line_range = read_file(
                        path_, coding_max_tokens, noncoding_max_tokens, context, False
                    )
                    file_ranges = [line_range]

                    final_message = ""
                    if not truncated:
                        final_message = "You can now safely retry writing immediately considering the above information."

                    return (
                        (
                            msg
                            + f"Here's the existing file:\n<file-contents-numbered>\n{file_content_str}\n{final_message}\n</file-contents-numbered>"
                        ),
                        {path_: file_ranges},
                    )

                whitelist_data = context.bash_state.whitelist_for_overwrite[path_]

                if curr_hash != whitelist_data.file_hash:
                    # Disk content diverged since the last recorded read.
                    msg = "Error: the file has changed since last read.\n\n"
                    # Read the entire file again
                    file_content_str, truncated, _, _, line_range = read_file(
                        path_, coding_max_tokens, noncoding_max_tokens, context, False
                    )
                    file_ranges = [line_range]

                    final_message = ""
                    if not truncated:
                        final_message = "You can now safely retry writing immediately considering the above information."

                    return (
                        (
                            msg
                            + f"Here's the existing file:\n<file-contents-numbered>\n{file_content_str}\n</file-contents-numbered>\n{final_message}"
                        ),
                        {path_: file_ranges},
                    )
                else:
                    # The file hasn't changed, but we haven't read enough of it
                    unread_ranges = whitelist_data.get_unread_ranges()
                    # Format the ranges as a string for display
                    ranges_str = ", ".join(
                        [f"{start}-{end}" for start, end in unread_ranges]
                    )
                    msg = f"Error: you need to read more of the file before it can be overwritten.\nUnread line ranges: {ranges_str}\n\n"

                    # Read just the unread ranges
                    paths_: list[str] = []
                    for start, end in unread_ranges:
                        paths_.append(path_ + ":" + f"{start}-{end}")
                    paths_readfiles = ReadFiles(file_paths=paths_)
                    readfiles, file_ranges_dict, truncated = read_files(
                        paths_readfiles.file_paths,
                        coding_max_tokens,
                        noncoding_max_tokens,
                        context,
                        start_line_nums=paths_readfiles.start_line_nums,
                        end_line_nums=paths_readfiles.end_line_nums,
                    )

                    final_message = ""
                    if not truncated:
                        final_message = "Now that you have read the rest of the file, you can now safely immediately retry writing but consider the new information above."

                    return (
                        (msg + "\n" + readfiles + "\n" + final_message),
                        file_ranges_dict,
                    )
    # No need to add to whitelist here - will be handled by get_tool_output

    path = Path(path_)
    path.parent.mkdir(parents=True, exist_ok=True)

    try:
        with path.open("w") as f:
            f.write(writefile.file_content)
    except OSError as e:
        return f"Error: {e}", {}

    extension = Path(path_).suffix.lstrip(".")

    context.console.print(f"File written to {path_}")

    # Best-effort syntax check: failures here never block the (already
    # completed) write, they only add warnings to the success message.
    warnings = []
    try:
        check = check_syntax(extension, writefile.file_content)
        syntax_errors = check.description

        if syntax_errors:
            if extension in {"tsx", "ts"}:
                syntax_errors += "\nNote: Ignore if 'tagged template literals' are used, they may raise false positive errors in tree-sitter."

            context_for_errors = get_context_for_errors(
                check.errors,
                writefile.file_content,
                path_,
                coding_max_tokens,
                noncoding_max_tokens,
            )
            context.console.print(f"W: Syntax errors encountered: {syntax_errors}")
            warnings.append(f"""
---
Warning: tree-sitter reported syntax errors
Syntax errors:
{syntax_errors}

{context_for_errors}
---
""")

    except Exception:
        pass

    # Count the lines directly from the content we're writing
    total_lines = writefile.file_content.count("\n") + 1

    return "Success" + "".join(warnings), {
        path_: [(1, total_lines)]
    }  # Return the file path with line range along with success message
|
|
711
|
+
|
|
712
|
+
|
|
713
|
+
def do_diff_edit(
    fedit: FileEdit,
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
    context: Context,
) -> tuple[str, dict[str, list[tuple[int, int]]]]:
    """Apply a search/replace file edit, retrying once with unescaped quotes.

    If the first attempt raises, the edit text is retried with ``\\"``
    replaced by ``"`` (models sometimes over-escape quotes in edit blocks).
    If the retry also fails, the ORIGINAL exception is re-raised, not the
    retry's.

    Returns:
        (message, {path: [(start_line, end_line), ...]}) from _do_diff_edit.
    """
    try:
        return _do_diff_edit(fedit, coding_max_tokens, noncoding_max_tokens, context)
    except Exception as e:
        # Try replacing \"
        try:
            fedit = FileEdit(
                file_path=fedit.file_path,
                file_edit_using_search_replace_blocks=fedit.file_edit_using_search_replace_blocks.replace(
                    '\\"', '"'
                ),
            )
            return _do_diff_edit(
                fedit, coding_max_tokens, noncoding_max_tokens, context
            )
        except Exception:
            # Swallow the retry's failure; the original error is more useful.
            pass
        raise e
|
|
736
|
+
|
|
737
|
+
|
|
738
|
+
def _do_diff_edit(
    fedit: FileEdit,
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
    context: Context,
) -> tuple[str, dict[str, list[tuple[int, int]]]]:
    """Apply search/replace blocks to an existing file on disk.

    Validates the path and edit-mode permissions, records edit stats, runs
    the search/replace engine, writes the result back, and best-effort
    syntax-checks the new content with tree-sitter.

    Returns:
        A (message, {path: [(start_line, end_line)]}) pair; the message is
        either the edit comments or a syntax-error warning.

    Raises:
        Exception: for relative paths, disallowed globs, or missing files.
    """
    context.console.log(f"Editing file: {fedit.file_path}")

    # Expand the path before checking if it's absolute
    path_ = expand_user(fedit.file_path)

    if not os.path.isabs(path_):
        raise Exception(
            f"Failure: file_path should be absolute path, current working directory is {context.bash_state.cwd}"
        )

    # Record an edit against per-workspace file statistics.
    workspace_path = context.bash_state.workspace_root
    stats = load_workspace_stats(workspace_path)

    if path_ not in stats.files:
        stats.files[path_] = FileStats()

    stats.files[path_].increment_edit()
    save_workspace_stats(workspace_path, stats)

    # Validate using file_edit_mode
    allowed_globs = context.bash_state.file_edit_mode.allowed_globs
    if allowed_globs != "all" and not wcglob.globmatch(
        path_, allowed_globs, flags=wcglob.GLOBSTAR
    ):
        raise Exception(
            f"Error: updating file {path_} not allowed in current mode. Doesn't match allowed globs: {allowed_globs}"
        )

    # No need to add to whitelist here - will be handled by get_tool_output

    if not os.path.exists(path_):
        raise Exception(f"Error: file {path_} does not exist")

    with open(path_) as f:
        apply_diff_to = f.read()

    fedit.file_edit_using_search_replace_blocks = (
        fedit.file_edit_using_search_replace_blocks.strip()
    )
    lines = fedit.file_edit_using_search_replace_blocks.split("\n")

    # May raise on malformed blocks; do_diff_edit handles the retry.
    apply_diff_to, comments = search_replace_edit(
        lines, apply_diff_to, context.console.log
    )

    # Count the lines just once - after the edit but before writing
    total_lines = apply_diff_to.count("\n") + 1

    with open(path_, "w") as f:
        f.write(apply_diff_to)

    syntax_errors = ""
    extension = Path(path_).suffix.lstrip(".")
    # Syntax check is advisory only: any failure here is swallowed and the
    # plain success path below is taken.
    try:
        check = check_syntax(extension, apply_diff_to)
        syntax_errors = check.description
        if syntax_errors:
            context_for_errors = get_context_for_errors(
                check.errors,
                apply_diff_to,
                path_,
                coding_max_tokens,
                noncoding_max_tokens,
            )
            if extension in {"tsx", "ts"}:
                # tree-sitter is known to misparse tagged template literals.
                syntax_errors += "\nNote: Ignore if 'tagged template literals' are used, they may raise false positive errors in tree-sitter."

            context.console.print(f"W: Syntax errors encountered: {syntax_errors}")

            return (
                f"""{comments}
---
Warning: tree-sitter reported syntax errors, please re-read the file and fix if there are any errors.
Syntax errors:
{syntax_errors}

{context_for_errors}
""",
                {path_: [(1, total_lines)]},
            )  # Return the file path with line range along with the warning message
    except Exception:
        pass

    return comments, {
        path_: [(1, total_lines)]
    }  # Return the file path with line range along with the edit comments
|
|
832
|
+
def _is_edit(content: str, percentage: int) -> bool:
    """Decide whether *content* should be treated as search/replace blocks.

    True when the first line (after stripping leading whitespace) matches
    the search-block marker, or when the declared change percentage is in
    the range (0, 50].
    """
    candidate_lines = content.lstrip().split("\n")
    if not candidate_lines:
        return False
    first_line = candidate_lines[0]
    return bool(SEARCH_MARKER.match(first_line)) or 0 < percentage <= 50
841
|
+
|
|
842
|
+
def file_writing(
    file_writing_args: FileWriteOrEdit,
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
    context: Context,
) -> tuple[
    str, dict[str, list[tuple[int, int]]]
]:  # Returns a message plus file paths with line ranges
    """
    Write or edit a file based on percentage of changes.
    If percentage_changed > 50%, treat content as direct file content.
    Otherwise, treat content as search/replace blocks.
    """
    # Verify the caller's thread; fall back to loading its saved state.
    if file_writing_args.thread_id != context.bash_state.current_thread_id:
        if not context.bash_state.load_state_from_thread_id(
            file_writing_args.thread_id
        ):
            return (
                f"Error: No saved bash state found for thread_id `{file_writing_args.thread_id}`. Please re-initialize to get a new id or use correct id.",
                {},
            )

    # Expand the path before checking if it's absolute
    path_ = expand_user(file_writing_args.file_path)
    if not os.path.isabs(path_):
        return (
            f"Failure: file_path should be absolute path, current working directory is {context.bash_state.cwd}",
            {},  # Empty dict keeps the return type consistent
        )

    content = file_writing_args.text_or_search_replace_blocks

    if not _is_edit(content, file_writing_args.percentage_to_change):
        # Direct content mode (same as WriteIfEmpty)
        return write_file(
            WriteIfEmpty(file_path=path_, file_content=content),
            True,
            coding_max_tokens,
            noncoding_max_tokens,
            context,
        )

    # Search/replace mode for an existing file with <= 50% change
    return do_diff_edit(
        FileEdit(
            file_path=path_,
            file_edit_using_search_replace_blocks=content,
        ),
        coding_max_tokens,
        noncoding_max_tokens,
        context,
    )
|
|
904
|
+
# Union of every tool schema the agent may invoke; used for pydantic
# validation/dispatch throughout this module.
TOOLS = BashCommand | FileWriteOrEdit | ReadImage | ReadFiles | Initialize | ContextSave
+
|
|
906
|
+
|
|
907
|
+
def which_tool(args: str) -> TOOLS:
    """Parse a JSON string into the matching tool model (unknown keys rejected)."""
    payload = json.loads(args)
    validator = TypeAdapter[TOOLS](TOOLS, config={"extra": "forbid"})
    return validator.validate_python(payload)
|
911
|
+
|
|
912
|
+
def which_tool_name(name: str) -> Type[TOOLS]:
    """Map a tool name string to its model class.

    Raises:
        ValueError: if *name* is not a known tool.
    """
    name_to_type: dict[str, Type[TOOLS]] = {
        "BashCommand": BashCommand,
        "FileWriteOrEdit": FileWriteOrEdit,
        "ReadImage": ReadImage,
        "ReadFiles": ReadFiles,
        "Initialize": Initialize,
        "ContextSave": ContextSave,
    }
    try:
        return name_to_type[name]
    except KeyError:
        raise ValueError(f"Unknown tool name: {name}") from None
|
928
|
+
|
|
929
|
+
def parse_tool_by_name(name: str, arguments: dict[str, Any]) -> TOOLS:
    """Instantiate the named tool from raw arguments.

    On validation failure, retries once after JSON-decoding any string
    values (some clients double-encode nested objects as JSON strings).
    """
    tool_type = which_tool_name(name)
    try:
        return tool_type(**arguments)
    except ValidationError:

        def _maybe_decode(value: Any) -> Any:
            # Leave non-strings and non-JSON strings untouched.
            if not isinstance(value, str):
                return value
            try:
                return json.loads(value)
            except json.JSONDecodeError:
                return value

        decoded_arguments = {key: _maybe_decode(val) for key, val in arguments.items()}
        return tool_type(**decoded_arguments)
|
+
|
|
946
|
+
# Append-only module-level log of every tool call parsed by get_tool_output.
TOOL_CALLS: list[TOOLS] = []
+
|
|
948
|
+
|
|
949
|
+
def get_tool_output(
    context: Context,
    args: dict[object, object] | TOOLS,
    enc: EncoderDecoder[int],
    limit: float,
    loop_call: Callable[[str, float], tuple[str, float]],
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
) -> tuple[list[str | ImageData], float]:
    """Validate, dispatch, and run a single tool call.

    Accepts either a raw dict (validated against the TOOLS union) or an
    already-parsed tool model. Every accessed/modified file's line ranges
    are collected and whitelisted for overwrite, and bash state is saved
    to disk before returning.

    Returns:
        ([output], cost) where output is a string or ImageData.

    Raises:
        ValueError: for an unrecognized tool instance.
    """
    global TOOL_CALLS
    if isinstance(args, dict):
        adapter = TypeAdapter[TOOLS](TOOLS, config={"extra": "forbid"})
        arg = adapter.validate_python(args)
    else:
        arg = args
    output: tuple[str | ImageData, float]
    TOOL_CALLS.append(arg)

    # Initialize a dictionary to track file paths and line ranges
    file_paths_with_ranges: dict[str, list[tuple[int, int]]] = {}

    if isinstance(arg, BashCommand):
        context.console.print("Calling execute bash tool")

        output_str, cost = execute_bash(
            context.bash_state, enc, arg, noncoding_max_tokens, arg.wait_for_seconds
        )
        output = output_str, cost
    elif isinstance(arg, WriteIfEmpty):
        context.console.print("Calling write file tool")

        result, write_paths = write_file(
            arg, True, coding_max_tokens, noncoding_max_tokens, context
        )
        output = result, 0
        # Add write paths with their ranges to our tracking dictionary
        for path, ranges in write_paths.items():
            if path in file_paths_with_ranges:
                file_paths_with_ranges[path].extend(ranges)
            else:
                file_paths_with_ranges[path] = ranges.copy()
    elif isinstance(arg, FileEdit):
        context.console.print("Calling full file edit tool")

        result, edit_paths = do_diff_edit(
            arg, coding_max_tokens, noncoding_max_tokens, context
        )
        output = result, 0.0
        # Add edit paths with their ranges to our tracking dictionary
        for path, ranges in edit_paths.items():
            if path in file_paths_with_ranges:
                file_paths_with_ranges[path].extend(ranges)
            else:
                file_paths_with_ranges[path] = ranges.copy()
    elif isinstance(arg, FileWriteOrEdit):
        context.console.print("Calling file writing tool")

        result, write_edit_paths = file_writing(
            arg, coding_max_tokens, noncoding_max_tokens, context
        )
        output = result, 0.0
        # Add write/edit paths with their ranges to our tracking dictionary
        for path, ranges in write_edit_paths.items():
            if path in file_paths_with_ranges:
                file_paths_with_ranges[path].extend(ranges)
            else:
                file_paths_with_ranges[path] = ranges.copy()
    elif isinstance(arg, ReadImage):
        context.console.print("Calling read image tool")
        image_data = read_image_from_shell(arg.file_path, context)
        output = image_data, 0.0
    elif isinstance(arg, ReadFiles):
        context.console.print("Calling read file tool")
        # Access line numbers through properties
        result, file_ranges_dict, _ = read_files(
            arg.file_paths,
            coding_max_tokens,
            noncoding_max_tokens,
            context,
            arg.start_line_nums,
            arg.end_line_nums,
        )
        output = result, 0.0

        # Merge the new file ranges into our tracking dictionary
        for path, ranges in file_ranges_dict.items():
            if path in file_paths_with_ranges:
                file_paths_with_ranges[path].extend(ranges)
            else:
                file_paths_with_ranges[path] = ranges
    elif isinstance(arg, Initialize):
        context.console.print("Calling initial info tool")
        if arg.type == "user_asked_mode_change" or arg.type == "reset_shell":
            # Normalize to a directory; empty string when the path is gone.
            workspace_path = (
                arg.any_workspace_path
                if os.path.isdir(arg.any_workspace_path)
                else os.path.dirname(arg.any_workspace_path)
            )
            workspace_path = workspace_path if os.path.exists(workspace_path) else ""

            # For these specific operations, thread_id is required
            output = (
                reset_wcgw(
                    context,
                    workspace_path,
                    arg.mode_name
                    if is_mode_change(arg.mode, context.bash_state)
                    else None,
                    arg.mode,
                    arg.thread_id,
                ),
                0.0,
            )
        else:
            output_, context, init_paths = initialize(
                arg.type,
                context,
                arg.any_workspace_path,
                arg.initial_files_to_read or [],
                arg.task_id_to_resume,
                coding_max_tokens,
                noncoding_max_tokens,
                arg.mode,
                arg.thread_id,
            )
            output = output_, 0.0
            # Since init_paths is already a dictionary mapping file paths to line ranges,
            # we just need to merge it with our tracking dictionary
            for path, ranges in init_paths.items():
                if path not in file_paths_with_ranges and os.path.exists(path):
                    file_paths_with_ranges[path] = ranges
                elif path in file_paths_with_ranges:
                    file_paths_with_ranges[path].extend(ranges)

    elif isinstance(arg, ContextSave):
        context.console.print("Calling task memory tool")
        relevant_files = []
        warnings = ""
        # Expand user in project root path
        arg.project_root_path = os.path.expanduser(arg.project_root_path)
        for fglob in arg.relevant_file_globs:
            # Expand user in glob pattern before checking if it's absolute
            fglob = expand_user(fglob)
            # If not absolute after expansion, join with project root path
            if not os.path.isabs(fglob) and arg.project_root_path:
                fglob = os.path.join(arg.project_root_path, fglob)
            globs = glob.glob(fglob, recursive=True)
            # Cap per-glob matches at 1000 to bound memory/time.
            relevant_files.extend(globs[:1000])
            if not globs:
                warnings += f"Warning: No files found for the glob: {fglob}\n"
        relevant_files_data, _, _ = read_files(
            relevant_files[:10_000], None, None, context
        )
        save_path = save_memory(
            arg, relevant_files_data, context.bash_state.serialize()
        )
        if not relevant_files and arg.relevant_file_globs:
            output_ = f'Error: No files found for the given globs. Context file successfully saved at "{save_path}", but please fix the error.'
        elif warnings:
            output_ = warnings + "\nContext file successfully saved at " + save_path
        else:
            output_ = save_path
        # Try to open the saved file
        try_open_file(save_path)
        output = output_, 0.0
    else:
        raise ValueError(f"Unknown tool: {arg}")

    if file_paths_with_ranges:  # Only add to whitelist if we have paths
        context.bash_state.add_to_whitelist_for_overwrite(file_paths_with_ranges)

    # Save bash_state
    context.bash_state.save_state_to_disk()

    if isinstance(output[0], str):
        context.console.print(str(output[0]))
    else:
        context.console.print(f"Received {type(output[0])} from tool")
    return [output[0]], output[1]
|
|
1129
|
+
|
|
1130
|
+
# Type alias for an OpenAI-style chat transcript.
History = list[ChatCompletionMessageParam]

# Shared token encoder and running cost accumulator for the CLI loop.
default_enc = get_default_encoder()
curr_cost = 0.0
1135
|
+
|
|
1136
|
+
def range_format(start_line_num: Optional[int], end_line_num: Optional[int]) -> str:
    """Render a ":start-end" path suffix; empty string when neither bound is set.

    A bound that is None (or 0) renders as an empty side, e.g. ":5-" or ":-20".
    """
    start_text = str(start_line_num) if start_line_num else ""
    end_text = str(end_line_num) if end_line_num else ""
    if start_text or end_text:
        return f":{start_text}-{end_text}"
    return ""
|
1143
|
+
|
|
1144
|
+
def read_files(
    file_paths: list[str],
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
    context: Context,
    start_line_nums: Optional[list[Optional[int]]] = None,
    end_line_nums: Optional[list[Optional[int]]] = None,
) -> tuple[
    str, dict[str, list[tuple[int, int]]], bool
]:  # Updated to return file paths with ranges
    """Read several files, tracking per-file read stats and shared token budgets.

    The two token budgets are decremented across files; reading stops early
    once a file is truncated or both budgets are exhausted.

    Returns:
        (rendered message, {path: [(start, end), ...]}, truncated_flag).
    """
    message = ""
    file_ranges_dict: dict[
        str, list[tuple[int, int]]
    ] = {}  # Map file paths to line ranges

    # First pass: bump read counters for every absolute path (relative
    # paths are silently skipped here; read_file rejects them later).
    workspace_path = context.bash_state.workspace_root
    stats = load_workspace_stats(workspace_path)

    for path_ in file_paths:
        path_ = expand_user(path_)
        if not os.path.isabs(path_):
            continue
        if path_ not in stats.files:
            stats.files[path_] = FileStats()

        stats.files[path_].increment_read()
    save_workspace_stats(workspace_path, stats)
    truncated = False
    for i, file in enumerate(file_paths):
        try:
            # Use line numbers from parameters if provided
            start_line_num = None if start_line_nums is None else start_line_nums[i]
            end_line_num = None if end_line_nums is None else end_line_nums[i]

            # For backward compatibility, we still need to extract line numbers from path
            # if they weren't provided as parameters
            content, truncated, tokens, path, line_range = read_file(
                file,
                coding_max_tokens,
                noncoding_max_tokens,
                context,
                start_line_num,
                end_line_num,
            )

            # Add file path with line range to dictionary
            if path in file_ranges_dict:
                file_ranges_dict[path].append(line_range)
            else:
                file_ranges_dict[path] = [line_range]
        except Exception as e:
            # Per-file failures are reported inline and do not abort the batch.
            message += f"\n{file}: {str(e)}\n"
            continue

        # Spend this file's token count against both budgets.
        if coding_max_tokens:
            coding_max_tokens = max(0, coding_max_tokens - tokens)
        if noncoding_max_tokens:
            noncoding_max_tokens = max(0, noncoding_max_tokens - tokens)

        range_formatted = range_format(start_line_num, end_line_num)
        message += (
            f'\n<file-contents-numbered path="{file}{range_formatted}">\n{content}\n'
        )

        # The closing tag is intentionally omitted for truncated content.
        if not truncated:
            message += "</file-contents-numbered>"

        # Check if we've hit both token limit
        # NOTE: `or` binds looser than `and`, so this reads as:
        # truncated OR (coding budget exhausted AND noncoding budget exhausted).
        if (
            truncated
            or (coding_max_tokens is not None and coding_max_tokens <= 0)
            and (noncoding_max_tokens is not None and noncoding_max_tokens <= 0)
        ):
            not_reading = file_paths[i + 1 :]
            if not_reading:
                message += f"\nNot reading the rest of the files: {', '.join(not_reading)} due to token limit, please call again"
            break

    return message, file_ranges_dict, truncated
1224
|
+
|
|
1225
|
+
def read_file(
    file_path: str,
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
    context: Context,
    start_line_num: Optional[int] = None,
    end_line_num: Optional[int] = None,
) -> tuple[str, bool, int, str, tuple[int, int]]:
    """Read one file (optionally a 1-indexed inclusive line range) with line numbers.

    The applicable token budget is chosen by file type; content beyond it is
    truncated at a token boundary with a continuation hint appended.

    Returns:
        (content, truncated, token_count, absolute_path, (start, end)) where
        (start, end) is the effective 1-indexed range actually rendered.

    Raises:
        ValueError: if the path is relative or the file does not exist.
    """
    context.console.print(f"Reading file: {file_path}")
    show_line_numbers = True
    # Line numbers are now passed as parameters, no need to parse from path

    # Expand the path before checking if it's absolute
    file_path = expand_user(file_path)

    if not os.path.isabs(file_path):
        raise ValueError(
            f"Failure: file_path should be absolute path, current working directory is {context.bash_state.cwd}"
        )

    path = Path(file_path)
    if not path.exists():
        raise ValueError(f"Error: file {file_path} does not exist")

    # Read all lines of the file
    # (readlines hint bounds memory at roughly 10 MB per call)
    with path.open("r") as f:
        all_lines = f.readlines(10_000_000)

    if all_lines and all_lines[-1].endswith("\n"):
        # Special handling of line counts because readlines doesn't consider last empty line as a separate line
        all_lines.append("")

    total_lines = len(all_lines)

    # Apply line range filtering if specified
    start_idx = 0
    if start_line_num is not None:
        # Convert 1-indexed line number to 0-indexed
        start_idx = max(0, start_line_num - 1)

    end_idx = len(all_lines)
    if end_line_num is not None:
        # end_line_num is inclusive, so we use min to ensure it's within bounds
        end_idx = min(len(all_lines), end_line_num)

    # Convert back to 1-indexed line numbers for tracking
    effective_start = start_line_num if start_line_num is not None else 1
    effective_end = end_line_num if end_line_num is not None else total_lines

    filtered_lines = all_lines[start_idx:end_idx]

    # Create content with or without line numbers
    if show_line_numbers:
        content_lines = []
        for i, line in enumerate(filtered_lines, start=start_idx + 1):
            content_lines.append(f"{i} {line}")
        content = "".join(content_lines)
    else:
        content = "".join(filtered_lines)

    truncated = False
    tokens_counts = 0

    # Select the appropriate max_tokens based on file type
    max_tokens = select_max_tokens(file_path, coding_max_tokens, noncoding_max_tokens)

    # Handle token limit if specified
    if max_tokens is not None:
        tokens = default_enc.encoder(content)
        tokens_counts = len(tokens)

        if len(tokens) > max_tokens:
            # Truncate at token boundary first
            truncated_tokens = tokens[:max_tokens]
            truncated_content = default_enc.decoder(truncated_tokens)

            # Count how many lines we kept
            line_count = truncated_content.count("\n")

            # Calculate the last line number shown (1-indexed)
            last_line_shown = start_idx + line_count

            content = truncated_content
            # Add informative message about truncation with total line count
            total_lines = len(all_lines)
            content += (
                f"\n(...truncated) Only showing till line number {last_line_shown} of {total_lines} total lines due to the token limit, please continue reading from {last_line_shown + 1} if required"
                f" using syntax {file_path}:{last_line_shown + 1}-{total_lines}"
            )
            truncated = True

            # Update effective_end if truncated
            effective_end = last_line_shown

    # Return the content along with the effective line range that was read
    return (
        content,
        truncated,
        tokens_counts,
        file_path,
        (effective_start, effective_end),
    )
|
|
1328
|
+
|
|
1329
|
+
if __name__ == "__main__":
    # Manual smoke test: start a live BashState, run Initialize, then a few
    # BashCommand calls (pwd / venv activation / `take`) against the shell.
    with BashState(
        rich.console.Console(style="blue", highlight=False, markup=False),
        "",
        None,
        None,
        None,
        None,
        True,
        None,
    ) as BASH_STATE:
        print(
            get_tool_output(
                Context(BASH_STATE, BASH_STATE.console),
                Initialize(
                    type="first_call",
                    any_workspace_path="",
                    initial_files_to_read=[],
                    task_id_to_resume="",
                    mode_name="wcgw",
                    code_writer_config=None,
                    thread_id="",
                ),
                default_enc,
                0,
                lambda x, y: ("", 0),
                24000,  # coding_max_tokens
                8000,  # noncoding_max_tokens
            )
        )
        print(
            get_tool_output(
                Context(BASH_STATE, BASH_STATE.console),
                BashCommand(
                    action_json=Command(command="pwd"),
                    thread_id=BASH_STATE.current_thread_id,
                ),
                default_enc,
                0,
                lambda x, y: ("", 0),
                24000,  # coding_max_tokens
                8000,  # noncoding_max_tokens
            )
        )

        print(
            get_tool_output(
                Context(BASH_STATE, BASH_STATE.console),
                BashCommand(
                    action_json=Command(command="source .venv/bin/activate"),
                    thread_id=BASH_STATE.current_thread_id,
                ),
                default_enc,
                0,
                lambda x, y: ("", 0),
                24000,  # coding_max_tokens
                8000,  # noncoding_max_tokens
            )
        )

        print(
            get_tool_output(
                Context(BASH_STATE, BASH_STATE.console),
                BashCommand(
                    action_json=Command(command="pwd"),
                    thread_id=BASH_STATE.current_thread_id,
                ),
                default_enc,
                0,
                lambda x, y: ("", 0),
                24000,  # coding_max_tokens
                8000,  # noncoding_max_tokens
            )
        )

        print(
            get_tool_output(
                Context(BASH_STATE, BASH_STATE.console),
                BashCommand(
                    action_json=Command(command="take src"),
                    thread_id=BASH_STATE.current_thread_id,
                ),
                default_enc,
                0,
                lambda x, y: ("", 0),
                24000,  # coding_max_tokens
                8000,  # noncoding_max_tokens
            )
        )

        print(
            get_tool_output(
                Context(BASH_STATE, BASH_STATE.console),
                BashCommand(
                    action_json=Command(command="pwd"),
                    thread_id=BASH_STATE.current_thread_id,
                ),
                default_enc,
                0,
                lambda x, y: ("", 0),
                24000,  # coding_max_tokens
                8000,  # noncoding_max_tokens
            )
        )