wcgw 2.3.3.tar.gz → 2.4.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {wcgw-2.3.3 → wcgw-2.4.0}/PKG-INFO +1 -1
- {wcgw-2.3.3 → wcgw-2.4.0}/pyproject.toml +1 -1
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/mcp_server/server.py +6 -5
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/tools.py +134 -57
- {wcgw-2.3.3 → wcgw-2.4.0}/tests/test_tools.py +2 -4
- {wcgw-2.3.3 → wcgw-2.4.0}/uv.lock +1 -1
- {wcgw-2.3.3 → wcgw-2.4.0}/.github/workflows/python-publish.yml +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/.github/workflows/python-tests.yml +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/.github/workflows/python-types.yml +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/.gitignore +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/.python-version +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/.vscode/settings.json +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/README.md +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/gpt_action_json_schema.json +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/gpt_instructions.txt +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/openai.md +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/__init__.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/__init__.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/__init__.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/__main__.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/anthropic_client.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/cli.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/common.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/computer_use.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/diff-instructions.txt +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/mcp_server/Readme.md +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/mcp_server/__init__.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/openai_client.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/openai_utils.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/client/sys_utils.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/relay/serve.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/relay/static/privacy.txt +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/src/wcgw/types_.py +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/static/claude-ss.jpg +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/static/computer-use.jpg +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/static/example.jpg +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/static/rocket-icon.png +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/static/ss1.png +0 -0
- {wcgw-2.3.3 → wcgw-2.4.0}/tests/test_basic.py +0 -0
src/wcgw/client/mcp_server/server.py

@@ -82,14 +82,14 @@ async def handle_list_tools() -> list[types.Tool]:
         ToolParam(
             inputSchema=BashCommand.model_json_schema(),
             name="BashCommand",
-            description="""
+            description=f"""
 - Execute a bash command. This is stateful (beware with subsequent calls).
 - Do not use interactive commands like nano. Prefer writing simpler commands.
 - Status of the command and the current working directory will always be returned at the end.
 - Optionally `exit shell has restarted` is the output, in which case environment resets, you can run fresh commands.
 - The first or the last line might be `(...truncated)` if the output is too long.
 - Always run `pwd` if you get any file or directory not found error to make sure you're not lost.
-- The control will return to you in
+- The control will return to you in {SLEEP_TIME_MAX_S} seconds regardless of the status. For heavy commands, keep checking status using BashInteraction till they are finished.
 - Run long running commands in background using screen instead of "&".
 - Use longer wait_for_seconds if the command is expected to run for a long time.
 """,
@@ -97,13 +97,13 @@ async def handle_list_tools() -> list[types.Tool]:
         ToolParam(
             inputSchema=BashInteraction.model_json_schema(),
             name="BashInteraction",
-            description="""
+            description=f"""
 - Interact with running program using this tool
 - Special keys like arrows, interrupts, enter, etc.
 - Send text input to the running program.
 - Send send_specials=["Enter"] to recheck status of a running program.
 - Only one of send_text, send_specials, send_ascii should be provided.
-- This returns within
+- This returns within {SLEEP_TIME_MAX_S} seconds, for heavy programs keep checking status for upto 10 turns before asking user to continue checking again.
 - Programs don't hang easily, so most likely explanation for no output is usually that the program is still running, and you need to check status again using ["Enter"].
 - Do not send Ctrl-c before checking for status till 10 minutes or whatever is appropriate for the program to finish.
 - Set longer wait_for_seconds when program is expected to run for a long time.
@@ -273,7 +273,8 @@ async def main(computer_use: bool) -> None:
     global COMPUTER_USE_ON_DOCKER_ENABLED

     tools.TIMEOUT = SLEEP_TIME_MAX_S
-
+    tools.TIMEOUT_WHILE_OUTPUT = 55
+    tools.OUTPUT_WAIT_PATIENCE = 5
     tools.console = tools.DisableConsole()

     if computer_use:
src/wcgw/client/tools.py

@@ -1,28 +1,22 @@
-import asyncio
 import base64
-from concurrent.futures import ThreadPoolExecutor, as_completed
 import datetime
-from io import BytesIO
 import json
 import mimetypes
 from pathlib import Path
 import re
 import shlex
-import sys
-import threading
 import importlib.metadata
 import time
 import traceback
 from tempfile import NamedTemporaryFile, TemporaryDirectory
 from typing import (
     Callable,
+    DefaultDict,
     Literal,
-    NewType,
     Optional,
     ParamSpec,
     Type,
     TypeVar,
-    TypedDict,
 )
 import uuid
 import humanize
@@ -33,22 +27,16 @@ from websockets.sync.client import connect as syncconnect

 import os
 import tiktoken
-import petname  # type: ignore[import-untyped]
 import pexpect
 from typer import Typer
 import websockets

 import rich
 import pyte
-from dotenv import load_dotenv

 from syntax_checker import check_syntax
-from openai import OpenAI
 from openai.types.chat import (
     ChatCompletionMessageParam,
-    ChatCompletionAssistantMessageParam,
-    ChatCompletionMessage,
-    ParsedChatCompletionMessage,
 )
 from difflib import SequenceMatcher

@@ -68,9 +56,7 @@ from ..types_ import (
     GetScreenInfo,
 )

-from .common import CostData, Models, discard_input
 from .sys_utils import command_run
-from .openai_utils import get_input_cost, get_output_cost


 class DisableConsole:
@@ -86,9 +72,11 @@ console: rich.console.Console | DisableConsole = rich.console.Console(
 )

 TIMEOUT = 5
+TIMEOUT_WHILE_OUTPUT = 20
+OUTPUT_WAIT_PATIENCE = 3


-def render_terminal_output(text: str) -> str:
+def render_terminal_output(text: str) -> list[str]:
     screen = pyte.Screen(160, 500)
     screen.set_mode(pyte.modes.LNM)
     stream = pyte.Stream(screen)
@@ -101,9 +89,25 @@ def render_terminal_output(text: str) -> str:
     else:
         i = len(dsp)
     lines = screen.display[: len(dsp) - i]
-
-
-
+    return lines
+
+
+def get_incremental_output(old_output: list[str], new_output: list[str]) -> list[str]:
+    nold = len(old_output)
+    nnew = len(new_output)
+    if not old_output:
+        return new_output
+    for i in range(nnew - 1, -1, -1):
+        if new_output[i] != old_output[-1]:
+            continue
+        for j in range(i - 1, -1, -1):
+            if (nold - 1 + j - i) < 0:
+                break
+            if new_output[j] != old_output[-1 + j - i]:
+                break
+        else:
+            return new_output[i + 1 :]
+    return new_output


 class Confirmation(BaseModel):
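The new get_incremental_output helper scans new_output from the end for the most recent point where the tail of old_output reappears, and returns only the lines after that point; if no overlap is found, the whole new output is returned. A minimal usage sketch, assuming the function from the hunk above is in scope (the sample lines are illustrative, not taken from the package):

    # Overlap ends at "compiling a.c", so only the lines after it count as new.
    old = ["$ make", "compiling a.c"]
    new = ["compiling a.c", "compiling b.c", "linking"]
    assert get_incremental_output(old, new) == ["compiling b.c", "linking"]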
@@ -144,6 +148,10 @@ def start_shell() -> pexpect.spawn:  # type: ignore
     shell.expect(PROMPT, timeout=TIMEOUT)
     shell.sendline("stty -icanon -echo")
     shell.expect(PROMPT, timeout=TIMEOUT)
+    shell.sendline("set +o pipefail")
+    shell.expect(PROMPT, timeout=TIMEOUT)
+    shell.sendline("export GIT_PAGER=cat PAGER=cat")
+    shell.expect(PROMPT, timeout=TIMEOUT)
     return shell


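start_shell now primes the spawned bash so common commands cannot stall the pexpect loop. A short recap of what the two extra setup commands do, written as an illustrative Python constant (the name EXTRA_SHELL_SETUP is not part of the package):

    # Illustrative only: the strings match the sendline calls added above.
    EXTRA_SHELL_SETUP = [
        "set +o pipefail",                 # pipeline exit status comes from the last command,
                                           # so a failing producer in `cmd | head` doesn't flag the whole pipe
        "export GIT_PAGER=cat PAGER=cat",  # git and similar tools print straight to stdout instead of
                                           # opening an interactive pager that would never return the prompt
    ]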
@@ -155,16 +163,20 @@ def _is_int(mystr: str) -> bool:
         return False


-def
+def _ensure_env_and_bg_jobs(shell: pexpect.spawn) -> Optional[int]:  # type: ignore
     if PROMPT != PROMPT_CONST:
-        return
+        return None
     # First reset the prompt in case venv was sourced or other reasons.
     shell.sendline(f"export PS1={PROMPT}")
     shell.expect(PROMPT, timeout=0.2)
     # Reset echo also if it was enabled
     shell.sendline("stty -icanon -echo")
     shell.expect(PROMPT, timeout=0.2)
-    shell.sendline("
+    shell.sendline("set +o pipefail")
+    shell.expect(PROMPT, timeout=0.2)
+    shell.sendline("export GIT_PAGER=cat PAGER=cat")
+    shell.expect(PROMPT, timeout=0.2)
+    shell.sendline("jobs | wc -l")
     before = ""
     while not _is_int(before):  # Consume all previous output
         try:
@@ -174,7 +186,8 @@ def _get_exit_code(shell: pexpect.spawn) -> int:  # type: ignore
             raise
     assert isinstance(shell.before, str)
     # Render because there could be some anscii escape sequences still set like in google colab env
-
+    before_lines = render_terminal_output(shell.before)
+    before = "\n".join(before_lines).strip()

     try:
         return int((before))
@@ -195,20 +208,23 @@ class BashState:
         self._cwd: str = os.getcwd()
         self._shell = start_shell()
         self._whitelist_for_overwrite: set[str] = set()
+        self._pending_output = ""

         # Get exit info to ensure shell is ready
-
+        _ensure_env_and_bg_jobs(self._shell)

     @property
     def shell(self) -> pexpect.spawn:  # type: ignore
         return self._shell

-    def set_pending(self) -> None:
+    def set_pending(self, last_pending_output: str) -> None:
         if not isinstance(self._state, datetime.datetime):
             self._state = datetime.datetime.now()
+        self._pending_output = last_pending_output

     def set_repl(self) -> None:
         self._state = "repl"
+        self._pending_output = ""

     @property
     def state(self) -> BASH_CLF_OUTPUT:
@@ -231,7 +247,8 @@ class BashState:
         BASH_STATE.shell.sendline("pwd")
         BASH_STATE.shell.expect(PROMPT, timeout=0.2)
         assert isinstance(BASH_STATE.shell.before, str)
-
+        before_lines = render_terminal_output(BASH_STATE.shell.before)
+        current_dir = "\n".join(before_lines).strip()
         self._cwd = current_dir
         return current_dir

@@ -254,6 +271,10 @@ class BashState:
     def add_to_whitelist_for_overwrite(self, file_path: str) -> None:
         self._whitelist_for_overwrite.add(file_path)

+    @property
+    def pending_output(self) -> str:
+        return self._pending_output
+

 BASH_STATE = BashState()

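Taken together, set_pending(last_pending_output), set_repl() and the new pending_output property give BashState a small lifecycle for terminal text that has been captured but not yet consumed. A hypothetical reduced version of just that bookkeeping (PendingBuffer is not a class in the package):

    class PendingBuffer:
        """Holds raw shell output captured while a command is still running."""

        def __init__(self) -> None:
            self._pending_output = ""

        def set_pending(self, last_pending_output: str) -> None:
            self._pending_output = last_pending_output  # remember what was already captured

        def set_repl(self) -> None:
            self._pending_output = ""  # command finished; nothing left pending

        @property
        def pending_output(self) -> str:
            return self._pending_output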
@@ -305,16 +326,17 @@ def update_repl_prompt(command: str) -> bool:


 def get_status() -> str:
-    exit_code: Optional[int] = None
-
     status = "\n\n---\n\n"
     if BASH_STATE.state == "pending":
         status += "status = still running\n"
         status += "running for = " + BASH_STATE.get_pending_for() + "\n"
         status += "cwd = " + BASH_STATE.cwd + "\n"
     else:
-
-
+        bg_jobs = _ensure_env_and_bg_jobs(BASH_STATE.shell)
+        bg_desc = ""
+        if bg_jobs and bg_jobs > 0:
+            bg_desc = f"; {bg_jobs} background jobs running"
+        status += "status = process exited" + bg_desc + "\n"
         status += "cwd = " + BASH_STATE.update_cwd() + "\n"

     return status.rstrip()
@@ -348,6 +370,33 @@ def save_out_of_context(
     return file_contents[0], rest_paths


+def rstrip(lines: list[str]) -> str:
+    return "\n".join([line.rstrip() for line in lines])
+
+
+def _incremental_text(text: str, last_pending_output: str) -> str:
+    # text = render_terminal_output(text[-100_000:])
+    text = text[-100_000:]
+
+    last_pending_output_rendered_lines = render_terminal_output(last_pending_output)
+    last_pending_output_rendered = "\n".join(last_pending_output_rendered_lines)
+    last_rendered_lines = last_pending_output_rendered.split("\n")
+    if not last_rendered_lines:
+        return rstrip(render_terminal_output(text))
+
+    text = text[len(last_pending_output) :]
+    old_rendered_applied = render_terminal_output(last_pending_output_rendered + text)
+    # True incremental is then
+    rendered = get_incremental_output(last_rendered_lines[:-1], old_rendered_applied)
+
+    if not rendered:
+        return ""
+
+    if rendered[0] == last_rendered_lines[-1]:
+        rendered = rendered[1:]
+    return rstrip(rendered)
+
+
 def execute_bash(
     enc: tiktoken.Encoding,
     bash_arg: BashCommand | BashInteraction,
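_incremental_text renders the previously pending output through pyte, excludes its possibly partial last line from the anchor passed to get_incremental_output, and then drops that line from the result if it did not change, so repeated polls of a running command return only lines the caller has not seen. A simplified, self-contained stand-in for the idea (incremental below is hypothetical plain-text logic; the real function goes through render_terminal_output and pyte):

    # Hypothetical simplified delta: return only the part of `raw` not covered by `last_pending`.
    def incremental(raw: str, last_pending: str) -> str:
        return raw[len(last_pending):] if raw.startswith(last_pending) else raw

    poll_1 = "compiling a.c\n"
    poll_2 = "compiling a.c\ncompiling b.c\n"
    print(incremental(poll_1, ""))       # first poll: everything is new
    print(incremental(poll_2, poll_1))   # later poll: only "compiling b.c"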
@@ -452,40 +501,70 @@ def execute_bash(
     wait = timeout_s or TIMEOUT
     index = BASH_STATE.shell.expect([PROMPT, pexpect.TIMEOUT], timeout=wait)
     if index == 1:
-        BASH_STATE.set_pending()
         text = BASH_STATE.shell.before or ""
+        incremental_text = _incremental_text(text, BASH_STATE.pending_output)
+
+        second_wait_success = False
+        if incremental_text and isinstance(bash_arg, BashInteraction):
+            # There's some text in BashInteraction mode wait for TIMEOUT_WHILE_OUTPUT
+            remaining = TIMEOUT_WHILE_OUTPUT - wait
+            patience = OUTPUT_WAIT_PATIENCE
+            itext = incremental_text
+            while remaining > 0 and patience > 0:
+                print(remaining, TIMEOUT_WHILE_OUTPUT)
+                index = BASH_STATE.shell.expect([PROMPT, pexpect.TIMEOUT], timeout=wait)
+                if index == 0:
+                    second_wait_success = True
+                    break
+                else:
+                    _itext = BASH_STATE.shell.before or ""
+                    _itext = _incremental_text(_itext, BASH_STATE.pending_output)
+                    if _itext != itext:
+                        patience = 3
+                    else:
+                        patience -= 1
+                    itext = _itext

-
-        tokens = enc.encode(text)
-
-        if max_tokens and len(tokens) >= max_tokens:
-            text = "(...truncated)\n" + enc.decode(tokens[-(max_tokens - 1) :])
-
-        if is_interrupt:
-            text = (
-                text
-                + """---
-----
-Failure interrupting.
-If any REPL session was previously running or if bashrc was sourced, or if there is issue to other REPL related reasons:
-Run BashCommand: "wcgw_update_prompt()" to reset the PS1 prompt.
-Otherwise, you may want to try Ctrl-c again or program specific exit interactive commands.
-"""
-            )
+                remaining = remaining - wait

-
-
+        if not second_wait_success:
+            text = BASH_STATE.shell.before or ""
+            incremental_text = _incremental_text(text, BASH_STATE.pending_output)

-
+        if not second_wait_success:
+            BASH_STATE.set_pending(text)

+            tokens = enc.encode(incremental_text)
+
+            if max_tokens and len(tokens) >= max_tokens:
+                incremental_text = "(...truncated)\n" + enc.decode(
+                    tokens[-(max_tokens - 1) :]
+                )
+
+            if is_interrupt:
+                incremental_text = (
+                    incremental_text
+                    + """---
+----
+Failure interrupting.
+If any REPL session was previously running or if bashrc was sourced, or if there is issue to other REPL related reasons:
+Run BashCommand: "wcgw_update_prompt()" to reset the PS1 prompt.
+Otherwise, you may want to try Ctrl-c again or program specific exit interactive commands.
+"""
+                )
+
+            exit_status = get_status()
+            incremental_text += exit_status
+
+            return incremental_text, 0
+
+    assert isinstance(BASH_STATE.shell.before, str)
+    output = _incremental_text(BASH_STATE.shell.before, BASH_STATE.pending_output)
     BASH_STATE.set_repl()

     if is_interrupt:
         return "Interrupt successful", 0.0

-    assert isinstance(BASH_STATE.shell.before, str)
-    output = render_terminal_output(BASH_STATE.shell.before)
-
     tokens = enc.encode(output)
     if max_tokens and len(tokens) >= max_tokens:
         output = "(...truncated)\n" + enc.decode(tokens[-(max_tokens - 1) :])
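The rewritten timeout branch of execute_bash no longer marks a command pending immediately: for BashInteraction calls that are still producing output it keeps polling for up to TIMEOUT_WHILE_OUTPUT seconds, resetting its patience whenever the incremental text changes and giving up after several unchanged polls. A standalone sketch of that loop under those assumptions (wait_while_output_grows and poll are hypothetical names, not from the package):

    # Sketch of the patience loop, isolated from pexpect and BASH_STATE.
    def wait_while_output_grows(poll, wait=5, timeout_while_output=20, patience=3):
        remaining = timeout_while_output - wait
        last = poll()                      # first incremental snapshot
        while remaining > 0 and patience > 0:
            current = poll()               # stands in for shell.expect + _incremental_text
            if current != last:
                patience = 3               # output is still changing: keep waiting
            else:
                patience -= 1              # nothing new this round
            last = current
            remaining -= wait
        return last

In the diff the same shape drives the shell directly, with poll replaced by shell.expect plus _incremental_text and the loop exiting early as soon as the prompt returns.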
@@ -1101,8 +1180,6 @@ def get_tool_output(
 History = list[ChatCompletionMessageParam]

 default_enc = tiktoken.encoding_for_model("gpt-4o")
-default_model: Models = "gpt-4o-2024-08-06"
-default_cost = CostData(cost_per_1m_input_tokens=0.15, cost_per_1m_output_tokens=0.6)
 curr_cost = 0.0


@@ -1121,7 +1198,7 @@ class Mdata(BaseModel):


 def register_client(server_url: str, client_uuid: str = "") -> None:
-    global default_enc,
+    global default_enc, curr_cost
     # Generate a unique UUID for this client
     if not client_uuid:
         client_uuid = str(uuid.uuid4())
tests/test_tools.py

@@ -5,7 +5,7 @@ from src.wcgw.types_ import WriteIfEmpty


 class TestTools(unittest.TestCase):
-    def test_render_terminal_output(self):
+    def test_render_terminal_output(self) -> None:
         # Simulated terminal output
         terminal_output = (
             "\x1b[1;31mHello\x1b[0m\nThis is a test\n\x1b[2K\rLine to clear\n"
@@ -14,9 +14,7 @@ class TestTools(unittest.TestCase):
         expected_result = "Hello\nThis is a test\nLine to clear"
         result = render_terminal_output(terminal_output)
         # Stripping extra whitespace and ensuring content matches
-        self.assertEqual(
-            "\n".join(line.strip() for line in result.splitlines()), expected_result
-        )
+        self.assertEqual("\n".join(line.strip() for line in result), expected_result)

     @patch("builtins.input", return_value="y")
     def test_ask_confirmation_yes(self, mock_input):