wcgw 2.3.3__py3-none-any.whl → 2.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of wcgw might be problematic; consult the package registry's advisory page for more details.

@@ -6,12 +6,12 @@ import sys
6
6
  import traceback
7
7
  from typing import Any
8
8
 
9
- from mcp.server.models import InitializationOptions
10
- import mcp.types as types
11
- from mcp.types import Tool as ToolParam
12
- from mcp.server import NotificationOptions, Server
9
+ from mcp_wcgw.server.models import InitializationOptions
10
+ import mcp_wcgw.types as types
11
+ from mcp_wcgw.types import Tool as ToolParam
12
+ from mcp_wcgw.server import NotificationOptions, Server
13
13
  from pydantic import AnyUrl, BaseModel, ValidationError
14
- import mcp.server.stdio
14
+ import mcp_wcgw.server.stdio
15
15
  from .. import tools
16
16
  from ..tools import DoneFlag, get_tool_output, which_tool_name, default_enc
17
17
  from ...types_ import (
@@ -82,14 +82,14 @@ async def handle_list_tools() -> list[types.Tool]:
82
82
  ToolParam(
83
83
  inputSchema=BashCommand.model_json_schema(),
84
84
  name="BashCommand",
85
- description="""
85
+ description=f"""
86
86
  - Execute a bash command. This is stateful (beware with subsequent calls).
87
87
  - Do not use interactive commands like nano. Prefer writing simpler commands.
88
88
  - Status of the command and the current working directory will always be returned at the end.
89
89
  - Optionally `exit shell has restarted` is the output, in which case environment resets, you can run fresh commands.
90
90
  - The first or the last line might be `(...truncated)` if the output is too long.
91
91
  - Always run `pwd` if you get any file or directory not found error to make sure you're not lost.
92
- - The control will return to you in 3 seconds regardless of the status. For heavy commands, keep checking status using BashInteraction till they are finished.
92
+ - The control will return to you in {SLEEP_TIME_MAX_S} seconds regardless of the status. For heavy commands, keep checking status using BashInteraction till they are finished.
93
93
  - Run long running commands in background using screen instead of "&".
94
94
  - Use longer wait_for_seconds if the command is expected to run for a long time.
95
95
  """,
@@ -97,13 +97,13 @@ async def handle_list_tools() -> list[types.Tool]:
97
97
  ToolParam(
98
98
  inputSchema=BashInteraction.model_json_schema(),
99
99
  name="BashInteraction",
100
- description="""
100
+ description=f"""
101
101
  - Interact with running program using this tool
102
102
  - Special keys like arrows, interrupts, enter, etc.
103
103
  - Send text input to the running program.
104
104
  - Send send_specials=["Enter"] to recheck status of a running program.
105
105
  - Only one of send_text, send_specials, send_ascii should be provided.
106
- - This returns within 3 seconds, for heavy programs keep checking status for upto 10 turns before asking user to continue checking again.
106
+ - This returns within {SLEEP_TIME_MAX_S} seconds, for heavy programs keep checking status for upto 10 turns before asking user to continue checking again.
107
107
  - Programs don't hang easily, so most likely explanation for no output is usually that the program is still running, and you need to check status again using ["Enter"].
108
108
  - Do not send Ctrl-c before checking for status till 10 minutes or whatever is appropriate for the program to finish.
109
109
  - Set longer wait_for_seconds when program is expected to run for a long time.
@@ -273,7 +273,8 @@ async def main(computer_use: bool) -> None:
273
273
  global COMPUTER_USE_ON_DOCKER_ENABLED
274
274
 
275
275
  tools.TIMEOUT = SLEEP_TIME_MAX_S
276
-
276
+ tools.TIMEOUT_WHILE_OUTPUT = 55
277
+ tools.OUTPUT_WAIT_PATIENCE = 5
277
278
  tools.console = tools.DisableConsole()
278
279
 
279
280
  if computer_use:
@@ -281,7 +282,7 @@ async def main(computer_use: bool) -> None:
281
282
 
282
283
  version = importlib.metadata.version("wcgw")
283
284
  # Run the server using stdin/stdout streams
284
- async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
285
+ async with mcp_wcgw.server.stdio.stdio_server() as (read_stream, write_stream):
285
286
  await server.run(
286
287
  read_stream,
287
288
  write_stream,
wcgw/client/tools.py CHANGED
@@ -1,28 +1,22 @@
1
- import asyncio
2
1
  import base64
3
- from concurrent.futures import ThreadPoolExecutor, as_completed
4
2
  import datetime
5
- from io import BytesIO
6
3
  import json
7
4
  import mimetypes
8
5
  from pathlib import Path
9
6
  import re
10
7
  import shlex
11
- import sys
12
- import threading
13
8
  import importlib.metadata
14
9
  import time
15
10
  import traceback
16
11
  from tempfile import NamedTemporaryFile, TemporaryDirectory
17
12
  from typing import (
18
13
  Callable,
14
+ DefaultDict,
19
15
  Literal,
20
- NewType,
21
16
  Optional,
22
17
  ParamSpec,
23
18
  Type,
24
19
  TypeVar,
25
- TypedDict,
26
20
  )
27
21
  import uuid
28
22
  import humanize
@@ -33,22 +27,16 @@ from websockets.sync.client import connect as syncconnect
33
27
 
34
28
  import os
35
29
  import tiktoken
36
- import petname # type: ignore[import-untyped]
37
30
  import pexpect
38
31
  from typer import Typer
39
32
  import websockets
40
33
 
41
34
  import rich
42
35
  import pyte
43
- from dotenv import load_dotenv
44
36
 
45
37
  from syntax_checker import check_syntax
46
- from openai import OpenAI
47
38
  from openai.types.chat import (
48
39
  ChatCompletionMessageParam,
49
- ChatCompletionAssistantMessageParam,
50
- ChatCompletionMessage,
51
- ParsedChatCompletionMessage,
52
40
  )
53
41
  from difflib import SequenceMatcher
54
42
 
@@ -68,9 +56,7 @@ from ..types_ import (
68
56
  GetScreenInfo,
69
57
  )
70
58
 
71
- from .common import CostData, Models, discard_input
72
59
  from .sys_utils import command_run
73
- from .openai_utils import get_input_cost, get_output_cost
74
60
 
75
61
 
76
62
  class DisableConsole:
@@ -86,9 +72,11 @@ console: rich.console.Console | DisableConsole = rich.console.Console(
86
72
  )
87
73
 
88
74
  TIMEOUT = 5
75
+ TIMEOUT_WHILE_OUTPUT = 20
76
+ OUTPUT_WAIT_PATIENCE = 3
89
77
 
90
78
 
91
- def render_terminal_output(text: str) -> str:
79
+ def render_terminal_output(text: str) -> list[str]:
92
80
  screen = pyte.Screen(160, 500)
93
81
  screen.set_mode(pyte.modes.LNM)
94
82
  stream = pyte.Stream(screen)
@@ -101,9 +89,25 @@ def render_terminal_output(text: str) -> str:
101
89
  else:
102
90
  i = len(dsp)
103
91
  lines = screen.display[: len(dsp) - i]
104
- # Strip trailing space
105
- lines = [line.rstrip() for line in lines]
106
- return "\n".join(lines)
92
+ return lines
93
+
94
+
95
+ def get_incremental_output(old_output: list[str], new_output: list[str]) -> list[str]:
96
+ nold = len(old_output)
97
+ nnew = len(new_output)
98
+ if not old_output:
99
+ return new_output
100
+ for i in range(nnew - 1, -1, -1):
101
+ if new_output[i] != old_output[-1]:
102
+ continue
103
+ for j in range(i - 1, -1, -1):
104
+ if (nold - 1 + j - i) < 0:
105
+ break
106
+ if new_output[j] != old_output[-1 + j - i]:
107
+ break
108
+ else:
109
+ return new_output[i + 1 :]
110
+ return new_output
107
111
 
108
112
 
109
113
  class Confirmation(BaseModel):
@@ -144,6 +148,10 @@ def start_shell() -> pexpect.spawn: # type: ignore
144
148
  shell.expect(PROMPT, timeout=TIMEOUT)
145
149
  shell.sendline("stty -icanon -echo")
146
150
  shell.expect(PROMPT, timeout=TIMEOUT)
151
+ shell.sendline("set +o pipefail")
152
+ shell.expect(PROMPT, timeout=TIMEOUT)
153
+ shell.sendline("export GIT_PAGER=cat PAGER=cat")
154
+ shell.expect(PROMPT, timeout=TIMEOUT)
147
155
  return shell
148
156
 
149
157
 
@@ -155,16 +163,20 @@ def _is_int(mystr: str) -> bool:
155
163
  return False
156
164
 
157
165
 
158
- def _get_exit_code(shell: pexpect.spawn) -> int: # type: ignore
166
+ def _ensure_env_and_bg_jobs(shell: pexpect.spawn) -> Optional[int]: # type: ignore
159
167
  if PROMPT != PROMPT_CONST:
160
- return 0
168
+ return None
161
169
  # First reset the prompt in case venv was sourced or other reasons.
162
170
  shell.sendline(f"export PS1={PROMPT}")
163
171
  shell.expect(PROMPT, timeout=0.2)
164
172
  # Reset echo also if it was enabled
165
173
  shell.sendline("stty -icanon -echo")
166
174
  shell.expect(PROMPT, timeout=0.2)
167
- shell.sendline("echo $?")
175
+ shell.sendline("set +o pipefail")
176
+ shell.expect(PROMPT, timeout=0.2)
177
+ shell.sendline("export GIT_PAGER=cat PAGER=cat")
178
+ shell.expect(PROMPT, timeout=0.2)
179
+ shell.sendline("jobs | wc -l")
168
180
  before = ""
169
181
  while not _is_int(before): # Consume all previous output
170
182
  try:
@@ -174,7 +186,8 @@ def _get_exit_code(shell: pexpect.spawn) -> int: # type: ignore
174
186
  raise
175
187
  assert isinstance(shell.before, str)
176
188
  # Render because there could be some anscii escape sequences still set like in google colab env
177
- before = render_terminal_output(shell.before).strip()
189
+ before_lines = render_terminal_output(shell.before)
190
+ before = "\n".join(before_lines).strip()
178
191
 
179
192
  try:
180
193
  return int((before))
@@ -195,20 +208,23 @@ class BashState:
195
208
  self._cwd: str = os.getcwd()
196
209
  self._shell = start_shell()
197
210
  self._whitelist_for_overwrite: set[str] = set()
211
+ self._pending_output = ""
198
212
 
199
213
  # Get exit info to ensure shell is ready
200
- _get_exit_code(self._shell)
214
+ _ensure_env_and_bg_jobs(self._shell)
201
215
 
202
216
  @property
203
217
  def shell(self) -> pexpect.spawn: # type: ignore
204
218
  return self._shell
205
219
 
206
- def set_pending(self) -> None:
220
+ def set_pending(self, last_pending_output: str) -> None:
207
221
  if not isinstance(self._state, datetime.datetime):
208
222
  self._state = datetime.datetime.now()
223
+ self._pending_output = last_pending_output
209
224
 
210
225
  def set_repl(self) -> None:
211
226
  self._state = "repl"
227
+ self._pending_output = ""
212
228
 
213
229
  @property
214
230
  def state(self) -> BASH_CLF_OUTPUT:
@@ -231,7 +247,8 @@ class BashState:
231
247
  BASH_STATE.shell.sendline("pwd")
232
248
  BASH_STATE.shell.expect(PROMPT, timeout=0.2)
233
249
  assert isinstance(BASH_STATE.shell.before, str)
234
- current_dir = render_terminal_output(BASH_STATE.shell.before).strip()
250
+ before_lines = render_terminal_output(BASH_STATE.shell.before)
251
+ current_dir = "\n".join(before_lines).strip()
235
252
  self._cwd = current_dir
236
253
  return current_dir
237
254
 
@@ -254,6 +271,10 @@ class BashState:
254
271
  def add_to_whitelist_for_overwrite(self, file_path: str) -> None:
255
272
  self._whitelist_for_overwrite.add(file_path)
256
273
 
274
+ @property
275
+ def pending_output(self) -> str:
276
+ return self._pending_output
277
+
257
278
 
258
279
  BASH_STATE = BashState()
259
280
 
@@ -305,16 +326,17 @@ def update_repl_prompt(command: str) -> bool:
305
326
 
306
327
 
307
328
  def get_status() -> str:
308
- exit_code: Optional[int] = None
309
-
310
329
  status = "\n\n---\n\n"
311
330
  if BASH_STATE.state == "pending":
312
331
  status += "status = still running\n"
313
332
  status += "running for = " + BASH_STATE.get_pending_for() + "\n"
314
333
  status += "cwd = " + BASH_STATE.cwd + "\n"
315
334
  else:
316
- exit_code = _get_exit_code(BASH_STATE.shell)
317
- status += f"status = exited with code {exit_code}\n"
335
+ bg_jobs = _ensure_env_and_bg_jobs(BASH_STATE.shell)
336
+ bg_desc = ""
337
+ if bg_jobs and bg_jobs > 0:
338
+ bg_desc = f"; {bg_jobs} background jobs running"
339
+ status += "status = process exited" + bg_desc + "\n"
318
340
  status += "cwd = " + BASH_STATE.update_cwd() + "\n"
319
341
 
320
342
  return status.rstrip()
@@ -348,6 +370,33 @@ def save_out_of_context(
348
370
  return file_contents[0], rest_paths
349
371
 
350
372
 
373
+ def rstrip(lines: list[str]) -> str:
374
+ return "\n".join([line.rstrip() for line in lines])
375
+
376
+
377
+ def _incremental_text(text: str, last_pending_output: str) -> str:
378
+ # text = render_terminal_output(text[-100_000:])
379
+ text = text[-100_000:]
380
+
381
+ last_pending_output_rendered_lines = render_terminal_output(last_pending_output)
382
+ last_pending_output_rendered = "\n".join(last_pending_output_rendered_lines)
383
+ last_rendered_lines = last_pending_output_rendered.split("\n")
384
+ if not last_rendered_lines:
385
+ return rstrip(render_terminal_output(text))
386
+
387
+ text = text[len(last_pending_output) :]
388
+ old_rendered_applied = render_terminal_output(last_pending_output_rendered + text)
389
+ # True incremental is then
390
+ rendered = get_incremental_output(last_rendered_lines[:-1], old_rendered_applied)
391
+
392
+ if not rendered:
393
+ return ""
394
+
395
+ if rendered[0] == last_rendered_lines[-1]:
396
+ rendered = rendered[1:]
397
+ return rstrip(rendered)
398
+
399
+
351
400
  def execute_bash(
352
401
  enc: tiktoken.Encoding,
353
402
  bash_arg: BashCommand | BashInteraction,
@@ -449,43 +498,73 @@ def execute_bash(
449
498
  BASH_STATE.shell.expect(PROMPT)
450
499
  return "---\n\nFailure: user interrupted the execution", 0.0
451
500
 
452
- wait = timeout_s or TIMEOUT
501
+ wait = min(timeout_s or TIMEOUT, TIMEOUT_WHILE_OUTPUT)
453
502
  index = BASH_STATE.shell.expect([PROMPT, pexpect.TIMEOUT], timeout=wait)
454
503
  if index == 1:
455
- BASH_STATE.set_pending()
456
504
  text = BASH_STATE.shell.before or ""
505
+ incremental_text = _incremental_text(text, BASH_STATE.pending_output)
506
+
507
+ second_wait_success = False
508
+ if incremental_text and isinstance(bash_arg, BashInteraction):
509
+ # There's some text in BashInteraction mode wait for TIMEOUT_WHILE_OUTPUT
510
+ remaining = TIMEOUT_WHILE_OUTPUT - wait
511
+ patience = OUTPUT_WAIT_PATIENCE
512
+ itext = incremental_text
513
+ while remaining > 0 and patience > 0:
514
+ print(remaining, TIMEOUT_WHILE_OUTPUT)
515
+ index = BASH_STATE.shell.expect([PROMPT, pexpect.TIMEOUT], timeout=wait)
516
+ if index == 0:
517
+ second_wait_success = True
518
+ break
519
+ else:
520
+ _itext = BASH_STATE.shell.before or ""
521
+ _itext = _incremental_text(_itext, BASH_STATE.pending_output)
522
+ if _itext != itext:
523
+ patience = 3
524
+ else:
525
+ patience -= 1
526
+ itext = _itext
457
527
 
458
- text = render_terminal_output(text[-100_000:])
459
- tokens = enc.encode(text)
460
-
461
- if max_tokens and len(tokens) >= max_tokens:
462
- text = "(...truncated)\n" + enc.decode(tokens[-(max_tokens - 1) :])
463
-
464
- if is_interrupt:
465
- text = (
466
- text
467
- + """---
468
- ----
469
- Failure interrupting.
470
- If any REPL session was previously running or if bashrc was sourced, or if there is issue to other REPL related reasons:
471
- Run BashCommand: "wcgw_update_prompt()" to reset the PS1 prompt.
472
- Otherwise, you may want to try Ctrl-c again or program specific exit interactive commands.
473
- """
474
- )
528
+ remaining = remaining - wait
475
529
 
476
- exit_status = get_status()
477
- text += exit_status
530
+ if not second_wait_success:
531
+ text = BASH_STATE.shell.before or ""
532
+ incremental_text = _incremental_text(text, BASH_STATE.pending_output)
478
533
 
479
- return text, 0
534
+ if not second_wait_success:
535
+ BASH_STATE.set_pending(text)
480
536
 
537
+ tokens = enc.encode(incremental_text)
538
+
539
+ if max_tokens and len(tokens) >= max_tokens:
540
+ incremental_text = "(...truncated)\n" + enc.decode(
541
+ tokens[-(max_tokens - 1) :]
542
+ )
543
+
544
+ if is_interrupt:
545
+ incremental_text = (
546
+ incremental_text
547
+ + """---
548
+ ----
549
+ Failure interrupting.
550
+ If any REPL session was previously running or if bashrc was sourced, or if there is issue to other REPL related reasons:
551
+ Run BashCommand: "wcgw_update_prompt()" to reset the PS1 prompt.
552
+ Otherwise, you may want to try Ctrl-c again or program specific exit interactive commands.
553
+ """
554
+ )
555
+
556
+ exit_status = get_status()
557
+ incremental_text += exit_status
558
+
559
+ return incremental_text, 0
560
+
561
+ assert isinstance(BASH_STATE.shell.before, str)
562
+ output = _incremental_text(BASH_STATE.shell.before, BASH_STATE.pending_output)
481
563
  BASH_STATE.set_repl()
482
564
 
483
565
  if is_interrupt:
484
566
  return "Interrupt successful", 0.0
485
567
 
486
- assert isinstance(BASH_STATE.shell.before, str)
487
- output = render_terminal_output(BASH_STATE.shell.before)
488
-
489
568
  tokens = enc.encode(output)
490
569
  if max_tokens and len(tokens) >= max_tokens:
491
570
  output = "(...truncated)\n" + enc.decode(tokens[-(max_tokens - 1) :])
@@ -1101,8 +1180,6 @@ def get_tool_output(
1101
1180
  History = list[ChatCompletionMessageParam]
1102
1181
 
1103
1182
  default_enc = tiktoken.encoding_for_model("gpt-4o")
1104
- default_model: Models = "gpt-4o-2024-08-06"
1105
- default_cost = CostData(cost_per_1m_input_tokens=0.15, cost_per_1m_output_tokens=0.6)
1106
1183
  curr_cost = 0.0
1107
1184
 
1108
1185
 
@@ -1121,7 +1198,7 @@ class Mdata(BaseModel):
1121
1198
 
1122
1199
 
1123
1200
  def register_client(server_url: str, client_uuid: str = "") -> None:
1124
- global default_enc, default_model, curr_cost
1201
+ global default_enc, curr_cost
1125
1202
  # Generate a unique UUID for this client
1126
1203
  if not client_uuid:
1127
1204
  client_uuid = str(uuid.uuid4())
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: wcgw
3
- Version: 2.3.3
3
+ Version: 2.4.1
4
4
  Summary: Shell and coding agent on claude and chatgpt
5
5
  Project-URL: Homepage, https://github.com/rusiaaman/wcgw
6
6
  Author-email: Aman Rusia <gapypi@arcfu.com>
@@ -8,7 +8,7 @@ Requires-Python: <3.13,>=3.11
8
8
  Requires-Dist: anthropic>=0.39.0
9
9
  Requires-Dist: fastapi>=0.115.0
10
10
  Requires-Dist: humanize>=4.11.0
11
- Requires-Dist: mcp
11
+ Requires-Dist: mcp-wcgw
12
12
  Requires-Dist: openai>=1.46.0
13
13
  Requires-Dist: petname>=2.6
14
14
  Requires-Dist: pexpect>=4.9.0
@@ -10,13 +10,13 @@ wcgw/client/diff-instructions.txt,sha256=s5AJKG23JsjwRYhFZFQVvwDpF67vElawrmdXwvu
10
10
  wcgw/client/openai_client.py,sha256=uJ2l9NXsZuipUcJYR_bFcNNmNlfnCvPm6-M-LiVSVts,17942
11
11
  wcgw/client/openai_utils.py,sha256=YNwCsA-Wqq7jWrxP0rfQmBTb1dI0s7dWXzQqyTzOZT4,2629
12
12
  wcgw/client/sys_utils.py,sha256=GajPntKhaTUMn6EOmopENWZNR2G_BJyuVbuot0x6veI,1376
13
- wcgw/client/tools.py,sha256=99v3uZyTU5MDiPHm1GZ3Atpl9UZ8Ju4rAn2TCbFOjZk,40975
13
+ wcgw/client/tools.py,sha256=ZLUlVDS-G-_UId5nJoeXINlvjvP2DUGLuTAFmHTDUvw,44131
14
14
  wcgw/client/mcp_server/Readme.md,sha256=I8N4dHkTUVGNQ63BQkBMBhCCBTgqGOSF_pUR6iOEiUk,2495
15
15
  wcgw/client/mcp_server/__init__.py,sha256=hyPPwO9cabAJsOMWhKyat9yl7OlSmIobaoAZKHu3DMc,381
16
- wcgw/client/mcp_server/server.py,sha256=4tJODj6-iH5K-KCWLEN92s1S0TNsav0I4IbyR4ITFpY,11820
16
+ wcgw/client/mcp_server/server.py,sha256=CNUOAd83lCq0Ed_ZRwd66gIjMFN9VBSO4moTLUPTWwM,11956
17
17
  wcgw/relay/serve.py,sha256=KLYjTvM9CfqdxgFOfHM8LUkFGZ9kKyyJunpNdEIFQUk,8766
18
18
  wcgw/relay/static/privacy.txt,sha256=s9qBdbx2SexCpC_z33sg16TptmAwDEehMCLz4L50JLc,529
19
- wcgw-2.3.3.dist-info/METADATA,sha256=zaU-wF0SlCYCh1SnJCAD-PLsZC9YbhY5r11Dqey0Q6Y,7950
20
- wcgw-2.3.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
21
- wcgw-2.3.3.dist-info/entry_points.txt,sha256=eKo1omwbAggWlQ0l7GKoR7uV1-j16nk9tK0BhC2Oz_E,120
22
- wcgw-2.3.3.dist-info/RECORD,,
19
+ wcgw-2.4.1.dist-info/METADATA,sha256=mjWjB7heir277o_H6FfQoabauQIgGTWFlKT548ToFNU,7955
20
+ wcgw-2.4.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
21
+ wcgw-2.4.1.dist-info/entry_points.txt,sha256=eKo1omwbAggWlQ0l7GKoR7uV1-j16nk9tK0BhC2Oz_E,120
22
+ wcgw-2.4.1.dist-info/RECORD,,
File without changes