wcgw 0.0.8__py3-none-any.whl → 0.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


wcgw/basic.py CHANGED
@@ -16,13 +16,12 @@ from openai.types.chat import (
     ParsedChatCompletionMessage,
 )
 import rich
-import petname
+import petname  # type: ignore[import-untyped]
 from typer import Typer
 import uuid
 
-from wcgw.common import Config, text_from_editor
-
-from .common import Models
+from .common import Models, discard_input
+from .common import CostData, History
 from .openai_utils import get_input_cost, get_output_cost
 from .tools import ExecuteBash, ReadImage, ImageData
 
@@ -31,7 +30,6 @@ from .tools import (
     Confirmation,
     DoneFlag,
     Writefile,
-    get_is_waiting_user_input,
     get_tool_output,
     SHELL,
     start_shell,
@@ -40,14 +38,40 @@ from .tools import (
 import tiktoken
 
 from urllib import parse
+import subprocess
 import os
+import tempfile
 
 import toml
+from pydantic import BaseModel
 
 
 from dotenv import load_dotenv
 
-History = list[ChatCompletionMessageParam]
+
+class Config(BaseModel):
+    model: Models
+    secondary_model: Models
+    cost_limit: float
+    cost_file: dict[Models, CostData]
+    cost_unit: str = "$"
+
+
+def text_from_editor(console: rich.console.Console) -> str:
+    # First consume all the input till now
+    discard_input()
+    console.print("\n---------------------------------------\n# User message")
+    data = input()
+    if data:
+        return data
+    editor = os.environ.get("EDITOR", "vim")
+    with tempfile.NamedTemporaryFile(suffix=".tmp") as tf:
+        subprocess.run([editor, tf.name], check=True)
+        with open(tf.name, "r") as f:
+            data = f.read()
+    console.print(data)
+    return data
+
 
 def save_history(history: History, session_id: str) -> None:
     myid = str(history[1]["content"]).replace("/", "_").replace(" ", "_").lower()[:60]
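Note: `Config` now lives here (moved out of `wcgw/common.py`, see below) and is filled from the `config.toml` that `loop()` locates two directories above the module. A minimal sketch of how such a file could map onto the model — the prices, model names, and the `cost_per_1m_input_tokens` field name are illustrative assumptions, not values shipped by the package:

```python
# Hypothetical sketch: loading a config.toml into the new Config model.
# All values are placeholders; only Config's field names come from the
# diff above (the input-price field name of CostData is assumed).
import toml

raw = toml.loads(
    """
model = "gpt-4o-2024-08-06"
secondary_model = "gpt-4o-mini"
cost_limit = 0.1

[cost_file."gpt-4o-2024-08-06"]
cost_per_1m_input_tokens = 2.5
cost_per_1m_output_tokens = 10.0
"""
)
config = Config(**raw)  # pydantic validates against Models/CostData
```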
@@ -67,29 +91,22 @@ def parse_user_message_special(msg: str) -> ChatCompletionUserMessageParam:
         if line.startswith("%"):
             args = line[1:].strip().split(" ")
             command = args[0]
-            assert command == 'image'
+            assert command == "image"
             image_path = args[1]
-            with open(image_path, 'rb') as f:
+            with open(image_path, "rb") as f:
                 image_bytes = f.read()
             image_b64 = base64.b64encode(image_bytes).decode("utf-8")
             image_type = mimetypes.guess_type(image_path)[0]
-            dataurl=f'data:{image_type};base64,{image_b64}'
-            parts.append({
-                'type': 'image_url',
-                'image_url': {
-                    'url': dataurl,
-                    'detail': 'auto'
-                }
-            })
+            dataurl = f"data:{image_type};base64,{image_b64}"
+            parts.append(
+                {"type": "image_url", "image_url": {"url": dataurl, "detail": "auto"}}
+            )
         else:
-            if len(parts) > 0 and parts[-1]['type'] == 'text':
-                parts[-1]['text'] += '\n' + line
+            if len(parts) > 0 and parts[-1]["type"] == "text":
+                parts[-1]["text"] += "\n" + line
             else:
-                parts.append({'type': 'text', 'text': line})
-    return {
-        'role': 'user',
-        'content': parts
-    }
+                parts.append({"type": "text", "text": line})
+    return {"role": "user", "content": parts}
 
 
 app = Typer(pretty_exceptions_show_locals=False)
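For reference, this is what the reformatted parser produces when a message mixes text with the `%image` directive (the path is a hypothetical example and the base64 payload is elided):

```python
# Illustrative input/output for parse_user_message_special.
msg = "What does this screenshot show?\n%image /tmp/shot.png"
parsed = parse_user_message_special(msg)
# parsed == {
#     "role": "user",
#     "content": [
#         {"type": "text", "text": "What does this screenshot show?"},
#         {
#             "type": "image_url",
#             "image_url": {"url": "data:image/png;base64,...", "detail": "auto"},
#         },
#     ],
# }
```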
@@ -121,7 +138,7 @@ def loop(
        if history[1]["role"] != "user":
            raise ValueError("Invalid history file, second message should be user")
        first_message = ""
-        waiting_for_assistant = history[-1]['role'] != 'assistant'
+        waiting_for_assistant = history[-1]["role"] != "assistant"
 
    my_dir = os.path.dirname(__file__)
    config_file = os.path.join(my_dir, "..", "..", "config.toml")
@@ -136,9 +153,6 @@
    enc = tiktoken.encoding_for_model(
        config.model if not config.model.startswith("o1") else "gpt-4o"
    )
-    is_waiting_user_input = get_is_waiting_user_input(
-        config.model, config.cost_file[config.model]
-    )
 
    tools = [
        openai.pydantic_function_tool(
@@ -265,7 +279,7 @@ System information:
                )
                system_console.print(f"\nTotal cost: {config.cost_unit}{cost:.3f}")
                output_toks += output_toks_
-
+
            _histories.append(item)
            for tool_call_id, toolcallargs in tool_call_args_by_id.items():
                for toolindex, tool_args in toolcallargs.items():
@@ -275,7 +289,7 @@ System information:
                            enc,
                            limit - cost,
                            loop,
-                            is_waiting_user_input,
+                            max_tokens=2048,
                        )
                    except Exception as e:
                        output_or_done = (
@@ -297,42 +311,49 @@ System information:
                            f"\nTotal cost: {config.cost_unit}{cost:.3f}"
                        )
                        return output_or_done.task_output, cost
-
+
                    output = output_or_done
 
                    if isinstance(output, ImageData):
                        randomId = petname.Generate(2, "-")
                        if not image_histories:
-                            image_histories.extend([
-                                {
-                                    'role': 'assistant',
-                                    'content': f'Share images with ids: {randomId}'
-
-                                },
-                                {
-                                    'role': 'user',
-                                    'content': [{
-                                        'type': 'image_url',
-                                        'image_url': {
-                                            'url': output.dataurl,
-                                            'detail': 'auto'
-                                        }
-                                    }]
-                                }]
+                            image_histories.extend(
+                                [
+                                    {
+                                        "role": "assistant",
+                                        "content": f"Share images with ids: {randomId}",
+                                    },
+                                    {
+                                        "role": "user",
+                                        "content": [
+                                            {
+                                                "type": "image_url",
+                                                "image_url": {
+                                                    "url": output.dataurl,
+                                                    "detail": "auto",
+                                                },
+                                            }
+                                        ],
+                                    },
+                                ]
                            )
                        else:
-                            image_histories[0]['content'] += ', ' + randomId
-                            image_histories[1]["content"].append({  # type: ignore
-                                'type': 'image_url',
-                                'image_url': {
-                                    'url': output.dataurl,
-                                    'detail': 'auto'
+                            image_histories[0]["content"] += ", " + randomId
+                            second_content = image_histories[1]["content"]
+                            assert isinstance(second_content, list)
+                            second_content.append(
+                                {
+                                    "type": "image_url",
+                                    "image_url": {
+                                        "url": output.dataurl,
+                                        "detail": "auto",
+                                    },
                                }
-                            })
+                            )
 
                        item = {
                            "role": "tool",
-                            "content": f'Ask user for image id: {randomId}',
+                            "content": f"Ask user for image id: {randomId}",
                            "tool_call_id": tool_call_id + str(toolindex),
                        }
                    else:
wcgw/common.py CHANGED
@@ -1,13 +1,9 @@
-import os
 import select
-import subprocess
 import sys
-import tempfile
 import termios
 import tty
 from typing import Literal
 from pydantic import BaseModel
-import rich
 
 
 class CostData(BaseModel):
@@ -15,6 +11,14 @@ class CostData(BaseModel):
     cost_per_1m_output_tokens: float
 
 
+from openai.types.chat import (
+    ChatCompletionMessageParam,
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionMessage,
+    ParsedChatCompletionMessage,
+)
+
+History = list[ChatCompletionMessageParam]
 Models = Literal["gpt-4o-2024-08-06", "gpt-4o-mini"]
 
 
@@ -41,27 +45,3 @@ def discard_input() -> None:
     finally:
         # Restore old terminal settings
         termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
-
-
-class Config(BaseModel):
-    model: Models
-    secondary_model: Models
-    cost_limit: float
-    cost_file: dict[Models, CostData]
-    cost_unit: str = "$"
-
-
-def text_from_editor(console: rich.console.Console) -> str:
-    # First consume all the input till now
-    discard_input()
-    console.print("\n---------------------------------------\n# User message")
-    data = input()
-    if data:
-        return data
-    editor = os.environ.get("EDITOR", "vim")
-    with tempfile.NamedTemporaryFile(suffix=".tmp") as tf:
-        subprocess.run([editor, tf.name], check=True)
-        with open(tf.name, "r") as f:
-            data = f.read()
-    console.print(data)
-    return data
wcgw/tools.py CHANGED
@@ -5,14 +5,23 @@ import mimetypes
 import sys
 import threading
 import traceback
-from typing import Callable, Literal, NewType, Optional, ParamSpec, Sequence, TypeVar, TypedDict
+from typing import (
+    Callable,
+    Literal,
+    NewType,
+    Optional,
+    ParamSpec,
+    Sequence,
+    TypeVar,
+    TypedDict,
+)
 import uuid
 from pydantic import BaseModel, TypeAdapter
 from websockets.sync.client import connect as syncconnect
 
 import os
 import tiktoken
-import petname  # type: ignore[import]
+import petname  # type: ignore[import-untyped]
 import pexpect
 from typer import Typer
 import websockets
@@ -68,14 +77,14 @@ class Writefile(BaseModel):
     file_content: str
 
 
-def start_shell():
+def start_shell() -> pexpect.spawn:
     SHELL = pexpect.spawn(
         "/bin/bash --noprofile --norc",
-        env={**os.environ, **{"PS1": "#@@"}},
+        env={**os.environ, **{"PS1": "#@@"}},  # type: ignore[arg-type]
         echo=False,
         encoding="utf-8",
         timeout=TIMEOUT,
-    )  # type: ignore[arg-type]
+    )
     SHELL.expect("#@@")
     SHELL.sendline("stty -icanon -echo")
     SHELL.expect("#@@")
@@ -119,17 +128,6 @@ BASH_CLF_OUTPUT = Literal["running", "waiting_for_input", "wont_exit"]
 BASH_STATE: BASH_CLF_OUTPUT = "running"
 
 
-def get_output_of_last_command(enc: tiktoken.Encoding) -> str:
-    global SHELL, BASH_STATE
-    output = render_terminal_output(SHELL.before)
-
-    tokens = enc.encode(output)
-    if len(tokens) >= 2048:
-        output = "...(truncated)\n" + enc.decode(tokens[-2047:])
-
-    return output
-
-
 WAITING_INPUT_MESSAGE = """A command is already running waiting for input. NOTE: You can't run multiple shell sessions, likely a previous program hasn't exited.
 1. Get its output using `send_ascii: [10]`
 2. Use `send_ascii` to give inputs to the running program, don't use `execute_command` OR
@@ -137,9 +135,7 @@ WAITING_INPUT_MESSAGE = """A command is already running waiting for input. NOTE:
 
 
 def execute_bash(
-    enc: tiktoken.Encoding,
-    bash_arg: ExecuteBash,
-    is_waiting_user_input: Callable[[str], tuple[BASH_CLF_OUTPUT, float]],
+    enc: tiktoken.Encoding, bash_arg: ExecuteBash, max_tokens: Optional[int]
 ) -> tuple[str, float]:
     global SHELL, BASH_STATE
     try:
@@ -186,38 +182,33 @@
            SHELL = start_shell()
        raise
 
-    wait = timeout = 5
+    wait = 5
    index = SHELL.expect(["#@@", pexpect.TIMEOUT], timeout=wait)
    running = ""
    while index == 1:
        if wait > TIMEOUT:
            raise TimeoutError("Timeout while waiting for shell prompt")
 
-        text = SHELL.before
+        BASH_STATE = "waiting_for_input"
+        text = SHELL.before or ""
        print(text[len(running) :])
        running = text
 
        text = render_terminal_output(text)
-        BASH_STATE, cost = is_waiting_user_input(text)
-        if BASH_STATE == "waiting_for_input" or BASH_STATE == "wont_exit":
-            tokens = enc.encode(text)
+        tokens = enc.encode(text)
 
-            if len(tokens) >= 2048:
-                text = "...(truncated)\n" + enc.decode(tokens[-2047:])
+        if max_tokens and len(tokens) >= max_tokens:
+            text = "...(truncated)\n" + enc.decode(tokens[-(max_tokens - 1) :])
 
-            last_line = (
-                "(pending)" if BASH_STATE == "waiting_for_input" else "(won't exit)"
-            )
-            return text + f"\n{last_line}", cost
-        index = SHELL.expect(["#@@", pexpect.TIMEOUT], timeout=wait)
-        wait += timeout
+        last_line = "(pending)"
+        return text + f"\n{last_line}", 0
 
    assert isinstance(SHELL.before, str)
    output = render_terminal_output(SHELL.before)
 
    tokens = enc.encode(output)
-    if len(tokens) >= 2048:
-        output = "...(truncated)\n" + enc.decode(tokens[-2047:])
+    if max_tokens and len(tokens) >= max_tokens:
+        output = "...(truncated)\n" + enc.decode(tokens[-(max_tokens - 1) :])
 
    try:
        exit_code = _get_exit_code()
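With the LLM-based `is_waiting_user_input` classifier removed, a stuck command now simply reports `(pending)` after the wait, and long output is bounded by a plain token budget: keep the last `max_tokens - 1` tokens and mark the cut. A standalone sketch of that truncation (`basic.py` passes `max_tokens=2048`; the relay path passes `None`, which disables it):

```python
# Sketch of the token-budget truncation execute_bash now applies.
import tiktoken

enc = tiktoken.encoding_for_model("gpt-4o")
max_tokens = 2048
output = "line\n" * 5000  # stand-in for a long terminal dump
tokens = enc.encode(output)
if max_tokens and len(tokens) >= max_tokens:
    # keep the tail so the most recent output survives the cut
    output = "...(truncated)\n" + enc.decode(tokens[-(max_tokens - 1) :])
```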
@@ -236,7 +227,7 @@
 
 class ReadImage(BaseModel):
     file_path: str
-    type: Literal['ReadImage'] = 'ReadImage'
+    type: Literal["ReadImage"] = "ReadImage"
 
 
 def serve_image_in_bg(file_path: str, client_uuid: str, name: str) -> None:
@@ -258,11 +249,11 @@ def serve_image_in_bg(file_path: str, client_uuid: str, name: str) -> None:
         print(f"Connection closed for UUID: {client_uuid}, retrying")
         serve_image_in_bg(file_path, client_uuid, name)
 
+
 class ImageData(BaseModel):
     dataurl: str
 
 
-
 Param = ParamSpec("Param")
 
 T = TypeVar("T")
@@ -281,6 +272,7 @@ def ensure_no_previous_output(func: Callable[Param, T]) -> Callable[Param, T]:
 
     return wrapper
 
+
 @ensure_no_previous_output
 def read_image_from_shell(file_path: str) -> ImageData:
     if not os.path.isabs(file_path):
@@ -297,8 +289,8 @@ def read_image_from_shell(file_path: str) -> ImageData:
         image_bytes = image_file.read()
     image_b64 = base64.b64encode(image_bytes).decode("utf-8")
     image_type = mimetypes.guess_type(file_path)[0]
-    return ImageData(dataurl=f'data:{image_type};base64,{image_b64}')
-
+    return ImageData(dataurl=f"data:{image_type};base64,{image_b64}")
+
 
 @ensure_no_previous_output
 def write_file(writefile: Writefile) -> str:
@@ -349,11 +341,17 @@ def which_tool(args: str) -> BaseModel:
 
 
 def get_tool_output(
-    args: dict | Confirmation | ExecuteBash | Writefile | AIAssistant | DoneFlag | ReadImage,
+    args: dict[object, object]
+    | Confirmation
+    | ExecuteBash
+    | Writefile
+    | AIAssistant
+    | DoneFlag
+    | ReadImage,
     enc: tiktoken.Encoding,
     limit: float,
     loop_call: Callable[[str, float], tuple[str, float]],
-    is_waiting_user_input: Callable[[str], tuple[BASH_CLF_OUTPUT, float]],
+    max_tokens: Optional[int],
 ) -> tuple[str | ImageData | DoneFlag, float]:
     if isinstance(args, dict):
         adapter = TypeAdapter[
@@ -362,13 +360,13 @@ def get_tool_output(
         arg = adapter.validate_python(args)
     else:
         arg = args
-    output: tuple[str | DoneFlag, float]
+    output: tuple[str | DoneFlag | ImageData, float]
     if isinstance(arg, Confirmation):
         console.print("Calling ask confirmation tool")
         output = ask_confirmation(arg), 0.0
     elif isinstance(arg, ExecuteBash):
         console.print("Calling execute bash tool")
-        output = execute_bash(enc, arg, is_waiting_user_input)
+        output = execute_bash(enc, arg, max_tokens)
     elif isinstance(arg, Writefile):
         console.print("Calling write file tool")
         output = write_file(arg), 0
@@ -391,7 +389,9 @@
 History = list[ChatCompletionMessageParam]
 
 
-def get_is_waiting_user_input(model: Models, cost_data: CostData):
+def get_is_waiting_user_input(
+    model: Models, cost_data: CostData
+) -> Callable[[str], tuple[BASH_CLF_OUTPUT, float]]:
     enc = tiktoken.encoding_for_model(model if not model.startswith("o1") else "gpt-4o")
     system_prompt = """You need to classify if a bash program is waiting for user input based on its stdout, or if it won't exit. You'll be given the output of any program.
 Return `waiting_for_input` if the program is waiting for INTERACTIVE input only, Return 'running' if it's waiting for external resources or just waiting to finish.
@@ -451,7 +451,7 @@ def execute_user_input() -> None:
                        ExecuteBash(
                            send_ascii=[ord(x) for x in user_input] + [ord("\n")]
                        ),
-                        lambda x: ("waiting_for_input", 0),
+                        max_tokens=None,
                    )[0]
                )
            except Exception as e:
@@ -467,31 +467,25 @@ async def register_client(server_url: str, client_uuid: str = "") -> None:
 
     # Create the WebSocket connection
     async with websockets.connect(f"{server_url}/{client_uuid}") as websocket:
-        print(f"Connected. Share this user id with the chatbot: {client_uuid} \nLink: https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access")
+        print(
+            f"Connected. Share this user id with the chatbot: {client_uuid} \nLink: https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access"
+        )
        try:
            while True:
                # Wait to receive data from the server
                message = await websocket.recv()
                mdata = Mdata.model_validate_json(message)
                with execution_lock:
-                    # is_waiting_user_input = get_is_waiting_user_input(
-                    #     default_model, default_cost
-                    # )
-                    is_waiting_user_input = lambda x: ("waiting_for_input", 0)
                    try:
                        output, cost = get_tool_output(
-                            mdata.data,
-                            default_enc,
-                            0.0,
-                            lambda x, y: ("", 0),
-                            is_waiting_user_input,
+                            mdata.data, default_enc, 0.0, lambda x, y: ("", 0), None
                        )
                        curr_cost += cost
                        print(f"{curr_cost=}")
                    except Exception as e:
                        output = f"GOT EXCEPTION while calling tool. Error: {e}"
                        traceback.print_exc()
-                    assert not isinstance(output, DoneFlag)
+                    assert isinstance(output, str)
                    await websocket.send(output)
 
        except (websockets.ConnectionClosed, ConnectionError):
@@ -499,14 +493,17 @@ async def register_client(server_url: str, client_uuid: str = "") -> None:
        await register_client(server_url, client_uuid)
 
 
-def run() -> None:
-    if len(sys.argv) > 1:
-        server_url = sys.argv[1]
-    else:
-        server_url = "wss://wcgw.arcfu.com/register"
+run = Typer(pretty_exceptions_show_locals=False, no_args_is_help=True)
+
 
+@run.command()
+def app(
+    server_url: str = "wss://wcgw.arcfu.com/register", client_uuid: Optional[str] = None
+) -> None:
    thread1 = threading.Thread(target=execute_user_input)
-    thread2 = threading.Thread(target=asyncio.run, args=(register_client(server_url),))
+    thread2 = threading.Thread(
+        target=asyncio.run, args=(register_client(server_url, client_uuid or ""),)
+    )
 
    thread1.start()
    thread2.start()
wcgw-0.0.10.dist-info/METADATA ADDED
@@ -0,0 +1,97 @@
+Metadata-Version: 2.3
+Name: wcgw
+Version: 0.0.10
+Summary: What could go wrong giving full shell access to chatgpt?
+Project-URL: Homepage, https://github.com/rusiaaman/wcgw
+Author-email: Aman Rusia <gapypi@arcfu.com>
+Requires-Python: <3.13,>=3.8
+Requires-Dist: fastapi>=0.115.0
+Requires-Dist: mypy>=1.11.2
+Requires-Dist: openai>=1.46.0
+Requires-Dist: petname>=2.6
+Requires-Dist: pexpect>=4.9.0
+Requires-Dist: pydantic>=2.9.2
+Requires-Dist: pyte>=0.8.2
+Requires-Dist: python-dotenv>=1.0.1
+Requires-Dist: rich>=13.8.1
+Requires-Dist: shell>=1.0.1
+Requires-Dist: tiktoken==0.7.0
+Requires-Dist: toml>=0.10.2
+Requires-Dist: typer>=0.12.5
+Requires-Dist: types-pexpect>=4.9.0.20240806
+Requires-Dist: uvicorn>=0.31.0
+Requires-Dist: websockets>=13.1
+Description-Content-Type: text/markdown
+
+# Enable shell access on chatgpt.com
+A custom gpt on the chatgpt web app to interact with your local shell.
+
+### 🚀 Highlights
+- ⚡ **Full Shell Access**: No restrictions, complete control.
+- ⚡ **Create, Execute, Iterate**: Ask the gpt to keep running compiler checks till all errors are fixed, or ask it to keep checking the status of a long-running command till it's done.
+- ⚡ **Interactive Command Handling**: [beta] Supports interactive commands using arrow keys, interrupt, and ansi escape sequences.
+
+### 🪜 Steps:
+1. Run the [cli client](https://github.com/rusiaaman/wcgw?tab=readme-ov-file#client) in any directory of choice.
+2. Share the generated id with this GPT: `https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access`
+3. The custom GPT can now run any command on your cli
+
+## Client
+
+### Option 1: using pip
+Supports python >=3.8 and <3.13
+```sh
+$ pip3 install wcgw
+$ wcgw
+```
+
+### Option 2: using uv
+```sh
+$ curl -LsSf https://astral.sh/uv/install.sh | sh
+$ uv tool run --python 3.12 wcgw
+```
+
+This will print a UUID that you need to share with the gpt.
+
+
+## Chat
+Open the following link or search for the "wcgw" custom gpt using "Explore GPTs" on chatgpt.com
+
+https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access
+
+Finally, let the chatgpt know your user id in any format. E.g., "user_id=<your uuid>" followed by the rest of your instructions.
+
+NOTE: you can resume a broken connection
+`wcgw --client-uuid $previous_uuid`
+
+# How it works
+Your commands are relayed through a server I've hosted at https://wcgw.arcfu.com. The code for that is at `src/relay/serve.py`.
+
+Chatgpt sends a request to the relay server using the user id that you share with it. The relay server holds a websocket with the terminal client against the user id and acts as a proxy to pass the request.
+
+It's secure in both directions. Either a malicious actor or a malicious Chatgpt has to correctly guess your UUID for any security breach.
+
+NOTE: the relay server doesn't store any data. If you don't trust it, you may host the server on your own and create a custom gpt. Create an issue and I'll be happy to share the full instructions and schema I've given in the custom GPT configuration.
+
+# Showcase
+
+## Create a todo app using react + typescript + vite
+https://chatgpt.com/share/6717d94d-756c-8005-98a6-d021c7b586aa
+
+## Write unit tests for all files in my current repo
+[Todo]
+
+
+# [Optional] Local shell access with openai API key
+
+Add `OPENAI_API_KEY` and `OPENAI_ORG_ID` env variables.
+
+Clone the repo and run the following to install the `wcgw_local` command:
+
+`pip install .`
+
+Then run
+
+`wcgw_local --limit 0.1`  # Cost limit $0.1
+
+You can now directly write messages or press the enter key to open vim for multiline messages and text pasting.
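The relay flow this README describes matches `register_client` in `wcgw/tools.py`: the client holds a websocket keyed by its UUID and answers each relayed request over the same connection. A minimal sketch of that client side — `run_locally` is a hypothetical stand-in for wcgw's `get_tool_output` dispatch:

```python
# Minimal sketch of the relay protocol described above.
import asyncio
import uuid

import websockets


def run_locally(request: str) -> str:
    # Hypothetical executor; wcgw validates the message and runs a tool.
    return f"ran: {request}"


async def client(server_url: str = "wss://wcgw.arcfu.com/register") -> None:
    client_uuid = str(uuid.uuid4())  # share this id with the custom GPT
    async with websockets.connect(f"{server_url}/{client_uuid}") as ws:
        while True:
            request = await ws.recv()      # tool call relayed from the GPT
            output = run_locally(str(request))
            await ws.send(output)          # reply through the same socket


asyncio.run(client())
```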
wcgw-0.0.10.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+wcgw/__init__.py,sha256=okSsOWpTKDjEQzgOin3Kdpx4Mc3MFX1RunjopHQSIWE,62
+wcgw/__main__.py,sha256=MjJnFwfYzA1rW47xuSP1EVsi53DTHeEGqESkQwsELFQ,34
+wcgw/basic.py,sha256=t5l24fx8UNIJSa1yh2wh4WYtLwi35y5WR8XKG-7lC00,16040
+wcgw/common.py,sha256=jn39zTpaFUO1PWof_z7qBmklaZH5G1blzjlBvez0cg4,1225
+wcgw/openai_adapters.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+wcgw/openai_utils.py,sha256=YNwCsA-Wqq7jWrxP0rfQmBTb1dI0s7dWXzQqyTzOZT4,2629
+wcgw/tools.py,sha256=HGcxk09mmwnENV0PPfGV-I7TaOEvRlOdD9KdH_juHhA,16350
+wcgw-0.0.10.dist-info/METADATA,sha256=j3ODV1Y9v9ntqDYrOi8BO5LjyMRAYWmI8DnMKjmLpkw,3526
+wcgw-0.0.10.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+wcgw-0.0.10.dist-info/entry_points.txt,sha256=T-IH7w6Vc650hr8xksC8kJfbJR4uwN8HDudejwDwrNM,59
+wcgw-0.0.10.dist-info/RECORD,,
wcgw-0.0.8.dist-info/METADATA DELETED
@@ -1,64 +0,0 @@
-Metadata-Version: 2.3
-Name: wcgw
-Version: 0.0.8
-Summary: What could go wrong giving full shell access to chatgpt?
-Project-URL: Homepage, https://github.com/rusiaaman/wcgw
-Author-email: Aman Rusia <gapypi@arcfu.com>
-Requires-Python: <3.13,>=3.8
-Requires-Dist: anthropic>=0.36.2
-Requires-Dist: fastapi>=0.115.0
-Requires-Dist: mypy>=1.11.2
-Requires-Dist: openai>=1.46.0
-Requires-Dist: petname>=2.6
-Requires-Dist: pexpect>=4.9.0
-Requires-Dist: pyte>=0.8.2
-Requires-Dist: python-dotenv>=1.0.1
-Requires-Dist: rich>=13.8.1
-Requires-Dist: shell>=1.0.1
-Requires-Dist: tiktoken==0.7.0
-Requires-Dist: toml>=0.10.2
-Requires-Dist: typer>=0.12.5
-Requires-Dist: types-pexpect>=4.9.0.20240806
-Requires-Dist: uvicorn>=0.31.0
-Requires-Dist: websockets>=13.1
-Description-Content-Type: text/markdown
-
-# Shell access to chatgpt.com
-
-### 🚀 Highlights
-- ⚡ **Full Shell Access**: No restrictions, complete control.
-- ⚡ **Create, Execute, Iterate**: Seamless workflow for development and execution.
-- ⚡ **Interactive Command Handling**: Supports interactive commands with ease.
-
-
-### 🪜 Steps:
-1. Run the [cli client](https://github.com/rusiaaman/wcgw?tab=readme-ov-file#client) in any directory of choice.
-2. Share the generated id with the GPT: `https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access`
-3. The custom GPT can now run any command on your cli
-
-## Client
-
-### Option 1: using pip
-```sh
-$ pip install wcgw
-$ wcgw
-```
-
-### Option 2: using uv
-```sh
-$ curl -LsSf https://astral.sh/uv/install.sh | sh
-$ uv tool run wcgw
-```
-
-This will print a UUID that you need to share with the gpt.
-
-
-## Chat
-https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access
-
-Add user id the client generated to the first message along with the instructions.
-
-# How it works
-Your commands are relayed through a server I've hosted at https://wcgw.arcfu.com. The code for that is at `src/relay/serve.py`.
-
-The user id that you share with chatgpt is added in the request it sents to the relay server which holds a websocket with the terminal client.
wcgw-0.0.8.dist-info/RECORD DELETED
@@ -1,10 +0,0 @@
-wcgw/__init__.py,sha256=okSsOWpTKDjEQzgOin3Kdpx4Mc3MFX1RunjopHQSIWE,62
-wcgw/__main__.py,sha256=MjJnFwfYzA1rW47xuSP1EVsi53DTHeEGqESkQwsELFQ,34
-wcgw/basic.py,sha256=o_iyg3Rmqz08LTWzgO7JIA0u_5l6GGaXyJe0zw73v8w,15085
-wcgw/common.py,sha256=gHiP1RVHkvp10jPc1Xzg5DtUqhGbgQ7pTJK7OvUXfZQ,1764
-wcgw/openai_utils.py,sha256=YNwCsA-Wqq7jWrxP0rfQmBTb1dI0s7dWXzQqyTzOZT4,2629
-wcgw/tools.py,sha256=ozJtcuOlMrdGoDmSv4UpolnStdY7Xz8Z7JyrRllKA7s,17088
-wcgw-0.0.8.dist-info/METADATA,sha256=XebadIrzit30OJD3F1e1llsu2z_0B_9x8lI6T-R3Q8I,2028
-wcgw-0.0.8.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
-wcgw-0.0.8.dist-info/entry_points.txt,sha256=T-IH7w6Vc650hr8xksC8kJfbJR4uwN8HDudejwDwrNM,59
-wcgw-0.0.8.dist-info/RECORD,,