wcgw 0.0.8__tar.gz → 0.0.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of wcgw might be problematic. Click here for more details.

wcgw-0.0.10/PKG-INFO ADDED
@@ -0,0 +1,97 @@
1
+ Metadata-Version: 2.3
2
+ Name: wcgw
3
+ Version: 0.0.10
4
+ Summary: What could go wrong giving full shell access to chatgpt?
5
+ Project-URL: Homepage, https://github.com/rusiaaman/wcgw
6
+ Author-email: Aman Rusia <gapypi@arcfu.com>
7
+ Requires-Python: <3.13,>=3.8
8
+ Requires-Dist: fastapi>=0.115.0
9
+ Requires-Dist: mypy>=1.11.2
10
+ Requires-Dist: openai>=1.46.0
11
+ Requires-Dist: petname>=2.6
12
+ Requires-Dist: pexpect>=4.9.0
13
+ Requires-Dist: pydantic>=2.9.2
14
+ Requires-Dist: pyte>=0.8.2
15
+ Requires-Dist: python-dotenv>=1.0.1
16
+ Requires-Dist: rich>=13.8.1
17
+ Requires-Dist: shell>=1.0.1
18
+ Requires-Dist: tiktoken==0.7.0
19
+ Requires-Dist: toml>=0.10.2
20
+ Requires-Dist: typer>=0.12.5
21
+ Requires-Dist: types-pexpect>=4.9.0.20240806
22
+ Requires-Dist: uvicorn>=0.31.0
23
+ Requires-Dist: websockets>=13.1
24
+ Description-Content-Type: text/markdown
25
+
26
+ # Enable shell access on chatgpt.com
27
+ A custom gpt on chatgpt web app to interact with your local shell.
28
+
29
+ ### 🚀 Highlights
30
+ - ⚡ **Full Shell Access**: No restrictions, complete control.
31
+ - ⚡ **Create, Execute, Iterate**: Ask the gpt to keep running compiler checks till all errors are fixed, or ask it to keep checking for the status of a long running command till it's done.
32
+ - ⚡ **Interactive Command Handling**: [beta] Supports interactive commands using arrow keys, interrupt, and ansi escape sequences.
33
+
34
+ ### 🪜 Steps:
35
+ 1. Run the [cli client](https://github.com/rusiaaman/wcgw?tab=readme-ov-file#client) in any directory of choice.
36
+ 2. Share the generated id with this GPT: `https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access`
37
+ 3. The custom GPT can now run any command on your cli
38
+
39
+ ## Client
40
+
41
+ ### Option 1: using pip
42
+ Supports python >=3.8 and <3.13
43
+ ```sh
44
+ $ pip3 install wcgw
45
+ $ wcgw
46
+ ```
47
+
48
+ ### Option 2: using uv
49
+ ```sh
50
+ $ curl -LsSf https://astral.sh/uv/install.sh | sh
51
+ $ uv tool run --python 3.12 wcgw
52
+ ```
53
+
54
+ This will print a UUID that you need to share with the gpt.
55
+
56
+
57
+ ## Chat
58
+ Open the following link or search the "wcgw" custom gpt using "Explore GPTs" on chatgpt.com
59
+
60
+ https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access
61
+
62
+ Finally, let ChatGPT know your user id in any format. E.g., "user_id=<your uuid>" followed by the rest of your instructions.
63
+
64
+ NOTE: you can resume a broken connection
65
+ `wcgw --client-uuid $previous_uuid`
66
+
67
+ # How it works
68
+ Your commands are relayed through a server I've hosted at https://wcgw.arcfu.com. The code for that is at `src/relay/serve.py`.
69
+
70
+ ChatGPT sends a request to the relay server using the user id that you share with it. The relay server holds a websocket with the terminal client against the user id and acts as a proxy to pass the request.
71
+
72
+ It's secure in both directions. Either a malicious actor or a malicious ChatGPT has to correctly guess your UUID for any security breach.
73
+
74
+ NOTE: the relay server doesn't store any data. If you don't trust it then you may host the server on your own and create a custom gpt. Create an issue and I'll be happy to share the full instructions and schema I've given in the custom GPT configuration.
75
+
76
+ # Showcase
77
+
78
+ ## Create a todo app using react + typescript + vite
79
+ https://chatgpt.com/share/6717d94d-756c-8005-98a6-d021c7b586aa
80
+
81
+ ## Write unit tests for all files in my current repo
82
+ [Todo]
83
+
84
+
85
+ # [Optional] Local shell access with openai API key
86
+
87
+ Add `OPENAI_API_KEY` and `OPENAI_ORG_ID` env variables.
88
+
89
+ Clone the repo and run to install `wcgw_local` command
90
+
91
+ `pip install .`
92
+
93
+ Then run
94
+
95
+ `wcgw_local --limit 0.1` # Cost limit $0.1
96
+
97
+ You can now directly write messages or press enter key to open vim for multiline message and text pasting.
wcgw-0.0.10/README.md ADDED
@@ -0,0 +1,72 @@
1
+ # Enable shell access on chatgpt.com
2
+ A custom gpt on chatgpt web app to interact with your local shell.
3
+
4
+ ### 🚀 Highlights
5
+ - ⚡ **Full Shell Access**: No restrictions, complete control.
6
+ - ⚡ **Create, Execute, Iterate**: Ask the gpt to keep running compiler checks till all errors are fixed, or ask it to keep checking for the status of a long running command till it's done.
7
+ - ⚡ **Interactive Command Handling**: [beta] Supports interactive commands using arrow keys, interrupt, and ansi escape sequences.
8
+
9
+ ### 🪜 Steps:
10
+ 1. Run the [cli client](https://github.com/rusiaaman/wcgw?tab=readme-ov-file#client) in any directory of choice.
11
+ 2. Share the generated id with this GPT: `https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access`
12
+ 3. The custom GPT can now run any command on your cli
13
+
14
+ ## Client
15
+
16
+ ### Option 1: using pip
17
+ Supports python >=3.8 and <3.13
18
+ ```sh
19
+ $ pip3 install wcgw
20
+ $ wcgw
21
+ ```
22
+
23
+ ### Option 2: using uv
24
+ ```sh
25
+ $ curl -LsSf https://astral.sh/uv/install.sh | sh
26
+ $ uv tool run --python 3.12 wcgw
27
+ ```
28
+
29
+ This will print a UUID that you need to share with the gpt.
30
+
31
+
32
+ ## Chat
33
+ Open the following link or search the "wcgw" custom gpt using "Explore GPTs" on chatgpt.com
34
+
35
+ https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access
36
+
37
+ Finally, let ChatGPT know your user id in any format. E.g., "user_id=<your uuid>" followed by the rest of your instructions.
38
+
39
+ NOTE: you can resume a broken connection
40
+ `wcgw --client-uuid $previous_uuid`
41
+
42
+ # How it works
43
+ Your commands are relayed through a server I've hosted at https://wcgw.arcfu.com. The code for that is at `src/relay/serve.py`.
44
+
45
+ ChatGPT sends a request to the relay server using the user id that you share with it. The relay server holds a websocket with the terminal client against the user id and acts as a proxy to pass the request.
46
+
47
+ It's secure in both directions. Either a malicious actor or a malicious ChatGPT has to correctly guess your UUID for any security breach.
48
+
49
+ NOTE: the relay server doesn't store any data. If you don't trust it then you may host the server on your own and create a custom gpt. Create an issue and I'll be happy to share the full instructions and schema I've given in the custom GPT configuration.
50
+
51
+ # Showcase
52
+
53
+ ## Create a todo app using react + typescript + vite
54
+ https://chatgpt.com/share/6717d94d-756c-8005-98a6-d021c7b586aa
55
+
56
+ ## Write unit tests for all files in my current repo
57
+ [Todo]
58
+
59
+
60
+ # [Optional] Local shell access with openai API key
61
+
62
+ Add `OPENAI_API_KEY` and `OPENAI_ORG_ID` env variables.
63
+
64
+ Clone the repo and run to install `wcgw_local` command
65
+
66
+ `pip install .`
67
+
68
+ Then run
69
+
70
+ `wcgw_local --limit 0.1` # Cost limit $0.1
71
+
72
+ You can now directly write messages or press enter key to open vim for multiline message and text pasting.
@@ -1,7 +1,7 @@
1
1
  [project]
2
2
  authors = [{ name = "Aman Rusia", email = "gapypi@arcfu.com" }]
3
3
  name = "wcgw"
4
- version = "0.0.8"
4
+ version = "0.0.10"
5
5
  description = "What could go wrong giving full shell access to chatgpt?"
6
6
  readme = "README.md"
7
7
  requires-python = ">=3.8, <3.13"
@@ -21,7 +21,7 @@ dependencies = [
21
21
  "fastapi>=0.115.0",
22
22
  "uvicorn>=0.31.0",
23
23
  "websockets>=13.1",
24
- "anthropic>=0.36.2",
24
+ "pydantic>=2.9.2",
25
25
  ]
26
26
 
27
27
  [project.urls]
@@ -16,13 +16,12 @@ from openai.types.chat import (
16
16
  ParsedChatCompletionMessage,
17
17
  )
18
18
  import rich
19
- import petname
19
+ import petname # type: ignore[import-untyped]
20
20
  from typer import Typer
21
21
  import uuid
22
22
 
23
- from wcgw.common import Config, text_from_editor
24
-
25
- from .common import Models
23
+ from .common import Models, discard_input
24
+ from .common import CostData, History
26
25
  from .openai_utils import get_input_cost, get_output_cost
27
26
  from .tools import ExecuteBash, ReadImage, ImageData
28
27
 
@@ -31,7 +30,6 @@ from .tools import (
31
30
  Confirmation,
32
31
  DoneFlag,
33
32
  Writefile,
34
- get_is_waiting_user_input,
35
33
  get_tool_output,
36
34
  SHELL,
37
35
  start_shell,
@@ -40,14 +38,40 @@ from .tools import (
40
38
  import tiktoken
41
39
 
42
40
  from urllib import parse
41
+ import subprocess
43
42
  import os
43
+ import tempfile
44
44
 
45
45
  import toml
46
+ from pydantic import BaseModel
46
47
 
47
48
 
48
49
  from dotenv import load_dotenv
49
50
 
50
- History = list[ChatCompletionMessageParam]
51
+
52
+ class Config(BaseModel):
53
+ model: Models
54
+ secondary_model: Models
55
+ cost_limit: float
56
+ cost_file: dict[Models, CostData]
57
+ cost_unit: str = "$"
58
+
59
+
60
+ def text_from_editor(console: rich.console.Console) -> str:
61
+ # First consume all the input till now
62
+ discard_input()
63
+ console.print("\n---------------------------------------\n# User message")
64
+ data = input()
65
+ if data:
66
+ return data
67
+ editor = os.environ.get("EDITOR", "vim")
68
+ with tempfile.NamedTemporaryFile(suffix=".tmp") as tf:
69
+ subprocess.run([editor, tf.name], check=True)
70
+ with open(tf.name, "r") as f:
71
+ data = f.read()
72
+ console.print(data)
73
+ return data
74
+
51
75
 
52
76
  def save_history(history: History, session_id: str) -> None:
53
77
  myid = str(history[1]["content"]).replace("/", "_").replace(" ", "_").lower()[:60]
@@ -67,29 +91,22 @@ def parse_user_message_special(msg: str) -> ChatCompletionUserMessageParam:
67
91
  if line.startswith("%"):
68
92
  args = line[1:].strip().split(" ")
69
93
  command = args[0]
70
- assert command == 'image'
94
+ assert command == "image"
71
95
  image_path = args[1]
72
- with open(image_path, 'rb') as f:
96
+ with open(image_path, "rb") as f:
73
97
  image_bytes = f.read()
74
98
  image_b64 = base64.b64encode(image_bytes).decode("utf-8")
75
99
  image_type = mimetypes.guess_type(image_path)[0]
76
- dataurl=f'data:{image_type};base64,{image_b64}'
77
- parts.append({
78
- 'type': 'image_url',
79
- 'image_url': {
80
- 'url': dataurl,
81
- 'detail': 'auto'
82
- }
83
- })
100
+ dataurl = f"data:{image_type};base64,{image_b64}"
101
+ parts.append(
102
+ {"type": "image_url", "image_url": {"url": dataurl, "detail": "auto"}}
103
+ )
84
104
  else:
85
- if len(parts) > 0 and parts[-1]['type'] == 'text':
86
- parts[-1]['text'] += '\n' + line
105
+ if len(parts) > 0 and parts[-1]["type"] == "text":
106
+ parts[-1]["text"] += "\n" + line
87
107
  else:
88
- parts.append({'type': 'text', 'text': line})
89
- return {
90
- 'role': 'user',
91
- 'content': parts
92
- }
108
+ parts.append({"type": "text", "text": line})
109
+ return {"role": "user", "content": parts}
93
110
 
94
111
 
95
112
  app = Typer(pretty_exceptions_show_locals=False)
@@ -121,7 +138,7 @@ def loop(
121
138
  if history[1]["role"] != "user":
122
139
  raise ValueError("Invalid history file, second message should be user")
123
140
  first_message = ""
124
- waiting_for_assistant = history[-1]['role'] != 'assistant'
141
+ waiting_for_assistant = history[-1]["role"] != "assistant"
125
142
 
126
143
  my_dir = os.path.dirname(__file__)
127
144
  config_file = os.path.join(my_dir, "..", "..", "config.toml")
@@ -136,9 +153,6 @@ def loop(
136
153
  enc = tiktoken.encoding_for_model(
137
154
  config.model if not config.model.startswith("o1") else "gpt-4o"
138
155
  )
139
- is_waiting_user_input = get_is_waiting_user_input(
140
- config.model, config.cost_file[config.model]
141
- )
142
156
 
143
157
  tools = [
144
158
  openai.pydantic_function_tool(
@@ -265,7 +279,7 @@ System information:
265
279
  )
266
280
  system_console.print(f"\nTotal cost: {config.cost_unit}{cost:.3f}")
267
281
  output_toks += output_toks_
268
-
282
+
269
283
  _histories.append(item)
270
284
  for tool_call_id, toolcallargs in tool_call_args_by_id.items():
271
285
  for toolindex, tool_args in toolcallargs.items():
@@ -275,7 +289,7 @@ System information:
275
289
  enc,
276
290
  limit - cost,
277
291
  loop,
278
- is_waiting_user_input,
292
+ max_tokens=2048,
279
293
  )
280
294
  except Exception as e:
281
295
  output_or_done = (
@@ -297,42 +311,49 @@ System information:
297
311
  f"\nTotal cost: {config.cost_unit}{cost:.3f}"
298
312
  )
299
313
  return output_or_done.task_output, cost
300
-
314
+
301
315
  output = output_or_done
302
316
 
303
317
  if isinstance(output, ImageData):
304
318
  randomId = petname.Generate(2, "-")
305
319
  if not image_histories:
306
- image_histories.extend([
307
- {
308
- 'role': 'assistant',
309
- 'content': f'Share images with ids: {randomId}'
310
-
311
- },
312
- {
313
- 'role': 'user',
314
- 'content': [{
315
- 'type': 'image_url',
316
- 'image_url': {
317
- 'url': output.dataurl,
318
- 'detail': 'auto'
319
- }
320
- }]
321
- }]
320
+ image_histories.extend(
321
+ [
322
+ {
323
+ "role": "assistant",
324
+ "content": f"Share images with ids: {randomId}",
325
+ },
326
+ {
327
+ "role": "user",
328
+ "content": [
329
+ {
330
+ "type": "image_url",
331
+ "image_url": {
332
+ "url": output.dataurl,
333
+ "detail": "auto",
334
+ },
335
+ }
336
+ ],
337
+ },
338
+ ]
322
339
  )
323
340
  else:
324
- image_histories[0]['content'] += ', ' + randomId
325
- image_histories[1]["content"].append({ # type: ignore
326
- 'type': 'image_url',
327
- 'image_url': {
328
- 'url': output.dataurl,
329
- 'detail': 'auto'
341
+ image_histories[0]["content"] += ", " + randomId
342
+ second_content = image_histories[1]["content"]
343
+ assert isinstance(second_content, list)
344
+ second_content.append(
345
+ {
346
+ "type": "image_url",
347
+ "image_url": {
348
+ "url": output.dataurl,
349
+ "detail": "auto",
350
+ },
330
351
  }
331
- })
352
+ )
332
353
 
333
354
  item = {
334
355
  "role": "tool",
335
- "content": f'Ask user for image id: {randomId}',
356
+ "content": f"Ask user for image id: {randomId}",
336
357
  "tool_call_id": tool_call_id + str(toolindex),
337
358
  }
338
359
  else:
@@ -1,13 +1,9 @@
1
- import os
2
1
  import select
3
- import subprocess
4
2
  import sys
5
- import tempfile
6
3
  import termios
7
4
  import tty
8
5
  from typing import Literal
9
6
  from pydantic import BaseModel
10
- import rich
11
7
 
12
8
 
13
9
  class CostData(BaseModel):
@@ -15,6 +11,14 @@ class CostData(BaseModel):
15
11
  cost_per_1m_output_tokens: float
16
12
 
17
13
 
14
+ from openai.types.chat import (
15
+ ChatCompletionMessageParam,
16
+ ChatCompletionAssistantMessageParam,
17
+ ChatCompletionMessage,
18
+ ParsedChatCompletionMessage,
19
+ )
20
+
21
+ History = list[ChatCompletionMessageParam]
18
22
  Models = Literal["gpt-4o-2024-08-06", "gpt-4o-mini"]
19
23
 
20
24
 
@@ -41,27 +45,3 @@ def discard_input() -> None:
41
45
  finally:
42
46
  # Restore old terminal settings
43
47
  termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
44
-
45
-
46
- class Config(BaseModel):
47
- model: Models
48
- secondary_model: Models
49
- cost_limit: float
50
- cost_file: dict[Models, CostData]
51
- cost_unit: str = "$"
52
-
53
-
54
- def text_from_editor(console: rich.console.Console) -> str:
55
- # First consume all the input till now
56
- discard_input()
57
- console.print("\n---------------------------------------\n# User message")
58
- data = input()
59
- if data:
60
- return data
61
- editor = os.environ.get("EDITOR", "vim")
62
- with tempfile.NamedTemporaryFile(suffix=".tmp") as tf:
63
- subprocess.run([editor, tf.name], check=True)
64
- with open(tf.name, "r") as f:
65
- data = f.read()
66
- console.print(data)
67
- return data
File without changes