wcgw 0.0.10__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of wcgw might be problematic. Click here for more details.

wcgw/basic.py CHANGED
@@ -158,14 +158,16 @@ def loop(
158
158
  openai.pydantic_function_tool(
159
159
  ExecuteBash,
160
160
  description="""
161
- Execute a bash script. Stateful (beware with subsequent calls).
162
- Execute commands using `execute_command` attribute.
163
- Do not use interactive commands like nano. Prefer writing simpler commands.
164
- Last line will always be `(exit <int code>)` except if
165
- the last line is `(pending)` if the program is still running or waiting for user inputs. You can then send input using `send_ascii` attributes. You get status by sending `send_ascii: [10]`.
166
- Optionally the last line is `(won't exit)` in which case you need to kill the process if you want to run a new command.
167
- Optionally `exit shell has restarted` is the output, in which case environment resets, you can run fresh commands.
168
- The first line might be `(...truncated)` if the output is too long.""",
161
+ - Execute a bash script. This is stateful (beware with subsequent calls).
162
+ - Execute commands using `execute_command` attribute.
163
+ - Do not use interactive commands like nano. Prefer writing simpler commands.
164
+ - Last line will always be `(exit <int code>)` except if
165
+ - The last line is `(pending)` if the program is still running or waiting for your input. You can then send input using `send_ascii` attributes. You get status by sending new line `send_ascii: ["Enter"]` or `send_ascii: [10]`.
166
+ - Optionally the last line is `(won't exit)` in which case you need to kill the process if you want to run a new command.
167
+ - Optionally `exit shell has restarted` is the output, in which case environment resets, you can run fresh commands.
168
+ - The first line might be `(...truncated)` if the output is too long.
169
+ - Always run `pwd` if you get any file or directory not found error to make sure you're not lost.
170
+ """,
169
171
  ),
170
172
  openai.pydantic_function_tool(
171
173
  Writefile,
wcgw/claude.py ADDED
@@ -0,0 +1,384 @@
1
+ import base64
2
+ import json
3
+ import mimetypes
4
+ from pathlib import Path
5
+ import sys
6
+ import traceback
7
+ from typing import Callable, DefaultDict, Optional, cast
8
+ import rich
9
+ import petname
10
+ from typer import Typer
11
+ import uuid
12
+
13
+ from .common import Models, discard_input
14
+ from .common import CostData, Config, text_from_editor
15
+ from .tools import ExecuteBash, ReadImage, ImageData
16
+
17
+ from .tools import (
18
+ BASH_CLF_OUTPUT,
19
+ Confirmation,
20
+ DoneFlag,
21
+ Writefile,
22
+ get_is_waiting_user_input,
23
+ get_tool_output,
24
+ SHELL,
25
+ start_shell,
26
+ which_tool,
27
+ )
28
+ import tiktoken
29
+
30
+ from urllib import parse
31
+ import subprocess
32
+ import os
33
+ import tempfile
34
+
35
+ import toml
36
+ from pydantic import BaseModel
37
+
38
+
39
+ from dotenv import load_dotenv
40
+
41
+ from anthropic.types import MessageParam
42
+
43
+ History = list[MessageParam]
44
+
45
+ def save_history(history: History, session_id: str) -> None:
46
+ myid = str(history[1]["content"]).replace("/", "_").replace(" ", "_").lower()[:60]
47
+ myid += "_" + session_id
48
+ myid = myid + ".json"
49
+
50
+ mypath = Path(".wcgw") / myid
51
+ mypath.parent.mkdir(parents=True, exist_ok=True)
52
+ with open(mypath, "w") as f:
53
+ json.dump(history, f, indent=3)
54
+
55
+
56
+ def parse_user_message_special(msg: str) -> ChatCompletionUserMessageParam:
57
+ # Search for lines starting with `%` and treat them as special commands
58
+ parts: list[ChatCompletionContentPartParam] = []
59
+ for line in msg.split("\n"):
60
+ if line.startswith("%"):
61
+ args = line[1:].strip().split(" ")
62
+ command = args[0]
63
+ assert command == 'image'
64
+ image_path = args[1]
65
+ with open(image_path, 'rb') as f:
66
+ image_bytes = f.read()
67
+ image_b64 = base64.b64encode(image_bytes).decode("utf-8")
68
+ image_type = mimetypes.guess_type(image_path)[0]
69
+ dataurl=f'data:{image_type};base64,{image_b64}'
70
+ parts.append({
71
+ 'type': 'image_url',
72
+ 'image_url': {
73
+ 'url': dataurl,
74
+ 'detail': 'auto'
75
+ }
76
+ })
77
+ else:
78
+ if len(parts) > 0 and parts[-1]['type'] == 'text':
79
+ parts[-1]['text'] += '\n' + line
80
+ else:
81
+ parts.append({'type': 'text', 'text': line})
82
+ return {
83
+ 'role': 'user',
84
+ 'content': parts
85
+ }
86
+
87
+
88
+ app = Typer(pretty_exceptions_show_locals=False)
89
+
90
+
91
+ @app.command()
92
+ def loop(
93
+ first_message: Optional[str] = None,
94
+ limit: Optional[float] = None,
95
+ resume: Optional[str] = None,
96
+ ) -> tuple[str, float]:
97
+ load_dotenv()
98
+
99
+ session_id = str(uuid.uuid4())[:6]
100
+
101
+ history: History = []
102
+ waiting_for_assistant = False
103
+ if resume:
104
+ if resume == "latest":
105
+ resume_path = sorted(Path(".wcgw").iterdir(), key=os.path.getmtime)[-1]
106
+ else:
107
+ resume_path = Path(resume)
108
+ if not resume_path.exists():
109
+ raise FileNotFoundError(f"File {resume} not found")
110
+ with resume_path.open() as f:
111
+ history = json.load(f)
112
+ if len(history) <= 2:
113
+ raise ValueError("Invalid history file")
114
+ if history[1]["role"] != "user":
115
+ raise ValueError("Invalid history file, second message should be user")
116
+ first_message = ""
117
+ waiting_for_assistant = history[-1]['role'] != 'assistant'
118
+
119
+ my_dir = os.path.dirname(__file__)
120
+ config_file = os.path.join(my_dir, "..", "..", "config.toml")
121
+ with open(config_file) as f:
122
+ config_json = toml.load(f)
123
+ config = Config.model_validate(config_json)
124
+
125
+ if limit is not None:
126
+ config.cost_limit = limit
127
+ limit = config.cost_limit
128
+
129
+ enc = tiktoken.encoding_for_model(
130
+ config.model if not config.model.startswith("o1") else "gpt-4o"
131
+ )
132
+ is_waiting_user_input = get_is_waiting_user_input(
133
+ config.model, config.cost_file[config.model]
134
+ )
135
+
136
+ tools = [
137
+ openai.pydantic_function_tool(
138
+ ExecuteBash,
139
+ description="""
140
+ Execute a bash script. Stateful (beware with subsequent calls).
141
+ Execute commands using `execute_command` attribute.
142
+ Do not use interactive commands like nano. Prefer writing simpler commands.
143
+ Last line will always be `(exit <int code>)` except if
144
+ the last line is `(pending)` if the program is still running or waiting for user inputs. You can then send input using `send_ascii` attributes. You get status by sending `send_ascii: [10]`.
145
+ Optionally the last line is `(won't exit)` in which case you need to kill the process if you want to run a new command.
146
+ Optionally `exit shell has restarted` is the output, in which case environment resets, you can run fresh commands.
147
+ The first line might be `(...truncated)` if the output is too long.""",
148
+ ),
149
+ openai.pydantic_function_tool(
150
+ Writefile,
151
+ description="Write content to a file. Provide file path and content. Use this instead of ExecuteBash for writing files.",
152
+ ),
153
+ openai.pydantic_function_tool(
154
+ ReadImage, description="Read an image from the shell."
155
+ ),
156
+ ]
157
+ uname_sysname = os.uname().sysname
158
+ uname_machine = os.uname().machine
159
+
160
+ system = f"""
161
+ You're a cli assistant.
162
+
163
+ Instructions:
164
+
165
+ - You should use the provided bash execution tool to run script to complete objective.
166
+ - Do not use sudo. Do not use interactive commands.
167
+ - Ask user for confirmation before running anything major
168
+
169
+ System information:
170
+ - System: {uname_sysname}
171
+ - Machine: {uname_machine}
172
+ """
173
+
174
+ if not history:
175
+ history = [{"role": "system", "content": system}]
176
+ else:
177
+ if history[-1]["role"] == "tool":
178
+ waiting_for_assistant = True
179
+
180
+ client = OpenAI()
181
+
182
+ cost: float = 0
183
+ input_toks = 0
184
+ output_toks = 0
185
+ system_console = rich.console.Console(style="blue", highlight=False)
186
+ error_console = rich.console.Console(style="red", highlight=False)
187
+ user_console = rich.console.Console(style="bright_black", highlight=False)
188
+ assistant_console = rich.console.Console(style="white bold", highlight=False)
189
+
190
+ while True:
191
+ if cost > limit:
192
+ system_console.print(
193
+ f"\nCost limit exceeded. Current cost: {cost}, input tokens: {input_toks}, output tokens: {output_toks}"
194
+ )
195
+ break
196
+
197
+ if not waiting_for_assistant:
198
+ if first_message:
199
+ msg = first_message
200
+ first_message = ""
201
+ else:
202
+ msg = text_from_editor(user_console)
203
+
204
+ history.append(parse_user_message_special(msg))
205
+ else:
206
+ waiting_for_assistant = False
207
+
208
+ cost_, input_toks_ = get_input_cost(
209
+ config.cost_file[config.model], enc, history
210
+ )
211
+ cost += cost_
212
+ input_toks += input_toks_
213
+
214
+ stream = client.chat.completions.create(
215
+ messages=history,
216
+ model=config.model,
217
+ stream=True,
218
+ tools=tools,
219
+ )
220
+
221
+ system_console.print(
222
+ "\n---------------------------------------\n# Assistant response",
223
+ style="bold",
224
+ )
225
+ tool_call_args_by_id = DefaultDict[str, DefaultDict[int, str]](
226
+ lambda: DefaultDict(str)
227
+ )
228
+ _histories: History = []
229
+ item: ChatCompletionMessageParam
230
+ full_response: str = ""
231
+ image_histories: History = []
232
+ try:
233
+ for chunk in stream:
234
+ if chunk.choices[0].finish_reason == "tool_calls":
235
+ assert tool_call_args_by_id
236
+ item = {
237
+ "role": "assistant",
238
+ "content": full_response,
239
+ "tool_calls": [
240
+ {
241
+ "id": tool_call_id + str(toolindex),
242
+ "type": "function",
243
+ "function": {
244
+ "arguments": tool_args,
245
+ "name": type(which_tool(tool_args)).__name__,
246
+ },
247
+ }
248
+ for tool_call_id, toolcallargs in tool_call_args_by_id.items()
249
+ for toolindex, tool_args in toolcallargs.items()
250
+ ],
251
+ }
252
+ cost_, output_toks_ = get_output_cost(
253
+ config.cost_file[config.model], enc, item
254
+ )
255
+ cost += cost_
256
+ system_console.print(
257
+ f"\n---------------------------------------\n# Assistant invoked tools: {[which_tool(tool['function']['arguments']) for tool in item['tool_calls']]}"
258
+ )
259
+ system_console.print(f"\nTotal cost: {config.cost_unit}{cost:.3f}")
260
+ output_toks += output_toks_
261
+
262
+ _histories.append(item)
263
+ for tool_call_id, toolcallargs in tool_call_args_by_id.items():
264
+ for toolindex, tool_args in toolcallargs.items():
265
+ try:
266
+ output_or_done, cost_ = get_tool_output(
267
+ json.loads(tool_args),
268
+ enc,
269
+ limit - cost,
270
+ loop,
271
+ is_waiting_user_input,
272
+ )
273
+ except Exception as e:
274
+ output_or_done = (
275
+ f"GOT EXCEPTION while calling tool. Error: {e}"
276
+ )
277
+ tb = traceback.format_exc()
278
+ error_console.print(output_or_done + "\n" + tb)
279
+ cost_ = 0
280
+ cost += cost_
281
+ system_console.print(
282
+ f"\nTotal cost: {config.cost_unit}{cost:.3f}"
283
+ )
284
+
285
+ if isinstance(output_or_done, DoneFlag):
286
+ system_console.print(
287
+ f"\n# Task marked done, with output {output_or_done.task_output}",
288
+ )
289
+ system_console.print(
290
+ f"\nTotal cost: {config.cost_unit}{cost:.3f}"
291
+ )
292
+ return output_or_done.task_output, cost
293
+
294
+ output = output_or_done
295
+
296
+ if isinstance(output, ImageData):
297
+ randomId = petname.Generate(2, "-")
298
+ if not image_histories:
299
+ image_histories.extend([
300
+ {
301
+ 'role': 'assistant',
302
+ 'content': f'Share images with ids: {randomId}'
303
+
304
+ },
305
+ {
306
+ 'role': 'user',
307
+ 'content': [{
308
+ 'type': 'image_url',
309
+ 'image_url': {
310
+ 'url': output.dataurl,
311
+ 'detail': 'auto'
312
+ }
313
+ }]
314
+ }]
315
+ )
316
+ else:
317
+ image_histories[0]['content'] += ', ' + randomId
318
+ image_histories[1]["content"].append({ # type: ignore
319
+ 'type': 'image_url',
320
+ 'image_url': {
321
+ 'url': output.dataurl,
322
+ 'detail': 'auto'
323
+ }
324
+ })
325
+
326
+ item = {
327
+ "role": "tool",
328
+ "content": f'Ask user for image id: {randomId}',
329
+ "tool_call_id": tool_call_id + str(toolindex),
330
+ }
331
+ else:
332
+ item = {
333
+ "role": "tool",
334
+ "content": str(output),
335
+ "tool_call_id": tool_call_id + str(toolindex),
336
+ }
337
+ cost_, output_toks_ = get_output_cost(
338
+ config.cost_file[config.model], enc, item
339
+ )
340
+ cost += cost_
341
+ output_toks += output_toks_
342
+
343
+ _histories.append(item)
344
+ waiting_for_assistant = True
345
+ break
346
+ elif chunk.choices[0].finish_reason:
347
+ assistant_console.print("")
348
+ item = {
349
+ "role": "assistant",
350
+ "content": full_response,
351
+ }
352
+ cost_, output_toks_ = get_output_cost(
353
+ config.cost_file[config.model], enc, item
354
+ )
355
+ cost += cost_
356
+ output_toks += output_toks_
357
+
358
+ system_console.print(f"\nTotal cost: {config.cost_unit}{cost:.3f}")
359
+ _histories.append(item)
360
+ break
361
+
362
+ if chunk.choices[0].delta.tool_calls:
363
+ tool_call = chunk.choices[0].delta.tool_calls[0]
364
+ if tool_call.function and tool_call.function.arguments:
365
+ tool_call_args_by_id[tool_call.id or ""][tool_call.index] += (
366
+ tool_call.function.arguments
367
+ )
368
+
369
+ chunk_str = chunk.choices[0].delta.content or ""
370
+ assistant_console.print(chunk_str, end="")
371
+ full_response += chunk_str
372
+ except KeyboardInterrupt:
373
+ waiting_for_assistant = False
374
+ input("Interrupted...enter to redo the current turn")
375
+ else:
376
+ history.extend(_histories)
377
+ history.extend(image_histories)
378
+ save_history(history, session_id)
379
+
380
+ return "Couldn't finish the task", cost
381
+
382
+
383
+ if __name__ == "__main__":
384
+ app()
wcgw/common.py CHANGED
@@ -23,25 +23,27 @@ Models = Literal["gpt-4o-2024-08-06", "gpt-4o-mini"]
23
23
 
24
24
 
25
25
  def discard_input() -> None:
26
- # Get the file descriptor for stdin
27
- fd = sys.stdin.fileno()
28
-
29
- # Save current terminal settings
30
- old_settings = termios.tcgetattr(fd)
31
-
32
26
  try:
33
- # Switch terminal to non-canonical mode where input is read immediately
34
- tty.setcbreak(fd)
35
-
36
- # Discard all input
37
- while True:
38
- # Check if there is input to be read
39
- if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
40
- sys.stdin.read(
41
- 1
42
- ) # Read one character at a time to flush the input buffer
43
- else:
44
- break
45
- finally:
46
- # Restore old terminal settings
47
- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
27
+ # Get the file descriptor for stdin
28
+ fd = sys.stdin.fileno()
29
+
30
+ # Save current terminal settings
31
+ old_settings = termios.tcgetattr(fd)
32
+
33
+ try:
34
+ # Switch terminal to non-canonical mode where input is read immediately
35
+ tty.setcbreak(fd)
36
+
37
+ # Discard all input
38
+ while True:
39
+ # Check if there is input to be read
40
+ if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
41
+ sys.stdin.read(1) # Read one character at a time to flush the input buffer
42
+ else:
43
+ break
44
+ finally:
45
+ # Restore old terminal settings
46
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
47
+ except (termios.error, ValueError) as e:
48
+ # Handle the error gracefully
49
+ print(f"Warning: Unable to discard input. Error: {e}")
wcgw/tools.py CHANGED
@@ -43,7 +43,7 @@ from .common import CostData, Models, discard_input
43
43
 
44
44
  from .openai_utils import get_input_cost, get_output_cost
45
45
 
46
- console = rich.console.Console(style="magenta", highlight=False)
46
+ console = rich.console.Console(style="magenta", highlight=False, markup=False)
47
47
 
48
48
  TIMEOUT = 30
49
49
 
@@ -103,6 +103,12 @@ def _is_int(mystr: str) -> bool:
103
103
 
104
104
 
105
105
  def _get_exit_code() -> int:
106
+ # First reset the prompt in case venv was sourced or other reasons.
107
+ SHELL.sendline('export PS1="#@@"')
108
+ SHELL.expect("#@@")
109
+ # Reset echo also if it was enabled
110
+ SHELL.sendline("stty -icanon -echo")
111
+ SHELL.expect("#@@")
106
112
  SHELL.sendline("echo $?")
107
113
  before = ""
108
114
  while not _is_int(before): # Consume all previous output
@@ -299,7 +305,7 @@ def write_file(writefile: Writefile) -> str:
299
305
  SHELL.expect("#@@")
300
306
  assert isinstance(SHELL.before, str)
301
307
  current_dir = SHELL.before.strip()
302
- writefile.file_path = os.path.join(current_dir, writefile.file_path)
308
+ return f"Failure: Use absolute path only. FYI current working directory is '{current_dir}'"
303
309
  os.makedirs(os.path.dirname(writefile.file_path), exist_ok=True)
304
310
  try:
305
311
  with open(writefile.file_path, "w") as f:
@@ -1,10 +1,10 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: wcgw
3
- Version: 0.0.10
3
+ Version: 0.1.1
4
4
  Summary: What could go wrong giving full shell access to chatgpt?
5
5
  Project-URL: Homepage, https://github.com/rusiaaman/wcgw
6
6
  Author-email: Aman Rusia <gapypi@arcfu.com>
7
- Requires-Python: <3.13,>=3.8
7
+ Requires-Python: <3.13,>=3.10
8
8
  Requires-Dist: fastapi>=0.115.0
9
9
  Requires-Dist: mypy>=1.11.2
10
10
  Requires-Dist: openai>=1.46.0
@@ -26,6 +26,9 @@ Description-Content-Type: text/markdown
26
26
  # Enable shell access on chatgpt.com
27
27
  A custom gpt on chatgpt web app to interact with your local shell.
28
28
 
29
+ [![Tests](https://github.com/rusiaaman/wcgw/actions/workflows/python-tests.yml/badge.svg?branch=main)](https://github.com/rusiaaman/wcgw/actions/workflows/python-tests.yml)
30
+ [![Build](https://github.com/rusiaaman/wcgw/actions/workflows/python-publish.yml/badge.svg)](https://github.com/rusiaaman/wcgw/actions/workflows/python-publish.yml)
31
+
29
32
  ### 🚀 Highlights
30
33
  - ⚡ **Full Shell Access**: No restrictions, complete control.
31
34
  - ⚡ **Create, Execute, Iterate**: Ask the gpt to keep running compiler checks till all errors are fixed, or ask it to keep checking for the status of a long running command till it's done.
@@ -36,21 +39,24 @@ A custom gpt on chatgpt web app to interact with your local shell.
36
39
  2. Share the generated id with this GPT: `https://chatgpt.com/g/g-Us0AAXkRh-wcgw-giving-shell-access`
37
40
  3. The custom GPT can now run any command on your cli
38
41
 
42
+
39
43
  ## Client
44
+ You need to keep running this client for GPT to access your shell. Run it in a version controlled project's root.
40
45
 
41
- ### Option 1: using pip
42
- Supports python >=3.8 and <3.13
46
+ ### Option 1: using uv [Recommended]
43
47
  ```sh
44
- $ pip3 install wcgw
45
- $ wcgw
48
+ $ curl -LsSf https://astral.sh/uv/install.sh | sh
49
+ $ uv tool run --python 3.12 wcgw@latest
46
50
  ```
47
51
 
48
- ### Option 2: using uv
52
+ ### Option 2: using pip
53
+ Supports python >=3.10 and <3.13
49
54
  ```sh
50
- $ curl -LsSf https://astral.sh/uv/install.sh | sh
51
- $ uv tool run --python 3.12 wcgw
55
+ $ pip3 install wcgw
56
+ $ wcgw
52
57
  ```
53
58
 
59
+
54
60
  This will print a UUID that you need to share with the gpt.
55
61
 
56
62
 
@@ -67,20 +73,37 @@ NOTE: you can resume a broken connection
67
73
  # How it works
68
74
  Your commands are relayed through a server I've hosted at https://wcgw.arcfu.com. The code for that is at `src/relay/serve.py`.
69
75
 
70
- Chat gpt sends a request to the relay server using the user id that you share with it. The relay server holds a websocket with the terminal cilent against the user id and acts as a proxy to pass the request.
76
+ Chat gpt sends a request to the relay server using the user id that you share with it. The relay server holds a websocket with the terminal client against the user id and acts as a proxy to pass the request.
71
77
 
72
78
  It's secure in both the directions. Either a malicious actor or a malicious Chatgpt has to correctly guess your UUID for any security breach.
73
79
 
74
- NOTE: the relay server doesn't store any data. If you don't trust it then you may host the server on your own and create a custom gpt. Create an issue and I'll be happy to share the full instructions and schema I've given in the custom GPT configuration.
75
-
76
80
  # Showcase
77
81
 
82
+ ## Unit tests and github actions
83
+ [The first version of unit tests and github workflow to test on multiple python versions were written by the custom chatgpt](https://chatgpt.com/share/6717f922-8998-8005-b825-45d4b348b4dd)
84
+
78
85
  ## Create a todo app using react + typescript + vite
79
- https://chatgpt.com/share/6717d94d-756c-8005-98a6-d021c7b586aa
86
+ ![Screenshot](https://github.com/rusiaaman/wcgw/blob/main/static/ss1.png?raw=true)
87
+
88
+
89
+ # Privacy
90
+ The relay server doesn't store any data. I can't access any information passing through it and only secure channels are used to communicate.
91
+
92
+ You may host the server on your own and create a custom gpt using the following section.
93
+
94
+ # Creating your own custom gpt and the relay server.
95
+ I've used the following instructions and action json schema to create the custom GPT. (Replace wcgw.arcfu.com with the address to your server)
96
+
97
+ https://github.com/rusiaaman/wcgw/blob/main/gpt_instructions.txt
98
+ https://github.com/rusiaaman/wcgw/blob/main/gpt_action_json_schema.json
99
+
100
+ Run the server
101
+ `gunicorn --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:443 src.relay.serve:app --certfile fullchain.pem --keyfile privkey.pem`
80
102
 
81
- ## Write unit tests for all files in my current repo
82
- [Todo]
103
+ If you don't have public ip and domain name, you can use `ngrok` or similar services to get a https address to the api.
83
104
 
105
+ Then specify the server URL in the `wcgw` command like so
106
+ `wcgw --server-url https://your-url/register`
84
107
 
85
108
  # [Optional] Local shell access with openai API key
86
109
 
@@ -0,0 +1,12 @@
1
+ wcgw/__init__.py,sha256=okSsOWpTKDjEQzgOin3Kdpx4Mc3MFX1RunjopHQSIWE,62
2
+ wcgw/__main__.py,sha256=MjJnFwfYzA1rW47xuSP1EVsi53DTHeEGqESkQwsELFQ,34
3
+ wcgw/basic.py,sha256=z1RVJMTDE1-J33nAPSfMZDdJBliSPCFh55SDCvtDLFI,16198
4
+ wcgw/claude.py,sha256=Bp45-UMBIJd-4tzX618nu-SpRbVtkTb1Es6c_gW6xy0,14861
5
+ wcgw/common.py,sha256=grH-yV_4tnTQZ29xExn4YicGLxEq98z-HkEZwH0ReSg,1410
6
+ wcgw/openai_adapters.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
+ wcgw/openai_utils.py,sha256=YNwCsA-Wqq7jWrxP0rfQmBTb1dI0s7dWXzQqyTzOZT4,2629
8
+ wcgw/tools.py,sha256=0cbDs8WDYa_BOrj6_SOxFXZJ0CbDx7T24gooH2J4jG4,16627
9
+ wcgw-0.1.1.dist-info/METADATA,sha256=j63XR1wjlTJIqu-dmwYEWTKW-WrBW01FFR4WHPUYaF4,4892
10
+ wcgw-0.1.1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
11
+ wcgw-0.1.1.dist-info/entry_points.txt,sha256=T-IH7w6Vc650hr8xksC8kJfbJR4uwN8HDudejwDwrNM,59
12
+ wcgw-0.1.1.dist-info/RECORD,,
@@ -1,11 +0,0 @@
1
- wcgw/__init__.py,sha256=okSsOWpTKDjEQzgOin3Kdpx4Mc3MFX1RunjopHQSIWE,62
2
- wcgw/__main__.py,sha256=MjJnFwfYzA1rW47xuSP1EVsi53DTHeEGqESkQwsELFQ,34
3
- wcgw/basic.py,sha256=t5l24fx8UNIJSa1yh2wh4WYtLwi35y5WR8XKG-7lC00,16040
4
- wcgw/common.py,sha256=jn39zTpaFUO1PWof_z7qBmklaZH5G1blzjlBvez0cg4,1225
5
- wcgw/openai_adapters.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
- wcgw/openai_utils.py,sha256=YNwCsA-Wqq7jWrxP0rfQmBTb1dI0s7dWXzQqyTzOZT4,2629
7
- wcgw/tools.py,sha256=HGcxk09mmwnENV0PPfGV-I7TaOEvRlOdD9KdH_juHhA,16350
8
- wcgw-0.0.10.dist-info/METADATA,sha256=j3ODV1Y9v9ntqDYrOi8BO5LjyMRAYWmI8DnMKjmLpkw,3526
9
- wcgw-0.0.10.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
10
- wcgw-0.0.10.dist-info/entry_points.txt,sha256=T-IH7w6Vc650hr8xksC8kJfbJR4uwN8HDudejwDwrNM,59
11
- wcgw-0.0.10.dist-info/RECORD,,
File without changes