autosh 0.0.0__py3-none-any.whl → 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
autosh/session.py ADDED
@@ -0,0 +1,246 @@
1
+ import asyncio
2
+ from pathlib import Path
3
+ import sys
4
+ from agentia import Agent
5
+ from agentia.chat_completion import MessageStream
6
+ from agentia.message import UserMessage
7
+ from agentia.plugins import PluginInitError
8
+
9
+ from autosh.config import CLI_OPTIONS, CONFIG
10
+ from autosh.md import stream_md
11
+ from .plugins import create_plugins
12
+ import rich
13
+ import platform
14
+ from rich.prompt import Prompt
15
+
16
+
17
# System prompt for the shell agent. Typos fixed ("a AI" -> "an AI",
# "fullfill" -> "fulfill", "look for then" -> "look for them",
# "output are" -> "output is") so the model receives clean instructions.
INSTRUCTIONS = f"""
You are now acting as an AI-powered terminal shell, operating on the user's real computer.

The user will send you questions, prompts, or descriptions of the tasks.
You should take the prompts, and either answer the user's questions, or fulfill the tasks.
When necessary, generate the system commands, and execute them to fulfill the tasks.

Don't do anything else that the user doesn't ask for, or not relevant to the tasks.
The system command output is displayed to the user directly, so don't repeat the output in your response.
Just respond with the text if you want to simply print something to the terminal, no need to use `echo` or `print`.

If the prompt mentions it requires some arguments/options/flags, look for them in the command line arguments list and use them to complete the tasks.

You may use markdown to format your responses.

YOUR HOST OS INFO: {platform.platform()}
"""
34
+
35
+
36
class Session:
    """An AI shell session wrapping an `agentia.Agent`.

    Provides the entry points for the supported execution modes: a single
    prompt (`exec_prompt`), a natural-language script file (`exec_script`),
    a prompt piped through stdin (`exec_from_stdin`), and an interactive
    REPL (`run_repl`). Responses are streamed and rendered as markdown when
    stdout is a terminal, or passed through verbatim otherwise.
    """

    def __init__(self):
        # Use the dedicated "think" model only when the user requested it on
        # the command line; otherwise the default configured model.
        self.agent = Agent(
            model=CONFIG.model if not CLI_OPTIONS.think else CONFIG.think_model,
            api_key=CONFIG.api_key,
            instructions=INSTRUCTIONS,
            tools=create_plugins(),
        )

    async def init(self):
        """Initialize the agent and its plugins; exit(1) if a plugin fails."""
        try:
            await self.agent.init()
        except PluginInitError as e:
            rich.print(
                f"[bold red]Error:[/bold red] [red]Plugin [bold italic]{e.plugin}[/bold italic] failed to initialize: {str(e.original)}[/red]"
            )
            sys.exit(1)

    def _exit_with_error(self, msg: str):
        """Print *msg* as a rich-formatted error and terminate with exit code 1."""
        # FIX: the closing [/red] tag was missing, leaving the red style open
        # for any subsequent console output.
        rich.print(f"[bold red]Error:[/bold red] [red]{msg}[/red]")
        sys.exit(1)

    async def _print_help_and_exit(self, prompt: str):
        """Generate and print a CLI --help message for a natural-language script.

        Asks a small model to derive a conventional help text from the script
        body (*prompt*), streams it to the terminal, then exits with code 0.
        """
        agent = Agent(
            model="openai/gpt-4o-mini",
            api_key=CONFIG.api_key,
            instructions=f"""
This is a CLI program logic written in natural language.
Please help me to generate the CLI --help message for this CLI app.
Just output the help message, no need to add any other text.

RESPONSE FORMAT:

**Usage:** ...

The description of the program.

**Options:**

* -f, --foo Description of foo
* -b, --bar Description of bar
* --baz Description of baz
...
* -h, --help Show this message and exit.
""",
        )
        agent.history.add(self._get_argv_message())
        completion = agent.chat_completion(prompt, stream=True)
        async for stream in completion:
            await self.__render_streamed_markdown(stream)
        sys.exit(0)

    def _get_argv_message(self):
        """Build a UserMessage describing the program name, argv, and cwd."""
        args = str(CLI_OPTIONS.args)
        if not CLI_OPTIONS.script:
            cmd = "PROMPT"
        else:
            cmd = CLI_OPTIONS.script.name
        return UserMessage(
            content=f"PROGRAM NAME: {cmd}\n\nCOMMAND LINE ARGS: {args}\n\nCWD: {str(Path.cwd())}",
            role="user",
        )

    async def exec_prompt(self, prompt: str):
        """Execute a single prompt (or script body) and stream the response."""
        # Clean up the prompt; an empty prompt is a successful no-op.
        if prompt is not None:
            prompt = prompt.strip()
        if not prompt:
            sys.exit(0)
        # skip shebang line
        if prompt.startswith("#!"):
            prompt = prompt.split("\n", 1)[1]
        # `-h`/`--help` as the sole argument prints a generated help message.
        if len(CLI_OPTIONS.args) == 1 and (
            CLI_OPTIONS.args[0] == "-h" or CLI_OPTIONS.args[0] == "--help"
        ):
            await self._print_help_and_exit(prompt)
        # Execute the prompt
        loading = self.__create_loading_indicator()
        CLI_OPTIONS.prompt = prompt
        self.agent.history.add(self._get_argv_message())
        if CLI_OPTIONS.stdin_has_data():
            self.agent.history.add(
                UserMessage(
                    content="IMPORTANT: The user is using piped stdin to feed additional data to you. Please use tools to read when necessary.",
                    role="user",
                )
            )
        completion = self.agent.chat_completion(prompt, stream=True)
        async for stream in completion:
            # Restart the spinner between streamed messages (tool rounds).
            if not loading:
                loading = self.__create_loading_indicator()
            if await self.__render_streamed_markdown(stream, loading=loading):
                print()
            loading = None

    async def exec_from_stdin(self):
        """Read a prompt piped through stdin and execute it."""
        if sys.stdin.isatty():
            self._exit_with_error("No prompt is piped to stdin.")
        prompt = sys.stdin.read()
        if not prompt:
            sys.exit(0)
        CLI_OPTIONS.stdin_is_script = True
        await self.exec_prompt(prompt)

    async def exec_script(self, script: Path):
        """Execute a natural-language script file."""
        CLI_OPTIONS.script = script
        with open(script, "r") as f:
            prompt = f.read()
        await self.exec_prompt(prompt)

    async def run_repl(self):
        """Run an interactive read-eval loop until "exit"/"quit" or Ctrl-C."""
        console = rich.console.Console()
        while True:
            try:
                prompt = console.input("[bold blue]>[/bold blue] ").strip()
                if prompt in ["exit", "quit"]:
                    break
                if len(prompt) == 0:
                    continue
                loading = self.__create_loading_indicator(newline=True)
                completion = self.agent.chat_completion(prompt, stream=True)
                async for stream in completion:
                    if not loading:
                        loading = self.__create_loading_indicator()
                    if await self.__render_streamed_markdown(stream, loading=loading):
                        print()
                    loading = None
            except KeyboardInterrupt:
                break

    def __create_loading_indicator(self, newline: bool = False):
        """Start the spinner task, or return None when stdout is not a TTY."""
        return (
            asyncio.create_task(self.__loading(newline))
            if sys.stdout.isatty()
            else None
        )

    async def __loading(self, newline: bool = False):
        """Render a braille spinner with a "Loading..." label until cancelled.

        On cancellation the indicator is erased and terminal styling is reset;
        when *newline* is true a trailing newline is printed after cleanup.
        """
        chars = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
        char_width = 1
        msg = "Loading..."
        count = 0
        print("\x1b[2m", end="", flush=True)  # dim style for the indicator
        while True:
            try:
                print(chars[count], end="", flush=True)
                print(" " + msg, end="", flush=True)
                count += 1
                await asyncio.sleep(0.1)
                # Erase spinner + space + message before drawing the next frame.
                length = char_width + len(msg) + 1
                print("\b" * length, end="", flush=True)
                print(" " * length, end="", flush=True)
                print("\b" * length, end="", flush=True)
                if count == len(chars):
                    count = 0
            except asyncio.CancelledError:
                # Final cleanup: erase the indicator and reset styling.
                length = char_width + len(msg) + 1
                print("\b" * length, end="", flush=True)
                print(" " * length, end="", flush=True)
                print("\b" * length, end="", flush=True)
                print("\x1b[0m", end="", flush=True)
                if newline:
                    print()
                break

    async def __render_streamed_markdown(
        self, stream: MessageStream, loading: asyncio.Task[None] | None = None
    ):
        """Render one streamed message; return True if anything was printed.

        Cancels (and awaits) the *loading* spinner before any real output. On
        a TTY the stream is rendered as markdown via `stream_md`; otherwise
        chunks are printed verbatim.
        """
        if sys.stdout.isatty():
            # buffer first few chars so we don't need to launch glow if there is no output
            chunks = aiter(stream)
            buf = ""
            while len(buf) < 8:
                try:
                    buf += await anext(chunks)
                except StopAsyncIteration:
                    if len(buf) == 0:
                        if loading:
                            loading.cancel()
                            await loading
                        return False
                    break
            if loading:
                loading.cancel()
                await loading

            # NOTE(review): `content` is accumulated but never read — kept for
            # behavioral parity; candidate for removal.
            content = {"v": ""}

            async def gen():
                content["v"] = buf
                if buf:
                    yield buf
                while True:
                    try:
                        s = await anext(chunks)
                        content["v"] += s
                        # Yield char-by-char so the renderer updates smoothly.
                        for c in s:
                            yield c
                    except StopAsyncIteration:
                        break

            await stream_md(gen())
            return True
        else:
            # Not a terminal: pass the chunks through unrendered.
            has_content = False
            async for chunk in stream:
                if chunk == "":
                    continue
                has_content = True
                print(chunk, end="", flush=True)
            return has_content
@@ -0,0 +1,77 @@
1
+ Metadata-Version: 2.4
2
+ Name: autosh
3
+ Version: 0.0.2
4
+ Summary: The AI-powered, noob-friendly interactive shell
5
+ License-File: LICENSE
6
+ Requires-Python: >=3.13
7
+ Requires-Dist: agentia>=0.0.5
8
+ Requires-Dist: asyncio>=3.4.3
9
+ Requires-Dist: markdownify>=1.1.0
10
+ Requires-Dist: pydantic>=2.11.3
11
+ Requires-Dist: python-dotenv>=1.1.0
12
+ Requires-Dist: rich>=14.0.0
13
+ Requires-Dist: tavily-python>=0.5.4
14
+ Requires-Dist: typer>=0.12.5
15
+ Requires-Dist: tzlocal>=5.3.1
16
+ Description-Content-Type: text/markdown
17
+
18
+ # `autosh` - The AI-powered, noob-friendly interactive shell
19
+
20
+ # Getting Started
21
+
22
+ ## Install
23
+
24
+ ```bash
25
+ uv tool install autosh
26
+ ```
27
+
28
+ ## Usage
29
+
30
+ As an interactive shell: `ash` (alternatively, `autosh`)
31
+
32
+ Execute a single prompt: `ash "list current directory"`
33
+
34
+ Process piped data: `cat README.md | ash "summarise"`
35
+
36
+ ## Scripting
37
+
38
+ Write AI-powered shell scripts in Markdown using natural language!
39
+
40
+ Example script ([simple.a.md](examples/simple.a.md)):
41
+
42
+ ```markdown
43
+ #!/usr/bin/env ash
44
+
45
+ # This is a simple file manipulation script
46
+
47
+ First, please display a welcome message:)
48
+
49
+ Write "Hello, world" to _test.log
50
+ ```
51
+
52
+ * Run the script: `ash simple.a.md` or `chmod +x simple.a.md && simple.a.md`
53
+ * Auto generate help messages:
54
+
55
+ ```console
56
+ $ ash simple.a.md -h
57
+
58
+ Usage: simple.a.md [OPTIONS]
59
+
60
+ This is a simple file manipulation script that writes "Hello, world" to a log file named _test.log.
61
+
62
+ Options:
63
+
64
+ • -h, --help Show this message and exit.
65
+ ```
66
+
67
+ ## Plugins
68
+
69
+ `autosh` is equipped with several plugins to expand its potential:
70
+
71
+ * `ash "Create a directory "my-news", list the latest news, for each news, put the summary in a separate markdown file in this directory"`
72
+
73
+ # TODO
74
+
75
+ - [ ] Image generation
76
+ - [ ] Image input
77
+ - [ ] RAG for non-text files
@@ -0,0 +1,17 @@
1
+ autosh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ autosh/config.py,sha256=KPuXr_MF5Pbdn5pCCgq9AcyDSDlOnCJ4vBetKmhodic,2348
3
+ autosh/main.py,sha256=myRolKqfHyQEgujqmUndDSm9J0C21w3zqA9-SYNX1kY,4961
4
+ autosh/md.py,sha256=qXg5ZFVUwek3rUXb-oEti1soRRbIq8EbDq88lEYRoO4,14267
5
+ autosh/session.py,sha256=kCaD-7tZpozz56sWlMGp0njqwYRf3cZOau1ulWRKJEI,8885
6
+ autosh/plugins/__init__.py,sha256=yOTobuyYFpUWl5BCowGzJG8rZX2Whlagail_QyHVlo4,2487
7
+ autosh/plugins/calc.py,sha256=qo0EajIpNPv9PtLNLygyEjVaxo1F6_S62kmoJZq5oLM,581
8
+ autosh/plugins/cli.py,sha256=D6S_QHPmjBBB9gwgXeJrwxUs3u0TNty_tHVICbEPGbs,8522
9
+ autosh/plugins/clock.py,sha256=GGi0HAG6f6-FP1qqGoyCcUj11q_VnkaGArumsMk0CkY,542
10
+ autosh/plugins/code.py,sha256=0JwFzq6ejgbisCqBm_RG1r1WEVNou64ue-siVIpvZqs,2291
11
+ autosh/plugins/search.py,sha256=1d3Gqq6uXu0ntTBpw44Ab_haAySvZLMj3e2MQd3DHO0,2736
12
+ autosh/plugins/web.py,sha256=lmD2JnsqVI1qKgSFrk39851jCZoPyPRaVvHeEFYXylA,2597
13
+ autosh-0.0.2.dist-info/METADATA,sha256=y9pbK71nuueA11LCv6AsR24MSsnqNut-7-zljpOC5_Y,1720
14
+ autosh-0.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
15
+ autosh-0.0.2.dist-info/entry_points.txt,sha256=BV7bzUnxG6Z5InEkrfajGCxjooYORC5tZDDZctOPenQ,67
16
+ autosh-0.0.2.dist-info/licenses/LICENSE,sha256=BnLDJsIJe-Dm18unR9DOoSv7QOfAz6LeIQc1yHAjxp0,1066
17
+ autosh-0.0.2.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 1.9.0
2
+ Generator: hatchling 1.27.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
@@ -0,0 +1,3 @@
1
+ [console_scripts]
2
+ ash = autosh.main:main
3
+ autosh = autosh.main:main
@@ -1,6 +1,6 @@
1
1
  MIT License
2
2
 
3
- Copyright (c) 2021 Arnaud Blois
3
+ Copyright (c) 2024 Wenyu Zhao
4
4
 
5
5
  Permission is hereby granted, free of charge, to any person obtaining a copy
6
6
  of this software and associated documentation files (the "Software"), to deal
@@ -1,16 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: autosh
3
- Version: 0.0.0
4
- Summary: reserved
5
- License: MIT
6
- Author: Wenyu Zhao
7
- Author-email: wenyu.zhao@anu.edu.au
8
- Requires-Python: >=3.10
9
- Classifier: License :: OSI Approved :: MIT License
10
- Classifier: Programming Language :: Python :: 3
11
- Classifier: Programming Language :: Python :: 3.10
12
- Classifier: Programming Language :: Python :: 3.11
13
- Classifier: Programming Language :: Python :: 3.12
14
- Description-Content-Type: text/markdown
15
-
16
- reserved
@@ -1,5 +0,0 @@
1
- reserved/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- autosh-0.0.0.dist-info/LICENSE,sha256=XKJuERnF3YCXg9ujPMNx1yOhChxe6bBmoHnPZBl7o2I,1068
3
- autosh-0.0.0.dist-info/METADATA,sha256=6bD3KcuY6YsLtrAKPnA6R_i0xBeaynkq-cKR0V5nppo,462
4
- autosh-0.0.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
5
- autosh-0.0.0.dist-info/RECORD,,
File without changes