axon-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- axon/__init__.py +0 -0
- axon/commands.py +54 -0
- axon/config.py +1 -0
- axon/llm.py +12 -0
- axon/main.py +75 -0
- axon/memory.py +42 -0
- axon/renderer.py +64 -0
- axon/tools.py +80 -0
- axon_cli-0.1.0.dist-info/METADATA +9 -0
- axon_cli-0.1.0.dist-info/RECORD +13 -0
- axon_cli-0.1.0.dist-info/WHEEL +5 -0
- axon_cli-0.1.0.dist-info/entry_points.txt +2 -0
- axon_cli-0.1.0.dist-info/top_level.txt +1 -0
axon/__init__.py
ADDED
|
File without changes
|
axon/commands.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import ollama
|
|
2
|
+
import questionary
|
|
3
|
+
from rich.console import Console
|
|
4
|
+
|
|
5
|
+
from axon import config, memory, tools
|
|
6
|
+
|
|
7
|
+
# Shared Rich console for styled status output (e.g. the project-load message).
console = Console()
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def handle_command(user_input):
    """Dispatch a slash command.

    Returns True when the input was a recognized command (already handled),
    False otherwise so the caller can treat the input as a chat message.
    """
    if user_input == "/help":
        print("/help - show commands")
        print("/clear - clear history")
        print("/save - save current session")
        print("/load - load a saved session")
        print("/model - change model")
        print("/load-project - load a project folder into context")
        print("/exit - quit")
        return True
    elif user_input == "/clear":
        memory.clear_history()
        return True
    elif user_input == "/exit":
        # exit() raises SystemExit, so no return is needed (the one that
        # followed it previously was unreachable).
        exit()
    elif user_input == "/save":
        filename = input("Enter session name: ")
        memory.save_session(filename)
        # Report the name actually used, not a hard-coded placeholder.
        print(f"Session saved as '{filename}'")
        return True
    elif user_input == "/load":
        filename = input("Enter session name to load: ")
        memory.load_session(filename)
        print(f"Session '{filename}' loaded!")
        return True
    elif user_input == "/model":
        models = ollama.list()
        # Single assignment (was a doubled `model_names = model_names = ...`).
        model_names = [m.model for m in models.models if m.model is not None]
        selected = questionary.select("Select a model:", choices=model_names).ask()
        if selected:
            config.current_model = selected
            print(f"Model switched to '{selected}'")
        return True
    elif user_input == "/load-project":
        folder = input("Enter folder path: ")
        print("Loading project...")
        result = tools.load_project(folder)
        memory.add_message("user", f"Here is my project:\n{result}")
        console.print("[green]Project loaded![/green]")
        return True
    else:
        return False
|
axon/config.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Active Ollama model name; reassigned at runtime by the startup picker and /model.
current_model = "llama3.2"
|
axon/llm.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import ollama
|
|
2
|
+
|
|
3
|
+
from axon import config
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def stream_response(conversation_history):
    """Yield response tokens from Ollama one at a time as they arrive."""
    stream = ollama.chat(
        model=config.current_model,
        messages=conversation_history,
        stream=True,
    )
    for part in stream:
        yield part["message"]["content"]
|
axon/main.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import ollama
|
|
2
|
+
import questionary
|
|
3
|
+
from prompt_toolkit import PromptSession
|
|
4
|
+
from prompt_toolkit.completion import WordCompleter
|
|
5
|
+
|
|
6
|
+
from axon import commands, config, memory, tools
|
|
7
|
+
from axon.llm import stream_response
|
|
8
|
+
from axon.renderer import print_welcome, render_markdown, thinking
|
|
9
|
+
|
|
10
|
+
# --- startup: pick a model interactively before entering the REPL ---
models = ollama.list()
model_names = [m.model for m in models.models if m.model is not None]
selected = questionary.select(
    "Select a model to start with:", choices=model_names
).ask()
config.current_model = selected
print(f"Starting with '{selected}'\n")
print_welcome()

# Tab-completion for the slash commands; sentence=True makes the completer
# match against the whole input line rather than the last word.
command_completer = WordCompleter(
    ["/help", "/clear", "/save", "/load", "/model", "/load-project", "/exit"],
    sentence=True,
)

# One prompt session reused for every read in the REPL loop.
session = PromptSession(completer=command_completer)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _stream_reply():
    """Stream one complete model reply while showing the thinking spinner."""
    with thinking():
        return "".join(stream_response(memory.conversation_history))


def _follow_up(assistant_text, tool_result):
    """Record a tool round-trip in history and stream the follow-up reply."""
    memory.add_message("assistant", assistant_text)
    memory.add_message("user", f"Tool result:\n{tool_result}")
    return _stream_reply()


def main():
    """REPL loop: read input, dispatch slash commands, chat, execute tool calls.

    The three tool branches previously duplicated the record-result-and-stream
    sequence verbatim; that now lives in _follow_up.
    """
    while True:
        user_input = session.prompt("\n[>] ")
        if user_input.startswith("/"):
            commands.handle_command(user_input)
            continue

        memory.add_message("user", user_input)
        full_response = _stream_reply()

        tool, args = tools.parse_tool_call(full_response)
        if tool == "read_file":
            # Keep only the final path component before reading.
            target = args.split("/")[-1].strip()
            full_response = _follow_up(full_response, tools.read_file(target))
        elif tool == "run_command":
            full_response = _follow_up(full_response, tools.run_command(args))
        elif tool == "write_file":
            filepath, content = args.split("|", 1)
            filepath = filepath.split("/")[-1].strip()
            result = tools.write_file(filepath, content)
            full_response = _follow_up(full_response, result)

        render_markdown(full_response)
        memory.add_message("assistant", full_response)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
# Allow running as `python -m axon.main` in addition to the console-script entry.
if __name__ == "__main__":
    main()
|
axon/memory.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import json
import os
|
|
2
|
+
|
|
3
|
+
# Shared chat transcript sent to the model on every request.
# Index 0 is the system prompt; clear_history() preserves it.
conversation_history = [
    {
        "role": "system",
        "content": """You are AXON, a helpful CLI assistant running locally via Ollama.

ONLY use tools when the user EXPLICITLY asks you to read, write, or run something.
NEVER use tools to answer general questions — just respond normally.

To read a file respond ONLY with:
<tool>read_file</tool><args>filepath</args>

To run a command respond ONLY with:
<tool>run_command</tool><args>command</args>

To write a file respond ONLY with:
<tool>write_file</tool><args>filepath|content</args>

Use the EXACT filename the user mentioned. Never use placeholder paths.
For everything else including explaining code, answering questions, summarizing — respond normally and be concise.""",
    }
]
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def add_message(role, content):
    """Append one chat message to the shared conversation history."""
    entry = {"role": role, "content": content}
    conversation_history.append(entry)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def clear_history():
    """Drop every message except the leading system prompt, in place."""
    del conversation_history[1:]
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def save_session(filename):
    """Write the conversation history to sessions/<filename>.json.

    The path previously hard-coded a placeholder name, ignoring *filename*.
    """
    # Create the folder on demand so the very first save doesn't crash.
    os.makedirs("sessions", exist_ok=True)
    with open(f"sessions/{filename}.json", "w") as f:
        json.dump(conversation_history, f, indent=2)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def load_session(filename):
    """Replace the in-memory history with sessions/<filename>.json.

    The path previously hard-coded a placeholder name, ignoring *filename*.
    Raises FileNotFoundError when the session file does not exist.
    """
    with open(f"sessions/{filename}.json", "r") as f:
        data = json.load(f)
    # Slice-assign so modules holding a reference to the list see the change.
    conversation_history[:] = data
|
axon/renderer.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
import pyfiglet
|
|
4
|
+
from rich.columns import Columns
|
|
5
|
+
from rich.console import Console
|
|
6
|
+
from rich.live import Live
|
|
7
|
+
from rich.markdown import Markdown
|
|
8
|
+
from rich.panel import Panel
|
|
9
|
+
from rich.spinner import Spinner
|
|
10
|
+
from rich.text import Text
|
|
11
|
+
|
|
12
|
+
from axon import config
|
|
13
|
+
|
|
14
|
+
# Module-wide Rich console shared by all rendering helpers below.
console = Console()
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def print_token(token):
    """Print a single streamed token without a newline, flushing immediately."""
    print(token, end="", flush=True)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def render_markdown(markdown_text):
    """Render markdown_text to the terminal via Rich."""
    console.print(Markdown(markdown_text))
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def thinking():
    """Return a Live context manager showing a spinner while the model works."""
    spinner = Spinner("dots", text=" Thinking...")
    return Live(spinner, console=console)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def print_banner():
    """Print the AXON ASCII-art banner in bold cyan."""
    banner = Text(pyfiglet.figlet_format("AXON", font="slant"))
    banner.stylize("bold cyan")
    console.print(banner)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def print_welcome():
    """Render the startup screen: banner plus model-info and tips panels."""
    print_banner()

    info = Text()
    info.append("\n Welcome back!\n\n", style="bold white")
    info.append(" ◈\n\n", style="bold cyan")
    info.append(f" {config.current_model}\n", style="dim")
    info.append(" Local via Ollama\n", style="dim")
    model_panel = Panel(info, style="cyan", width=35)

    # Most recent saved session (lexicographic order), if any exist.
    saved = os.listdir("sessions") if os.path.exists("sessions") else []
    if saved:
        activity = f"Last session: {sorted(saved)[-1].replace('.json', '')}"
    else:
        activity = "No recent activity"

    tips = Text()
    tips.append("Tips for getting started\n\n", style="bold yellow")
    tips.append("1. Ask questions, edit files, or run commands.\n")
    tips.append("2. Use /load-project to load a full codebase.\n")
    tips.append("3. Use /model to switch models.\n\n")
    tips.append("Recent activity\n\n", style="bold yellow")
    tips.append(activity, style="dim")
    tips_panel = Panel(tips, style="cyan", width=60)

    console.print(Columns([model_panel, tips_panel]))
    console.print()
|
axon/tools.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import subprocess
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def load_project(folder_path):
    """Concatenate every supported source file under *folder_path*.

    Returns one string with a "=== path ===" section per file, prefixed with
    the file count, or a notice when nothing matched. Dependency and VCS
    folders are skipped entirely.
    """
    # Sets give O(1) membership tests for the per-file extension check.
    allowed = {
        ".py", ".js", ".html", ".css", ".json",
        ".md", ".txt", ".ts", ".jsx", ".tsx",
    }
    skip_folders = {"node_modules", ".git", "__pycache__", ".venv", "venv"}
    sections = []
    for root, dirs, files in os.walk(folder_path):
        # Prune junk folders in place so os.walk never descends into them.
        dirs[:] = [d for d in dirs if d not in skip_folders]
        for file in files:
            _, ext = os.path.splitext(file)
            if ext not in allowed:
                continue
            full_path = os.path.join(root, file)
            content = read_file(full_path)
            sections.append(f"\n=== {full_path} ===\n{content}\n")
    if not sections:
        return "No supported files found in that folder."
    # join() instead of repeated += keeps this linear in total content size.
    return f"Loaded {len(sections)} files:\n{''.join(sections)}"
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def parse_tool_call(text):
    """Extract a (tool_name, args) pair from a model response.

    Expects the ``<tool>name</tool><args>...</args>`` protocol from the
    system prompt. Returns (None, None) when no well-formed call is present;
    previously a missing closing tag made find() return -1 and produced
    garbage slices instead.
    """
    tool_start = text.find("<tool>")
    if tool_start == -1:
        return (None, None)
    tool_end = text.find("</tool>", tool_start)
    args_start = text.find("<args>")
    args_end = text.find("</args>", args_start)
    if tool_end == -1 or args_start == -1 or args_end == -1:
        # Malformed or incomplete call: treat the response as plain text.
        return (None, None)

    tool_name = text[tool_start + len("<tool>"):tool_end]
    args = text[args_start + len("<args>"):args_end]
    return (tool_name, args)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def read_file(file_path):
    """Return the text contents of *file_path*.

    Undecodable bytes are ignored. Failures come back as "Error: ..." strings
    rather than exceptions, matching write_file and run_command.
    """
    try:
        with open(file_path, "r", encoding="utf-8", errors="ignore") as file:
            return file.read()
    except FileNotFoundError:
        return f"Error: file '{file_path}' not found."
    except OSError as e:
        # Directories, permission errors, etc. previously crashed the caller.
        return f"Error: {e}"
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def write_file(file_path, content):
    """Write *content* to *file_path*, creating or truncating the file.

    Returns a success/error message string instead of raising, so the result
    can be fed straight back to the model.
    """
    try:
        # Explicit UTF-8 keeps output platform-independent and matches read_file.
        with open(file_path, "w", encoding="utf-8") as file:
            file.write(content)
        return f"Success: file '{file_path}' written."
    except Exception as e:
        return f"Error: {e}"
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def run_command(command):
    """Run *command* in a shell and return its output as a string.

    Returns stdout if any, else stderr, else a status message. Failures never
    raise; they come back as "Error: ..." strings for the model to read.

    NOTE(security): shell=True executes model-generated text verbatim. That is
    by design for this assistant, but never expose it to untrusted input.
    """
    try:
        result = subprocess.run(command, shell=True, capture_output=True, text=True)
        if result.stdout:
            return result.stdout
        if result.stderr:
            return result.stderr
        if result.returncode != 0:
            # A silent non-zero exit previously masqueraded as success.
            return f"Command exited with code {result.returncode}."
        return "Command ran successfully with no output."
    except Exception as e:
        return f"Error: {e}"
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
axon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
+
axon/commands.py,sha256=TuUdtnnAMS1Fqj6owHlseG8UgATM15MB_zNsmt7dALI,1847
|
|
3
|
+
axon/config.py,sha256=XdKpURgJMFDMp3Xh8X-MsNw2rifslr4AeZvb5LIN3-w,28
|
|
4
|
+
axon/llm.py,sha256=BX9GwfKcKMpSZcmQbxS53_0EoOTInmMPnh_3E8i9Glg,326
|
|
5
|
+
axon/main.py,sha256=24yQa00EwhqUccHac6crhBraNssPT4W_VTWqYngCc0U,3018
|
|
6
|
+
axon/memory.py,sha256=su2KD9kfIT31W6Mv4PmR8Y4OR7-GhTUE32Za3jvw8qM,1356
|
|
7
|
+
axon/renderer.py,sha256=fl1icyy466Jhg6Zlf0NoPOJ6Cn1oN2tFoA7IHyqLdGo,1948
|
|
8
|
+
axon/tools.py,sha256=r80hfWiP7wODLFJnDxTZjEBxOdmTZtjoUyhte7eajdg,2253
|
|
9
|
+
axon_cli-0.1.0.dist-info/METADATA,sha256=xTVrd5a7mmPfhxLCQJuL14l4kAnSD5tjgbLigEpH4vk,236
|
|
10
|
+
axon_cli-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
|
|
11
|
+
axon_cli-0.1.0.dist-info/entry_points.txt,sha256=qwp1vV4rHTSDQzACmA9OmKzR5HiwwFp46LZxh1hjeFw,40
|
|
12
|
+
axon_cli-0.1.0.dist-info/top_level.txt,sha256=vRkPFUYD0GspqpZfeuY7c2fzJmpHIpgjOY_nnOHNwVc,5
|
|
13
|
+
axon_cli-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
axon
|