axon-cli 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,9 @@
1
+ Metadata-Version: 2.4
2
+ Name: axon-cli
3
+ Version: 0.1.0
4
+ Summary: A local CLI AI assistant powered by Ollama
5
+ Requires-Dist: ollama
6
+ Requires-Dist: rich
7
+ Requires-Dist: prompt_toolkit
8
+ Requires-Dist: pyfiglet
9
+ Requires-Dist: questionary
@@ -0,0 +1,98 @@
1
+ # AXON 🤖
2
+ > A local CLI AI assistant powered by Ollama — works like Gemini CLI or Claude Code, completely offline and private.
3
+
4
+ ## Features
5
+ - 🧠 Conversational AI with full memory across turns
6
+ - 🌊 Streaming responses with a thinking spinner
7
+ - 📄 Markdown rendering in the terminal
8
+ - 📁 Read and write files in your current directory
9
+ - ⚡ Run shell commands directly from chat
10
+ - 📂 Load full project folders into context
11
+ - 💾 Save and load chat sessions
12
+ - 🔀 Switch between Ollama models at runtime
13
+ - ⌨️ Slash command autocomplete
14
+
15
+ ## Requirements
16
+ - Python 3.10+
17
+ - [Ollama](https://ollama.com) installed and running
18
+ - At least one Ollama model pulled (e.g. `ollama pull llama3.2`)
19
+
20
+ ## Installation
21
+
22
+ **1. Clone the repository**
23
+
24
+ git clone https://github.com/yourusername/axon-cli.git
25
+ cd axon-cli
26
+
27
+ **2. Install dependencies**
28
+
29
+ pip install -r requirements.txt
30
+
31
+ **3. Install AXON as a command**
32
+
33
+ pip install -e .
34
+
35
+ **4. Make sure Ollama is running**
36
+
37
+ ollama serve
38
+
39
+ **5. Run AXON from anywhere**
40
+
41
+ axon
42
+
43
+ ## Usage
44
+
45
+ On startup, AXON will ask you to select a model from your installed Ollama models using arrow keys. Then you can start chatting right away.
46
+
47
+ ? Select a model to start with:
48
+ ❯ llama3.2:latest
49
+ gemma4:E4B
50
+
51
+ ## Commands
52
+
53
+ | Command | Description |
54
+ |---|---|
55
+ | `/help` | Show all available commands |
56
+ | `/clear` | Clear conversation history |
57
+ | `/save` | Save current session to disk |
58
+ | `/load` | Load a previously saved session |
59
+ | `/model` | Switch Ollama model at runtime |
60
+ | `/load-project` | Load a full project folder into context |
61
+ | `/exit` | Quit AXON |
62
+
63
+ ## Tools
64
+
65
+ AXON can interact with your filesystem and shell when asked:
66
+
67
+ - **Read a file** — `read the file main.py`
68
+ - **Write a file** — `write a file called notes.txt with...`
69
+ - **Run a command** — `run dir` or `run git status`
70
+ - **Load a project** — `/load-project` then provide folder path
71
+
72
+ ## Project Structure
73
+
74
+ axon-cli/
75
+ ├── axon/
76
+ │ ├── __init__.py
77
+ │ ├── main.py # Entry point and main loop
78
+ │ ├── llm.py # Ollama streaming client
79
+ │ ├── memory.py # Conversation history management
80
+ │ ├── renderer.py # Rich terminal UI and markdown rendering
81
+ │ ├── commands.py # Slash command handler
82
+ │ ├── tools.py # File and shell tools
83
+ │ └── config.py # Shared configuration
84
+ ├── sessions/ # Saved chat sessions (auto-created)
85
+ ├── pyproject.toml
86
+ ├── requirements.txt
87
+ └── README.md
88
+
89
+ ## Built With
90
+
91
+ - [Ollama](https://ollama.com) — local LLM inference
92
+ - [Rich](https://github.com/Textualize/rich) — terminal UI and markdown
93
+ - [prompt_toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit) — autocomplete input
94
+ - [pyfiglet](https://github.com/pwaller/pyfiglet) — ASCII art banner
95
+ - [questionary](https://github.com/tmbo/questionary) — interactive model selection
96
+
97
+ ## License
98
+ MIT
File without changes
@@ -0,0 +1,54 @@
1
+ import ollama
2
+ import questionary
3
+ from rich.console import Console
4
+
5
+ from axon import config, memory, tools
6
+
7
+ console = Console()
8
+
9
+
10
def handle_command(user_input):
    """Dispatch a slash command.

    Returns True when the input was a recognized command (even if it then
    did nothing, e.g. an aborted /model picker), False otherwise so the
    caller can treat the input as a normal chat message.
    """
    if user_input == "/help":
        print("/help - show commands")
        print("/clear - clear history")
        print("/save - save current session")
        print("/load - load a saved session")
        print("/model - change model")
        print("/load-project - load a project folder into context")
        print("/exit - quit")
        return True
    elif user_input == "/clear":
        memory.clear_history()
        return True
    elif user_input == "/exit":
        # SystemExit instead of exit(): the exit() builtin is injected by the
        # site module and isn't guaranteed; also drops the unreachable
        # `return True` that followed it.
        raise SystemExit(0)
    elif user_input == "/save":
        filename = input("Enter session name: ")
        memory.save_session(filename)
        # BUG FIX: previously printed a literal placeholder instead of the
        # session name the user typed.
        print(f"Session saved as '{filename}'")
        return True
    elif user_input == "/load":
        filename = input("Enter session name to load: ")
        memory.load_session(filename)
        # BUG FIX: same placeholder problem as /save.
        print(f"Session '{filename}' loaded!")
        return True
    elif user_input == "/model":
        models = ollama.list()
        # BUG FIX: removed the duplicated `model_names = model_names =`
        # double assignment.
        model_names = [m.model for m in models.models if m.model is not None]
        selected = questionary.select("Select a model:", choices=model_names).ask()
        # .ask() returns None when the user aborts (Ctrl+C / Esc).
        if selected:
            config.current_model = selected
            print(f"Model switched to '{selected}'")
        return True
    elif user_input == "/load-project":
        folder = input("Enter folder path: ")
        print("Loading project...")
        result = tools.load_project(folder)
        # Inject the project dump as a user turn so the model can refer to it.
        memory.add_message("user", f"Here is my project:\n{result}")
        console.print("[green]Project loaded![/green]")
        return True
    else:
        return False
@@ -0,0 +1 @@
1
# Name of the Ollama model used for chat requests.  Mutated at runtime by
# the startup picker in main.py and by the /model command in commands.py.
current_model = "llama3.2"
@@ -0,0 +1,12 @@
1
+ import ollama
2
+
3
+ from axon import config
4
+
5
+
6
def stream_response(conversation_history):
    """Generator yielding text chunks of the model reply as they arrive."""
    stream = ollama.chat(
        model=config.current_model,
        messages=conversation_history,
        stream=True,
    )
    yield from (chunk["message"]["content"] for chunk in stream)
@@ -0,0 +1,75 @@
1
+ import ollama
2
+ import questionary
3
+ from prompt_toolkit import PromptSession
4
+ from prompt_toolkit.completion import WordCompleter
5
+
6
+ from axon import commands, config, memory, tools
7
+ from axon.llm import stream_response
8
+ from axon.renderer import print_welcome, render_markdown, thinking
9
+
10
# --- Startup (runs at import time) ---
# NOTE(review): this interactive model picker executes as a side effect of
# importing axon.main (the console script imports the module before calling
# main()); consider moving it inside main() — confirm nothing relies on the
# import-time prompt first.
models = ollama.list()
model_names = [m.model for m in models.models if m.model is not None]
selected = questionary.select(
    "Select a model to start with:", choices=model_names
).ask()
# .ask() returns None on abort (Ctrl+C) — presumably at least one model is
# pulled and selected; verify before hardening.
config.current_model = selected
print(f"Starting with '{selected}'\n")
print_welcome()

# Slash-command autocomplete; sentence=True matches against the whole input
# line rather than individual words.
command_completer = WordCompleter(
    ["/help", "/clear", "/save", "/load", "/model", "/load-project", "/exit"],
    sentence=True,
)

session = PromptSession(completer=command_completer)
25
+
26
+
27
def main():
    """Run the interactive chat loop: prompt, stream a reply, honor tool calls."""
    while True:
        user_input = session.prompt("\n[>] ")
        if user_input.startswith("/"):
            # handle_command returns False for unrecognized commands; the old
            # loop silently discarded that signal.
            if not commands.handle_command(user_input):
                print("Unknown command. Type /help for a list of commands.")
            continue
        memory.add_message("user", user_input)
        full_response = _stream_full()
        tool, args = tools.parse_tool_call(full_response)
        if tool == "read_file":
            # Keep only the basename — confines reads to the current directory.
            filename = args.split("/")[-1].strip()
            full_response = _finish_tool_call(full_response, tools.read_file(filename))
        elif tool == "run_command":
            full_response = _finish_tool_call(full_response, tools.run_command(args))
        elif tool == "write_file":
            # Model is prompted to reply "filepath|content"; a missing "|"
            # will raise ValueError here — TODO confirm desired handling.
            filepath, content = args.split("|", 1)
            filepath = filepath.split("/")[-1].strip()
            full_response = _finish_tool_call(
                full_response, tools.write_file(filepath, content)
            )
        render_markdown(full_response)
        memory.add_message("assistant", full_response)


def _stream_full():
    """Stream one model reply (spinner shown) and return the full text."""
    with thinking():
        return "".join(stream_response(memory.conversation_history))


def _finish_tool_call(assistant_text, tool_result):
    """Record a tool call and its result, then stream the follow-up reply.

    Replaces three copy-pasted blocks in the original loop; returns the
    follow-up reply text to render.
    """
    memory.add_message("assistant", assistant_text)
    memory.add_message("user", f"Tool result:\n{tool_result}")
    return _stream_full()
72
+
73
+
74
# Direct-run entry point; the installed console script (axon = axon.main:main)
# calls the same function.
if __name__ == "__main__":
    main()
@@ -0,0 +1,42 @@
1
+ import json
2
+
3
# Shared chat transcript in the Ollama messages format; element 0 is the
# permanent system prompt and is preserved by clear_history().  Other modules
# hold references to this list, so it is always mutated in place, never
# rebound.  The prompt text below is runtime data sent to the model verbatim.
conversation_history = [
    {
        "role": "system",
        "content": """You are AXON, a helpful CLI assistant running locally via Ollama.

ONLY use tools when the user EXPLICITLY asks you to read, write, or run something.
NEVER use tools to answer general questions — just respond normally.

To read a file respond ONLY with:
<tool>read_file</tool><args>filepath</args>

To run a command respond ONLY with:
<tool>run_command</tool><args>command</args>

To write a file respond ONLY with:
<tool>write_file</tool><args>filepath|content</args>

Use the EXACT filename the user mentioned. Never use placeholder paths.
For everything else including explaining code, answering questions, summarizing — respond normally and be concise.""",
    }
]
24
+
25
+
26
def add_message(role, content):
    """Append one chat turn (role + content) to the shared history."""
    message = {"role": role, "content": content}
    conversation_history.append(message)
28
+
29
+
30
def clear_history():
    """Reset the conversation, keeping only the initial system prompt."""
    system_prompt = conversation_history[0]
    conversation_history[:] = [system_prompt]
32
+
33
+
34
def save_session(filename):
    """Persist the current conversation to sessions/<filename>.json."""
    import os  # local import: keeps the module's import block unchanged

    # BUG FIX: the path previously contained a literal placeholder instead of
    # the session name, so every save overwrote the same file.  Also create
    # the sessions/ folder on demand instead of crashing when it is missing.
    os.makedirs("sessions", exist_ok=True)
    with open(f"sessions/{filename}.json", "w") as f:
        json.dump(conversation_history, f, indent=2)
37
+
38
+
39
def load_session(filename):
    """Replace the conversation with the one saved as sessions/<filename>.json.

    Raises FileNotFoundError if no such session exists.
    """
    # BUG FIX: the path previously contained a literal placeholder instead of
    # the session name.
    with open(f"sessions/{filename}.json", "r") as f:
        data = json.load(f)
    # In-place slice assignment so modules holding a reference to
    # conversation_history see the loaded session.
    conversation_history[:] = data
@@ -0,0 +1,64 @@
1
+ import os
2
+
3
+ import pyfiglet
4
+ from rich.columns import Columns
5
+ from rich.console import Console
6
+ from rich.live import Live
7
+ from rich.markdown import Markdown
8
+ from rich.panel import Panel
9
+ from rich.spinner import Spinner
10
+ from rich.text import Text
11
+
12
+ from axon import config
13
+
14
+ console = Console()
15
+
16
+
17
def print_token(token):
    """Emit one streamed token immediately, with no trailing newline."""
    print(token, end="", flush=True)
19
+
20
+
21
def render_markdown(markdown_text):
    """Render a markdown string to the terminal via rich."""
    console.print(Markdown(markdown_text))
24
+
25
+
26
def thinking():
    """Return a context manager that animates a 'Thinking...' spinner while open."""
    spinner = Spinner("dots", text=" Thinking...")
    return Live(spinner, console=console)
28
+
29
+
30
def print_banner():
    """Print the AXON name as a bold-cyan ASCII-art banner."""
    banner = Text(pyfiglet.figlet_format("AXON", font="slant"))
    banner.stylize("bold cyan")
    console.print(banner)
35
+
36
+
37
def print_welcome():
    """Print the startup screen: banner plus two side-by-side info panels."""
    print_banner()

    # Left panel: greeting and the currently selected model.
    left_content = Text()
    left_content.append("\n Welcome back!\n\n", style="bold white")
    left_content.append(" ◈\n\n", style="bold cyan")
    left_content.append(f" {config.current_model}\n", style="dim")
    left_content.append(" Local via Ollama\n", style="dim")
    left_panel = Panel(left_content, style="cyan", width=35)

    # Most recent saved session, by lexicographic filename order (so this is
    # "last" alphabetically, not necessarily by modification time).
    sessions = os.listdir("sessions") if os.path.exists("sessions") else []
    activity = (
        f"Last session: {sorted(sessions)[-1].replace('.json', '')}"
        if sessions
        else "No recent activity"
    )

    # Right panel: usage tips and the recent-activity line built above.
    right_content = Text()
    right_content.append("Tips for getting started\n\n", style="bold yellow")
    right_content.append("1. Ask questions, edit files, or run commands.\n")
    right_content.append("2. Use /load-project to load a full codebase.\n")
    right_content.append("3. Use /model to switch models.\n\n")
    right_content.append("Recent activity\n\n", style="bold yellow")
    right_content.append(activity, style="dim")
    right_panel = Panel(right_content, style="cyan", width=60)

    console.print(Columns([left_panel, right_panel]))
    console.print()
@@ -0,0 +1,80 @@
1
+ import os
2
+ import subprocess
3
+
4
+
5
def load_project(folder_path):
    """Walk folder_path and concatenate every supported source file.

    Returns one string with a "=== path ===" header per file, or a message
    when no supported files were found.  Junk folders (VCS metadata, virtual
    environments, node_modules) are skipped entirely.
    """
    # Sets give O(1) membership tests for extensions and folder names.
    allowed = {
        ".py", ".js", ".html", ".css", ".json",
        ".md", ".txt", ".ts", ".jsx", ".tsx",
    }
    skip_folders = {"node_modules", ".git", "__pycache__", ".venv", "venv"}
    parts = []
    file_count = 0
    for root, dirs, files in os.walk(folder_path):
        # Prune in place so os.walk never descends into skipped folders.
        dirs[:] = [d for d in dirs if d not in skip_folders]
        for file in files:
            _, ext = os.path.splitext(file)
            if ext not in allowed:
                continue
            full_path = os.path.join(root, file)
            # read_file returns an error string on failure, which gets
            # embedded in the dump rather than aborting the whole load.
            content = read_file(full_path)
            parts.append(f"\n=== {full_path} ===\n{content}\n")
            file_count += 1
    if file_count == 0:
        return "No supported files found in that folder."
    # join() instead of repeated += — avoids quadratic string building on
    # large projects.
    return f"Loaded {file_count} files:\n{''.join(parts)}"
35
+
36
+
37
def parse_tool_call(text):
    """Extract a (tool_name, args) pair from a model reply.

    Expects the format ``<tool>name</tool><args>payload</args>`` (as
    instructed by the system prompt).  Returns (None, None) when the reply
    is not a well-formed tool call.
    """
    import re  # local import: keeps the module's import block unchanged

    # BUG FIX: the old find()-based slicing produced garbage when a closing
    # tag or the <args> section was missing, because str.find returns -1 and
    # -1 was then used as a slice index.
    match = re.search(r"<tool>(.*?)</tool>.*?<args>(.*?)</args>", text, re.DOTALL)
    if match is None:
        return (None, None)
    return (match.group(1), match.group(2))
50
+
51
+
52
def read_file(file_path):
    """Return the text content of file_path, or an error string on failure.

    Errors are returned rather than raised so they can be fed back to the
    model as a tool result without crashing the chat loop.
    """
    try:
        with open(file_path, "r", encoding="utf-8", errors="ignore") as file:
            return file.read()
    except FileNotFoundError:
        return f"Error: file '{file_path}' not found."
    except OSError as e:
        # Broadened: permission errors, directories, etc. previously escaped
        # and crashed the caller.
        return f"Error: {e}"
58
+
59
+
60
def write_file(file_path, content):
    """Write content to file_path; return a success or error message string."""
    try:
        # encoding="utf-8" for consistency with read_file; the old code used
        # the platform default encoding, which can fail on non-ASCII content
        # (notably on Windows).
        with open(file_path, "w", encoding="utf-8") as file:
            file.write(content)
        return f"Success: file '{file_path}' written."
    except Exception as e:
        # Best-effort by design: errors are reported back as a tool result.
        return f"Error: {e}"
67
+
68
+
69
def run_command(command):
    """Execute `command` in the system shell and return its output as text.

    NOTE: shell=True is intentional — the user types raw shell commands —
    but it means whatever string reaches this function is executed verbatim.
    """
    try:
        completed = subprocess.run(
            command, shell=True, capture_output=True, text=True
        )
        # Prefer stdout; fall back to stderr; report success when both are empty.
        return (
            completed.stdout
            or completed.stderr
            or "Command ran successfully with no output."
        )
    except Exception as e:
        return f"Error: {e}"
@@ -0,0 +1,9 @@
1
+ Metadata-Version: 2.4
2
+ Name: axon-cli
3
+ Version: 0.1.0
4
+ Summary: A local CLI AI assistant powered by Ollama
5
+ Requires-Dist: ollama
6
+ Requires-Dist: rich
7
+ Requires-Dist: prompt_toolkit
8
+ Requires-Dist: pyfiglet
9
+ Requires-Dist: questionary
@@ -0,0 +1,16 @@
1
+ README.md
2
+ pyproject.toml
3
+ axon/__init__.py
4
+ axon/commands.py
5
+ axon/config.py
6
+ axon/llm.py
7
+ axon/main.py
8
+ axon/memory.py
9
+ axon/renderer.py
10
+ axon/tools.py
11
+ axon_cli.egg-info/PKG-INFO
12
+ axon_cli.egg-info/SOURCES.txt
13
+ axon_cli.egg-info/dependency_links.txt
14
+ axon_cli.egg-info/entry_points.txt
15
+ axon_cli.egg-info/requires.txt
16
+ axon_cli.egg-info/top_level.txt
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ axon = axon.main:main
@@ -0,0 +1,5 @@
1
+ ollama
2
+ rich
3
+ prompt_toolkit
4
+ pyfiglet
5
+ questionary
@@ -0,0 +1,3 @@
1
+ axon
2
+ dist
3
+ sessions
@@ -0,0 +1,21 @@
1
+ [project]
2
+ name = "axon-cli"
3
+ version = "0.1.0"
4
+ description = "A local CLI AI assistant powered by Ollama"
5
+ dependencies = [
6
+ "ollama",
7
+ "rich",
8
+ "prompt_toolkit",
9
+ "pyfiglet",
10
+ "questionary"
11
+ ]
12
+
13
+ [project.scripts]
14
+ axon = "axon.main:main"
15
+
16
+ [build-system]
17
+ requires = ["setuptools>=64"]
18
+ build-backend = "setuptools.build_meta"
19
+
20
+ [tool.setuptools.packages.find]
21
+ where = ["."]
+ include = ["axon*"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+