aurafarmer 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,126 @@
1
+ Metadata-Version: 2.4
2
+ Name: aurafarmer
3
+ Version: 0.1.0
4
+ Summary: Aurex — Terminal AI Coding Agent powered by NVIDIA NIM
5
+ License: MIT
6
+ Keywords: ai,coding,terminal,cli,agent,nvidia,nim
7
+ Requires-Python: >=3.9
8
+ Description-Content-Type: text/markdown
9
+ Requires-Dist: openai>=1.0.0
10
+ Requires-Dist: rich>=13.0.0
11
+
12
+ # Aurex
13
+
14
+ > Terminal AI Coding Agent powered by NVIDIA NIM
15
+
16
+ ```
17
+ █████╗ ██╗ ██╗██████╗ ███████╗██╗ ██╗
18
+ ██╔══██╗██║ ██║██╔══██╗██╔════╝╚██╗██╔╝
19
+ ███████║██║ ██║██████╔╝█████╗ ╚███╔╝
20
+ ██╔══██║██║ ██║██╔══██╗██╔══╝ ██╔██╗
21
+ ██║ ██║╚██████╔╝██║ ██║███████╗██╔╝ ██╗
22
+ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝
23
+ ```
24
+
25
+ Aurex is a Claude Code-style AI agent that runs in your terminal. It can read/write files, run commands, search your codebase, and reason through multi-step coding tasks — all powered by NVIDIA NIM.
26
+
27
+ ---
28
+
29
+ ## Install
30
+
31
+ ### Option 1 — From PyPI (after publishing)
32
+
33
+ ```bash
34
+ pip install aurafarmer
35
+ ```
36
+
37
+ ### Option 2 — From GitHub (recommended for now)
38
+
39
+ ```bash
40
+ pip install git+https://github.com/YOUR_USERNAME/aurex.git
41
+ ```
42
+
43
+ ### Option 3 — Local development install
44
+
45
+ ```bash
46
+ git clone https://github.com/YOUR_USERNAME/aurex.git
47
+ cd aurex
48
+ pip install -e .
49
+ ```
50
+
51
+ Then just run:
52
+
53
+ ```bash
54
+ aurex
55
+ ```
56
+
57
+ ---
58
+
59
+ ## Commands
60
+
61
+ | Command | Description |
62
+ |--------------|--------------------------------------|
63
+ | `model` | View / change the active AI model |
64
+ | `clear` | Wipe conversation history |
65
+ | `files` | List files in current directory |
66
+ | `help` | Show command list |
67
+ | `exit` | Quit |
68
+
69
+ ---
70
+
71
+ ## Tools Aurex can use
72
+
73
+ - **read_file** — Read any file
74
+ - **write_file** — Create or overwrite files
75
+ - **run_command** — Execute shell commands
76
+ - **list_files** — Browse directories
77
+ - **search_files** — Search code with grep
78
+ - **delete_file** — Remove files
79
+
80
+ ---
81
+
82
+ ## Available Models
83
+
84
+ Switch models at any time by typing `model` inside Aurex:
85
+
86
+ - moonshotai/kimi-k2.6
87
+ - mistralai/mistral-medium-3.5-128b
88
+ - nvidia/nemotron-3-nano-omni-30b-a3b-reasoning
89
+ - deepseek-ai/deepseek-v4-flash
90
+ - deepseek-ai/deepseek-v4-pro
91
+ - mistralai/mistral-large-3-675b-instruct-2512
92
+ - qwen/qwen3-coder-480b-a35b-instruct
93
+ - meta/llama-3.2-90b-vision-instruct
94
+ - ...and more
95
+
96
+ Type `C` for a completely custom model name.
97
+
98
+ ---
99
+
100
+ ## Publishing to PyPI (when ready)
101
+
102
+ ```bash
103
+ pip install build twine
104
+
105
+ # Build
106
+ python -m build
107
+
108
+ # Upload
109
+ twine upload dist/*
110
+ ```
111
+
112
+ You'll need a PyPI account and API token.
113
+
114
+ ---
115
+
116
+ ## Requirements
117
+
118
+ - Python 3.9+
119
+ - `openai` >= 1.0.0
120
+ - `rich` >= 13.0.0
121
+
122
+ ---
123
+
124
+ ## License
125
+
126
+ MIT
@@ -0,0 +1,115 @@
1
+ # Aurex
2
+
3
+ > Terminal AI Coding Agent powered by NVIDIA NIM
4
+
5
+ ```
6
+ █████╗ ██╗ ██╗██████╗ ███████╗██╗ ██╗
7
+ ██╔══██╗██║ ██║██╔══██╗██╔════╝╚██╗██╔╝
8
+ ███████║██║ ██║██████╔╝█████╗ ╚███╔╝
9
+ ██╔══██║██║ ██║██╔══██╗██╔══╝ ██╔██╗
10
+ ██║ ██║╚██████╔╝██║ ██║███████╗██╔╝ ██╗
11
+ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝
12
+ ```
13
+
14
+ Aurex is a Claude Code-style AI agent that runs in your terminal. It can read/write files, run commands, search your codebase, and reason through multi-step coding tasks — all powered by NVIDIA NIM.
15
+
16
+ ---
17
+
18
+ ## Install
19
+
20
+ ### Option 1 — From PyPI (after publishing)
21
+
22
+ ```bash
23
+ pip install aurafarmer
24
+ ```
25
+
26
+ ### Option 2 — From GitHub (recommended for now)
27
+
28
+ ```bash
29
+ pip install git+https://github.com/YOUR_USERNAME/aurex.git
30
+ ```
31
+
32
+ ### Option 3 — Local development install
33
+
34
+ ```bash
35
+ git clone https://github.com/YOUR_USERNAME/aurex.git
36
+ cd aurex
37
+ pip install -e .
38
+ ```
39
+
40
+ Then just run:
41
+
42
+ ```bash
43
+ aurex
44
+ ```
45
+
46
+ ---
47
+
48
+ ## Commands
49
+
50
+ | Command | Description |
51
+ |--------------|--------------------------------------|
52
+ | `model` | View / change the active AI model |
53
+ | `clear` | Wipe conversation history |
54
+ | `files` | List files in current directory |
55
+ | `help` | Show command list |
56
+ | `exit` | Quit |
57
+
58
+ ---
59
+
60
+ ## Tools Aurex can use
61
+
62
+ - **read_file** — Read any file
63
+ - **write_file** — Create or overwrite files
64
+ - **run_command** — Execute shell commands
65
+ - **list_files** — Browse directories
66
+ - **search_files** — Search code with grep
67
+ - **delete_file** — Remove files
68
+
69
+ ---
70
+
71
+ ## Available Models
72
+
73
+ Switch models at any time by typing `model` inside Aurex:
74
+
75
+ - moonshotai/kimi-k2.6
76
+ - mistralai/mistral-medium-3.5-128b
77
+ - nvidia/nemotron-3-nano-omni-30b-a3b-reasoning
78
+ - deepseek-ai/deepseek-v4-flash
79
+ - deepseek-ai/deepseek-v4-pro
80
+ - mistralai/mistral-large-3-675b-instruct-2512
81
+ - qwen/qwen3-coder-480b-a35b-instruct
82
+ - meta/llama-3.2-90b-vision-instruct
83
+ - ...and more
84
+
85
+ Type `C` for a completely custom model name.
86
+
87
+ ---
88
+
89
+ ## Publishing to PyPI (when ready)
90
+
91
+ ```bash
92
+ pip install build twine
93
+
94
+ # Build
95
+ python -m build
96
+
97
+ # Upload
98
+ twine upload dist/*
99
+ ```
100
+
101
+ You'll need a PyPI account and API token.
102
+
103
+ ---
104
+
105
+ ## Requirements
106
+
107
+ - Python 3.9+
108
+ - `openai` >= 1.0.0
109
+ - `rich` >= 13.0.0
110
+
111
+ ---
112
+
113
+ ## License
114
+
115
+ MIT
@@ -0,0 +1,126 @@
1
+ Metadata-Version: 2.4
2
+ Name: aurafarmer
3
+ Version: 0.1.0
4
+ Summary: Aurex — Terminal AI Coding Agent powered by NVIDIA NIM
5
+ License: MIT
6
+ Keywords: ai,coding,terminal,cli,agent,nvidia,nim
7
+ Requires-Python: >=3.9
8
+ Description-Content-Type: text/markdown
9
+ Requires-Dist: openai>=1.0.0
10
+ Requires-Dist: rich>=13.0.0
11
+
12
+ # Aurex
13
+
14
+ > Terminal AI Coding Agent powered by NVIDIA NIM
15
+
16
+ ```
17
+ █████╗ ██╗ ██╗██████╗ ███████╗██╗ ██╗
18
+ ██╔══██╗██║ ██║██╔══██╗██╔════╝╚██╗██╔╝
19
+ ███████║██║ ██║██████╔╝█████╗ ╚███╔╝
20
+ ██╔══██║██║ ██║██╔══██╗██╔══╝ ██╔██╗
21
+ ██║ ██║╚██████╔╝██║ ██║███████╗██╔╝ ██╗
22
+ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝
23
+ ```
24
+
25
+ Aurex is a Claude Code-style AI agent that runs in your terminal. It can read/write files, run commands, search your codebase, and reason through multi-step coding tasks — all powered by NVIDIA NIM.
26
+
27
+ ---
28
+
29
+ ## Install
30
+
31
+ ### Option 1 — From PyPI (after publishing)
32
+
33
+ ```bash
34
+ pip install aurafarmer
35
+ ```
36
+
37
+ ### Option 2 — From GitHub (recommended for now)
38
+
39
+ ```bash
40
+ pip install git+https://github.com/YOUR_USERNAME/aurex.git
41
+ ```
42
+
43
+ ### Option 3 — Local development install
44
+
45
+ ```bash
46
+ git clone https://github.com/YOUR_USERNAME/aurex.git
47
+ cd aurex
48
+ pip install -e .
49
+ ```
50
+
51
+ Then just run:
52
+
53
+ ```bash
54
+ aurex
55
+ ```
56
+
57
+ ---
58
+
59
+ ## Commands
60
+
61
+ | Command | Description |
62
+ |--------------|--------------------------------------|
63
+ | `model` | View / change the active AI model |
64
+ | `clear` | Wipe conversation history |
65
+ | `files` | List files in current directory |
66
+ | `help` | Show command list |
67
+ | `exit` | Quit |
68
+
69
+ ---
70
+
71
+ ## Tools Aurex can use
72
+
73
+ - **read_file** — Read any file
74
+ - **write_file** — Create or overwrite files
75
+ - **run_command** — Execute shell commands
76
+ - **list_files** — Browse directories
77
+ - **search_files** — Search code with grep
78
+ - **delete_file** — Remove files
79
+
80
+ ---
81
+
82
+ ## Available Models
83
+
84
+ Switch models at any time by typing `model` inside Aurex:
85
+
86
+ - moonshotai/kimi-k2.6
87
+ - mistralai/mistral-medium-3.5-128b
88
+ - nvidia/nemotron-3-nano-omni-30b-a3b-reasoning
89
+ - deepseek-ai/deepseek-v4-flash
90
+ - deepseek-ai/deepseek-v4-pro
91
+ - mistralai/mistral-large-3-675b-instruct-2512
92
+ - qwen/qwen3-coder-480b-a35b-instruct
93
+ - meta/llama-3.2-90b-vision-instruct
94
+ - ...and more
95
+
96
+ Type `C` for a completely custom model name.
97
+
98
+ ---
99
+
100
+ ## Publishing to PyPI (when ready)
101
+
102
+ ```bash
103
+ pip install build twine
104
+
105
+ # Build
106
+ python -m build
107
+
108
+ # Upload
109
+ twine upload dist/*
110
+ ```
111
+
112
+ You'll need a PyPI account and API token.
113
+
114
+ ---
115
+
116
+ ## Requirements
117
+
118
+ - Python 3.9+
119
+ - `openai` >= 1.0.0
120
+ - `rich` >= 13.0.0
121
+
122
+ ---
123
+
124
+ ## License
125
+
126
+ MIT
@@ -0,0 +1,14 @@
1
+ README.md
2
+ pyproject.toml
3
+ aurafarmer.egg-info/PKG-INFO
4
+ aurafarmer.egg-info/SOURCES.txt
5
+ aurafarmer.egg-info/dependency_links.txt
6
+ aurafarmer.egg-info/entry_points.txt
7
+ aurafarmer.egg-info/requires.txt
8
+ aurafarmer.egg-info/top_level.txt
9
+ aurex/__init__.py
10
+ aurex/agent.py
11
+ aurex/config.py
12
+ aurex/main.py
13
+ aurex/tools.py
14
+ aurex/ui.py
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ aurex = aurex.main:main
@@ -0,0 +1,2 @@
1
+ openai>=1.0.0
2
+ rich>=13.0.0
@@ -0,0 +1 @@
1
+ aurex
@@ -0,0 +1,2 @@
1
+ __version__ = "0.1.0"
2
+ __author__ = "aurex"
@@ -0,0 +1,148 @@
1
+ import json
2
+ from openai import OpenAI
3
+
4
+ from . import config
5
+ from . import tools as tool_module
6
+
7
# System prompt sent as the "system" role at the start of every completion turn.
SYSTEM_PROMPT = """\
You are Aurex, an elite AI coding agent running directly in the developer's terminal.

You have access to tools that let you read and write files, run shell commands, search code, \
and more. When asked to do something, do it — don't just describe how. Be concise, decisive, \
and action-oriented.

Rules:
- Always use tools to act on the filesystem rather than only describing changes.
- When writing code, write the entire file, not just snippets.
- When running commands, show what you're doing and why.
- Keep responses tight — no filler, no disclaimers.
"""
20
+
21
+
22
class AurexAgent:
    """Conversation-holding agent that streams model output and executes tools.

    All user-visible events are surfaced through the generator returned by
    :meth:`chat`; the agent itself never prints.
    """

    def __init__(self):
        # Rolling conversation history; the system prompt is prepended per call.
        self.history: list[dict] = []
        self.client = OpenAI(api_key=config.API_KEY, base_url=config.BASE_URL)

    # ── Public ─────────────────────────────────────────────────────────────────

    def clear_history(self) -> None:
        """Forget the entire conversation."""
        self.history = []

    def chat(self, user_message: str):
        """
        Generator — yields (event_type, data).

        event_type   data
        ----------   ----------------------------------------------------
        "text"       str chunk
        "tool_use"   {"name": str, "args": dict}
        "tool_res"   {"name": str, "result": str}
        "error"      str
        """
        self.history.append({"role": "user", "content": user_message})
        messages = [{"role": "system", "content": SYSTEM_PROMPT}] + self.history

        MAX_ITERS = 10
        for _ in range(MAX_ITERS):
            text, tool_calls, error = yield from self._call_api(messages)

            if error:
                yield ("error", error)
                return

            if not tool_calls:
                # No tool calls — the model produced its final answer.
                if text:
                    self.history.append({"role": "assistant", "content": text})
                return

            # Record the assistant turn with its tool_calls attached.
            messages.append({
                "role": "assistant",
                "content": text or None,
                "tool_calls": self._format_tool_calls(tool_calls),
            })

            # Execute each tool and feed the results back to the model.
            for tc in tool_calls:
                try:
                    args = json.loads(tc["arguments"] or "{}")
                except json.JSONDecodeError:
                    # Malformed arguments from the model: run with none.
                    args = {}

                yield ("tool_use", {"name": tc["name"], "args": args})
                result = tool_module.execute_tool(tc["name"], args)
                yield ("tool_res", {"name": tc["name"], "result": result})

                messages.append({
                    "role": "tool",
                    "tool_call_id": tc["id"],
                    "content": result,
                })
            # Loop again so the model can see the tool results.

        # BUGFIX: previously the generator ended silently when the model kept
        # calling tools for MAX_ITERS turns; surface that as an error event.
        yield ("error", f"Stopped after {MAX_ITERS} tool iterations without a final answer.")

    # ── Internal ───────────────────────────────────────────────────────────────

    def _call_api(self, messages: list):
        """
        Stream one completion turn.

        Yields ("text", chunk) for streamed text content.
        Returns (full_text, tool_calls, error_or_None).
        """
        full_text = ""
        tool_calls_acc: dict[int, dict] = {}
        error = None

        try:
            stream = self.client.chat.completions.create(
                model=config.get_model(),
                messages=messages,
                tools=tool_module.TOOL_DEFINITIONS,
                stream=True,
                max_tokens=4096,
            )

            for chunk in stream:
                if not chunk.choices:
                    continue
                delta = chunk.choices[0].delta

                # Streamed text content.
                if delta.content:
                    full_text += delta.content
                    yield ("text", delta.content)

                # Tool-call fragments arrive piecemeal; accumulate by index.
                if delta.tool_calls:
                    for tc in delta.tool_calls:
                        acc = tool_calls_acc.setdefault(
                            tc.index, {"id": "", "name": "", "arguments": ""}
                        )
                        if tc.id:
                            acc["id"] = tc.id
                        if tc.function:
                            if tc.function.name:
                                acc["name"] += tc.function.name
                            if tc.function.arguments:
                                acc["arguments"] += tc.function.arguments

        except Exception as exc:
            # Network/API failure — reported to the caller as an "error" event.
            error = str(exc)

        return full_text, list(tool_calls_acc.values()), error

    @staticmethod
    def _format_tool_calls(tool_calls: list[dict]) -> list[dict]:
        """Convert accumulated fragments into the OpenAI tool_calls shape."""
        return [
            {
                "id": tc["id"],
                "type": "function",
                "function": {"name": tc["name"], "arguments": tc["arguments"]},
            }
            for tc in tool_calls
        ]
@@ -0,0 +1,61 @@
1
+ import json
2
+ from pathlib import Path
3
+
4
import os

# ── API ────────────────────────────────────────────────────────────────────────
# SECURITY FIX: the API key used to be hardcoded here, which publishes the
# credential to everyone who installs the package. Read it from the
# environment instead (the previously shipped key must be rotated).
API_KEY = os.environ.get("NVIDIA_API_KEY", "")
BASE_URL = "https://integrate.api.nvidia.com/v1"

# ── Available models ───────────────────────────────────────────────────────────
MODELS = [
    "moonshotai/kimi-k2.6",
    "mistralai/mistral-medium-3.5-128b",
    "nvidia/nemotron-3-nano-omni-30b-a3b-reasoning",
    "deepseek-ai/deepseek-v4-flash",
    "deepseek-ai/deepseek-v4-pro",
    "z-ai/glm-5.1",
    "z-ai/glm-4.7",
    "minimaxai/minimax-m2.7",
    "google/gemma-4-31b-it",
    "mistralai/mistral-small-4-119b-2603",
    "nvidia/nemotron-3-super-120b-a12b",
    "qwen/qwen3.5-122b-a10b",
    "qwen/qwen3.5-397b-a17b",
    "stepfun-ai/step-3.5-flash",
    "mistralai/mistral-large-3-675b-instruct-2512",
    "stockmark/stockmark-2-100b-instruct",
    "qwen/qwen3-coder-480b-a35b-instruct",
    "openai/gpt-oss-120b",
    "meta/llama-3.2-90b-vision-instruct",
]

# Default model is simply the first list entry.
DEFAULT_MODEL = MODELS[0]

# ── Persistence ────────────────────────────────────────────────────────────────
# Settings live in ~/.aurex/config.json.
CONFIG_DIR = Path.home() / ".aurex"
CONFIG_FILE = CONFIG_DIR / "config.json"
36
+
37
+
38
def load_config() -> dict:
    """Load persisted settings, falling back to defaults on any problem.

    Returns a dict that always contains a "model" key: defaults are merged
    under whatever was read from disk, so a config file written by an older
    version (or hand-edited without "model") still yields a usable config.
    """
    defaults = {"model": DEFAULT_MODEL}
    try:
        with open(CONFIG_FILE) as f:
            loaded = json.load(f)
        # Guard against a hand-edited file containing non-dict JSON.
        if isinstance(loaded, dict):
            return {**defaults, **loaded}
    except (OSError, json.JSONDecodeError):
        # Missing or corrupt file — silently fall back to defaults.
        pass
    return defaults
46
+
47
+
48
def save_config(cfg: dict) -> None:
    """Persist *cfg* as pretty-printed JSON under the config directory."""
    CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    CONFIG_FILE.write_text(json.dumps(cfg, indent=2))
52
+
53
+
54
def get_model() -> str:
    """Return the currently configured model name."""
    cfg = load_config()
    return cfg.get("model", DEFAULT_MODEL)
56
+
57
+
58
def set_model(model: str) -> None:
    """Persist *model* as the active model, keeping other settings intact."""
    save_config({**load_config(), "model": model})
@@ -0,0 +1,132 @@
1
+ import os
2
+ import sys
3
+
4
+ from rich.console import Console
5
+
6
+ from . import config, ui
7
+ from .agent import AurexAgent
8
+ from . import tools as tool_module
9
+
10
+ console = Console()
11
+
12
+
13
+ # ── Model command ──────────────────────────────────────────────────────────────
14
+
15
def handle_model_command(agent: AurexAgent) -> None:
    """Interactive model picker.

    A number selects from the list, "C" prompts for a custom name, and a
    bare Enter keeps the current model. Ctrl-C / EOF aborts silently.
    """
    models = config.MODELS
    active = config.get_model()

    ui.print_current_model(active)
    ui.print_model_list(models, active)

    try:
        selection = console.input(" [bold red]❯[/bold red] ").strip()
    except (KeyboardInterrupt, EOFError):
        return

    if not selection:
        ui.print_info(f"Keeping: {active}")
        return

    if selection.upper() == "C":
        try:
            custom = console.input(" [dim red]Custom model name:[/dim red] ").strip()
        except (KeyboardInterrupt, EOFError):
            return
        if custom:
            config.set_model(custom)
            ui.print_info(f"Model set to [bold red]{custom}[/bold red]")
        return

    try:
        index = int(selection) - 1
    except ValueError:
        ui.print_error("Invalid input — enter a number or C.")
        return

    if 0 <= index < len(models):
        chosen = models[index]
        config.set_model(chosen)
        ui.print_info(f"Model set to [bold red]{chosen}[/bold red]")
    else:
        ui.print_error(f"Enter a number between 1 and {len(models)}.")
50
+
51
+
52
+ # ── Agent runner ───────────────────────────────────────────────────────────────
53
+
54
def run_agent(agent: AurexAgent, user_input: str) -> None:
    """Stream one agent turn to the terminal, rendering each event as it arrives."""
    console.print()
    shown_header = False

    for kind, payload in agent.chat(user_input):
        if kind == "text":
            # Print the "◆ Aurex" heading once, before the first text chunk.
            if not shown_header:
                ui.print_assistant_header()
                shown_header = True
            console.print(payload, end="", markup=False, highlight=False)

        elif kind == "tool_use":
            console.print()
            ui.print_tool_use(payload["name"], payload["args"])

        elif kind == "tool_res":
            ui.print_tool_result(payload["name"], payload["result"])

        elif kind == "error":
            ui.print_error(payload)

    console.print("\n")  # breathing room after response
77
+
78
+
79
+ # ── Main REPL ──────────────────────────────────────────────────────────────────
80
+
81
def main() -> None:
    """Entry point: clear the screen, print the banner, then run the REPL."""
    os.system("clear" if os.name == "posix" else "cls")
    ui.print_banner()

    active_model = config.get_model()
    console.print(f" [dim]Model [bold red]{active_model}[/bold red][/dim]")
    console.print(f" [dim]CWD   [bold red]{os.getcwd()}[/bold red][/dim]\n")
    ui.print_rule()
    console.print()

    agent = AurexAgent()

    while True:
        try:
            raw = console.input("[bold red]❯ [/bold red]").strip()
        except (KeyboardInterrupt, EOFError):
            console.print("\n\n[bold red]Goodbye.[/bold red]\n")
            sys.exit(0)

        if not raw:
            continue

        command = raw.lower()

        if command in ("exit", "quit", "bye", "q"):
            console.print("\n[bold red]Goodbye.[/bold red]\n")
            sys.exit(0)

        if command == "help":
            ui.print_help()

        elif command == "model":
            handle_model_command(agent)

        elif command == "clear":
            agent.clear_history()
            os.system("clear" if os.name == "posix" else "cls")
            ui.print_banner()
            ui.print_info("Conversation cleared.")

        elif command in ("files", "ls"):
            listing = tool_module.list_files(".")
            console.print(f"\n[dim red]  {os.getcwd()}[/dim red]")
            console.print(listing)
            console.print()

        else:
            # Anything else is treated as a prompt for the agent.
            run_agent(agent, raw)


if __name__ == "__main__":
    main()
@@ -0,0 +1,189 @@
1
+ import subprocess
2
+ from pathlib import Path
3
+
4
+ # ── Tool schemas (OpenAI function-calling format) ──────────────────────────────
5
def _tool(name: str, description: str, properties: dict, required: list) -> dict:
    """Build one tool schema in the OpenAI function-calling format."""
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
            },
        },
    }


# ── Tool schemas (OpenAI function-calling format) ──────────────────────────────
TOOL_DEFINITIONS = [
    _tool(
        "read_file",
        "Read the full contents of a file on disk.",
        {"path": {"type": "string", "description": "Path to the file"}},
        ["path"],
    ),
    _tool(
        "write_file",
        "Create or overwrite a file with the given content.",
        {
            "path": {"type": "string", "description": "Path to the file"},
            "content": {"type": "string", "description": "Content to write"},
        },
        ["path", "content"],
    ),
    _tool(
        "run_command",
        "Execute a shell command and return stdout + stderr.",
        {"command": {"type": "string", "description": "Shell command to run"}},
        ["command"],
    ),
    _tool(
        "list_files",
        "List files and directories at a given path.",
        {"path": {"type": "string", "description": "Directory path (default: .)"}},
        [],
    ),
    _tool(
        "search_files",
        "Search for a text pattern inside files recursively.",
        {
            "pattern": {"type": "string", "description": "Text to search for"},
            "path": {"type": "string", "description": "Root directory (default: .)"},
            "file_pattern": {"type": "string", "description": "Glob filter e.g. *.py"},
        },
        ["pattern"],
    ),
    _tool(
        "delete_file",
        "Delete a file from disk.",
        {"path": {"type": "string", "description": "Path to the file to delete"}},
        ["path"],
    ),
]
94
+
95
+
96
+ # ── Implementations ────────────────────────────────────────────────────────────
97
+
98
def read_file(path: str) -> str:
    """Return the file's UTF-8 text, or a human-readable error string."""
    target = Path(path)
    try:
        return target.read_text(encoding="utf-8")
    except FileNotFoundError:
        return f"Error: '{path}' not found."
    except Exception as e:
        return f"Error reading file: {e}"
105
+
106
+
107
def write_file(path: str, content: str) -> str:
    """Write *content* to *path* (creating parent dirs); return a status string."""
    try:
        destination = Path(path)
        destination.parent.mkdir(parents=True, exist_ok=True)
        destination.write_text(content, encoding="utf-8")
    except Exception as e:
        return f"Error writing file: {e}"
    return f"Wrote {len(content)} chars to '{path}'."
115
+
116
+
117
def run_command(command: str) -> str:
    """Run *command* through the shell and return its combined output.

    stdout comes first, stderr (if any) under a "[stderr]" marker; if both
    are empty the exit code is reported instead. Commands are killed after
    60 seconds.
    """
    try:
        proc = subprocess.run(
            command, shell=True, capture_output=True, text=True, timeout=60
        )
    except subprocess.TimeoutExpired:
        return "Error: command timed out after 60 s."
    except Exception as e:
        return f"Error: {e}"

    stdout = proc.stdout.strip()
    stderr = proc.stderr.strip()
    sections = []
    if stdout:
        sections.append(stdout)
    if stderr:
        sections.append(f"[stderr]\n{stderr}")
    return "\n".join(sections) if sections else f"(exit code {proc.returncode})"
136
+
137
+
138
def list_files(path: str = ".") -> str:
    """Return a one-entry-per-line listing of *path* (dirs first marker, sizes for files)."""
    try:
        entries = sorted(Path(path).iterdir())
        lines = []
        for entry in entries:
            if entry.is_dir():
                lines.append(f"  📁 {entry.name}/")
            else:
                lines.append(f"  📄 {entry.name} ({entry.stat().st_size:,} B)")
        return "\n".join(lines) if lines else "(empty)"
    except Exception as e:
        return f"Error: {e}"
151
+
152
+
153
def search_files(pattern: str, path: str = ".", file_pattern: str = "*") -> str:
    """Recursively grep *path* for *pattern*, optionally filtered by a glob.

    SECURITY FIX: the previous implementation interpolated *pattern* and
    *path* into a shell string, so quotes or shell metacharacters in either
    could break the command or inject arbitrary shell code. We now pass an
    argument list with shell=False, and reproduce run_command's output
    formatting (stdout, then "[stderr]", else the exit code).
    """
    argv = ["grep", "-rn", f"--include={file_pattern}", pattern, path]
    try:
        result = subprocess.run(argv, capture_output=True, text=True, timeout=60)
    except subprocess.TimeoutExpired:
        return "Error: command timed out after 60 s."
    except Exception as e:
        return f"Error: {e}"

    out = result.stdout.strip()
    err = result.stderr.strip()
    parts = []
    if out:
        parts.append(out)
    if err:
        parts.append(f"[stderr]\n{err}")
    # grep exits 1 on "no matches" — surfaced the same way run_command does.
    return "\n".join(parts) if parts else f"(exit code {result.returncode})"
156
+
157
+
158
def delete_file(path: str) -> str:
    """Remove the file at *path*; return a status or error string."""
    try:
        Path(path).unlink()
    except FileNotFoundError:
        return f"Error: '{path}' not found."
    except Exception as e:
        return f"Error: {e}"
    return f"Deleted '{path}'."
166
+
167
+
168
+ # ── Dispatcher ─────────────────────────────────────────────────────────────────
169
+
170
def execute_tool(name: str, args: dict) -> str:
    """Look up *name* in the dispatch table and invoke it with *args*.

    Unknown names and any exception raised by a tool (including missing
    argument keys) are reported as strings rather than raised.
    """
    handlers = {
        "read_file": lambda a: read_file(a["path"]),
        "write_file": lambda a: write_file(a["path"], a["content"]),
        "run_command": lambda a: run_command(a["command"]),
        "list_files": lambda a: list_files(a.get("path", ".")),
        "search_files": lambda a: search_files(
            a["pattern"],
            a.get("path", "."),
            a.get("file_pattern", "*"),
        ),
        "delete_file": lambda a: delete_file(a["path"]),
    }
    if name not in handlers:
        return f"Unknown tool: {name}"
    try:
        return handlers[name](args)
    except Exception as e:
        return f"Tool error: {e}"
@@ -0,0 +1,150 @@
1
+ import json
2
+ from rich.console import Console
3
+ from rich.panel import Panel
4
+ from rich.table import Table
5
+ from rich.text import Text
6
+ from rich.rule import Rule
7
+ from rich.markdown import Markdown
8
+ from rich import box
9
+
10
+ console = Console()
11
+
12
# ── Banner ─────────────────────────────────────────────────────────────────────
# ASCII-art "AUREX" wordmark shown at startup and after `clear`.
BANNER = """\
 █████╗ ██╗   ██╗██████╗ ███████╗██╗  ██╗
██╔══██╗██║   ██║██╔══██╗██╔════╝╚██╗██╔╝
███████║██║   ██║██████╔╝█████╗   ╚███╔╝
██╔══██║██║   ██║██╔══██╗██╔══╝   ██╔██╗
██║  ██║╚██████╔╝██║  ██║███████╗██╔╝ ██╗
╚═╝  ╚═╝ ╚═════╝ ╚═╝  ╚═╝╚══════╝╚═╝  ╚═╝"""

# One emoji per tool name, used in the tool-use panels. Some entries carry a
# trailing space to compensate for double-width emoji rendering.
TOOL_ICONS = {
    "read_file": "📖",
    "write_file": "✏️ ",
    "run_command": "⚡",
    "list_files": "📁",
    "search_files": "🔍",
    "delete_file": "🗑️ ",
}
29
+
30
+
31
+ # ── Core ───────────────────────────────────────────────────────────────────────
32
+
33
def print_banner() -> None:
    """Render the ASCII-art banner plus the one-line usage hint."""
    console.print(BANNER, style="bold red")
    hint = (
        "\n [dim]AI Coding Agent · Powered by NVIDIA NIM[/dim]\n"
        " [dim]Type [bold red]help[/] for commands · "
        "[bold red]model[/] to switch · "
        "[bold red]exit[/] to quit[/dim]\n"
    )
    console.print(hint)
41
+
42
+
43
def print_rule() -> None:
    """Draw a full-width dim red divider."""
    divider = Rule(style="dim red")
    console.print(divider)
45
+
46
+
47
def print_error(msg: str) -> None:
    """Print *msg* on its own line, styled as an error."""
    console.print(f"\n[bold red]✗[/bold red] [red]{msg}[/red]\n")
49
+
50
+
51
def print_info(msg: str) -> None:
    """Print *msg* as an informational note."""
    console.print(f"\n[dim red]ℹ[/dim red] {msg}\n")
53
+
54
+
55
+ # ── Tool events ────────────────────────────────────────────────────────────────
56
+
57
def print_tool_use(name: str, args: dict) -> None:
    """Show a rounded panel describing a tool invocation and its arguments."""
    icon = TOOL_ICONS.get(name, "🔧")
    body = Text()
    for key, value in args.items():
        body.append(f"  {key}: ", style="dim red")
        # Truncate long values so a big file write doesn't flood the panel.
        rendered = value if len(str(value)) <= 120 else str(value)[:120] + "…"
        body.append(f"{rendered}\n", style="white")
    panel = Panel(
        body,
        title=f"[bold red]{icon} {name}[/bold red]",
        border_style="red",
        padding=(0, 1),
        box=box.ROUNDED,
    )
    console.print(panel)
68
+
69
+
70
def print_tool_result(name: str, result: str) -> None:
    """Show a (possibly truncated) tool result inside a dim panel."""
    MAX = 600
    if len(result) <= MAX:
        display = result
    else:
        display = result[:MAX] + f"\n[dim]… {len(result)-MAX} more chars[/dim]"
    panel = Panel(
        display,
        title="[dim red]result[/dim red]",
        border_style="dim red",
        padding=(0, 1),
        box=box.ROUNDED,
    )
    console.print(panel)
82
+
83
+
84
+ # ── Assistant output ───────────────────────────────────────────────────────────
85
+
86
def print_assistant_header() -> None:
    """Print the heading shown before streamed assistant text."""
    console.print("\n[bold red]◆ Aurex[/bold red]\n")
88
+
89
+
90
+ # ── Help ───────────────────────────────────────────────────────────────────────
91
+
92
def print_help() -> None:
    """Render the command reference table."""
    table = Table(
        title="[bold red]Aurex Commands[/bold red]",
        border_style="red",
        header_style="bold red",
        box=box.ROUNDED,
        show_lines=True,
    )
    table.add_column("Command", style="red", no_wrap=True)
    table.add_column("Description", style="white")

    for cmd, desc in (
        ("model", "Show current model / change model"),
        ("clear", "Wipe conversation history"),
        ("files", "List files in the current directory"),
        ("help", "Show this help panel"),
        ("exit / quit", "Exit Aurex"),
    ):
        table.add_row(cmd, desc)

    console.print("\n", table, "\n")
114
+
115
+
116
+ # ── Model UI ───────────────────────────────────────────────────────────────────
117
+
118
def print_current_model(model: str) -> None:
    """Show the active model name inside a red panel."""
    panel = Panel(
        f"[white]{model}[/white]",
        title="[bold red]Active Model[/bold red]",
        border_style="red",
        padding=(0, 2),
        box=box.ROUNDED,
    )
    console.print(panel)
128
+
129
+
130
def print_model_list(models: list[str], current: str) -> None:
    """Render the numbered model table plus the selection instructions."""
    table = Table(
        title="[bold red]Available Models[/bold red]",
        border_style="red",
        header_style="bold red",
        box=box.ROUNDED,
        show_lines=True,
    )
    table.add_column("#", style="dim red", width=4, justify="right")
    table.add_column("Model", style="white")
    table.add_column("", style="green", width=12)

    for number, name in enumerate(models, 1):
        marker = "◀ active" if name == current else ""
        table.add_row(str(number), name, marker)

    console.print("\n", table)
    console.print(
        "\n [dim]Enter [bold red]number[/] to select · "
        "[bold red]C[/] for custom model · "
        "[bold red]Enter[/] to keep current[/dim]\n"
    )
@@ -0,0 +1,24 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "aurafarmer"
7
+ version = "0.1.0"
8
+ description = "Aurex — Terminal AI Coding Agent powered by NVIDIA NIM"
9
+ readme = "README.md"
10
+ requires-python = ">=3.9"
11
+ license = { text = "MIT" }
12
+ keywords = ["ai", "coding", "terminal", "cli", "agent", "nvidia", "nim"]
13
+
14
+ dependencies = [
15
+ "openai>=1.0.0",
16
+ "rich>=13.0.0",
17
+ ]
18
+
19
+ [project.scripts]
20
+ aurex = "aurex.main:main"
21
+
22
+ [tool.setuptools.packages.find]
23
+ where = ["."]
24
+ include = ["aurex*"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+