aria-x 1.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aria_x-1.0.1/PKG-INFO ADDED
@@ -0,0 +1,174 @@
1
+ Metadata-Version: 2.4
2
+ Name: aria-x
3
+ Version: 1.0.1
4
+ Summary: ARIA — Autonomous Reasoning and Intelligent Agent. Your project-aware coding partner.
5
+ Author-email: Sumit <samsungsumitv461@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/Lonerider007/aria-agent
8
+ Keywords: ai,agent,cli,coding,llm,ollama
9
+ Classifier: Development Status :: 4 - Beta
10
+ Classifier: Environment :: Console
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Topic :: Software Development
13
+ Classifier: Programming Language :: Python :: 3
14
+ Requires-Python: >=3.10
15
+ Description-Content-Type: text/markdown
16
+ Requires-Dist: openai>=1.0.0
17
+ Requires-Dist: rich>=13.0.0
18
+
19
+ # ◉ ARIA
20
+ ### Autonomous Reasoning and Intelligent Agent
21
+
22
+ > Your project-aware coding partner. Reason before action.
23
+
24
+ ARIA is an open-source CLI agent that works inside your project boundaries like a senior engineer — it plans before acting, validates its work, and remembers context across sessions.
25
+
26
+ ---
27
+
28
+ ## Demo
29
+
30
+ ```
31
+ ◉ aria(my-project) › Build a REST API with FastAPI and test it
32
+
33
+ ? What endpoints should the API have?
34
+ Answer: GET /health, POST /items, GET /items
35
+
36
+ ─────────────────── Plan ───────────────────
37
+ Goal: Build FastAPI REST API
38
+
39
+ 1. Scaffold project with venv + git
40
+ 2. Write main.py with endpoints
41
+ 3. Install dependencies
42
+ 4. Run server and test with curl
43
+ 5. Update README
44
+
45
+ Proceed? (yes / no / modify): yes
46
+
47
+ 1. new_project 'fastapi-api'
48
+ 2. write_file 'main.py'
49
+ 3. run_command '.venv/bin/pip install -r requirements.txt'
50
+ │ Successfully installed fastapi uvicorn
51
+ 4. run_command 'curl http://localhost:8000/health'
52
+ │ {"status":"ok"}
53
+
54
+ ─────────────────── Report ─────────────────
55
+ ✅ API built and tested successfully.
56
+ Files: main.py, requirements.txt, README.md
57
+ Run: uvicorn main:app --reload
58
+ ```
59
+
60
+ ---
61
+
62
+ ## Features
63
+
64
+ - **Plan before action** — shows you what it will do, waits for approval
65
+ - **Validates everything** — runs code, reads output, fixes errors automatically
66
+ - **Project memory** — remembers stack, decisions, and context across sessions
67
+ - **Project isolation** — auto-creates git, venv, .env, .gitignore, README
68
+ - **Approval system** — asks before dangerous operations
69
+ - **Beautiful diffs** — shows exactly what changed in every file
70
+ - **Git tools** — commit, diff, branch from natural language
71
+ - **Streaming responses** — real-time output, not a black box
72
+
73
+ ---
74
+
75
+ ## Install
76
+
77
+ ```bash
78
+ pip install aria-x
79
+ ```
80
+
81
+ Requirements: Python 3.10+, [Ollama](https://ollama.com) (for local/cloud models)
82
+
83
+ ---
84
+
85
+ ## Usage
86
+
87
+ ```bash
88
+ # Run with default model
89
+ aria
90
+
91
+ # Run with specific model
92
+ aria --model nemotron-3-super:cloud
93
+
94
+ # Run with custom workspace
95
+ aria --workspace ~/my-projects
96
+ ```
97
+
98
+ ---
99
+
100
+ ## Slash Commands
101
+
102
+ | Command | Description |
103
+ |---|---|
104
+ | `/help` | Show all commands |
105
+ | `/status` | Session info — model, workspace, turns |
106
+ | `/clear` | Clear conversation history |
107
+ | `/model <name>` | Switch model mid-session |
108
+ | `/workspace <path>` | Change workspace |
109
+ | `/apikey` | Update API key |
110
+ | `/projects` | List all ARIA projects |
111
+ | `/memory` | Show persistent memory |
112
+ | `/tools` | List all agent tools |
113
+ | `/exit` | Exit ARIA |
114
+
115
+ ---
116
+
117
+ ## Models
118
+
119
+ ARIA works with any Ollama-compatible model:
120
+
121
+ ```bash
122
+ aria --model nemotron-3-super:cloud # Ollama cloud
123
+ aria --model llama3.3 # Local via Ollama
124
+ aria --model qwen2.5-coder:32b # Local coding model
125
+ ```
126
+
127
+ ---
128
+
129
+ ## How it works
130
+
131
+ Every task follows this workflow:
132
+
133
+ ```
134
+ Clarify → Plan → Approve → Execute → Validate → Remember → Report
135
+ ```
136
+
137
+ ARIA never silently modifies your system. Every action is visible, every dangerous operation requires your approval.
138
+
139
+ ---
140
+
141
+ ## Project Memory
142
+
143
+ ARIA stores project context in `~/.aria/`:
144
+
145
+ ```
146
+ ~/.aria/
147
+ ├── user_memory.json # Your preferences
148
+ └── projects/
149
+ └── my-project/
150
+ ├── meta.json # Stack, status, path
151
+ ├── memory.json # Key decisions
152
+ └── progress.md # Milestone history
153
+ ```
154
+
155
+ ---
156
+
157
+ ## Built by
158
+
159
+ **Sumit** — independent developer
160
+
161
+ ---
162
+
163
+ ## Contact
164
+
165
+ For feedback, collaborations, or business inquiries:
166
+
167
+ **Email:** samsungsumitv461@gmail.com
168
+ **GitHub:** [Lonerider007](https://github.com/Lonerider007)
169
+
170
+ ---
171
+
172
+ ## License
173
+
174
+ MIT
aria_x-1.0.1/README.md ADDED
@@ -0,0 +1,156 @@
1
+ # ◉ ARIA
2
+ ### Autonomous Reasoning and Intelligent Agent
3
+
4
+ > Your project-aware coding partner. Reason before action.
5
+
6
+ ARIA is an open-source CLI agent that works inside your project boundaries like a senior engineer — it plans before acting, validates its work, and remembers context across sessions.
7
+
8
+ ---
9
+
10
+ ## Demo
11
+
12
+ ```
13
+ ◉ aria(my-project) › Build a REST API with FastAPI and test it
14
+
15
+ ? What endpoints should the API have?
16
+ Answer: GET /health, POST /items, GET /items
17
+
18
+ ─────────────────── Plan ───────────────────
19
+ Goal: Build FastAPI REST API
20
+
21
+ 1. Scaffold project with venv + git
22
+ 2. Write main.py with endpoints
23
+ 3. Install dependencies
24
+ 4. Run server and test with curl
25
+ 5. Update README
26
+
27
+ Proceed? (yes / no / modify): yes
28
+
29
+ 1. new_project 'fastapi-api'
30
+ 2. write_file 'main.py'
31
+ 3. run_command '.venv/bin/pip install -r requirements.txt'
32
+ │ Successfully installed fastapi uvicorn
33
+ 4. run_command 'curl http://localhost:8000/health'
34
+ │ {"status":"ok"}
35
+
36
+ ─────────────────── Report ─────────────────
37
+ ✅ API built and tested successfully.
38
+ Files: main.py, requirements.txt, README.md
39
+ Run: uvicorn main:app --reload
40
+ ```
41
+
42
+ ---
43
+
44
+ ## Features
45
+
46
+ - **Plan before action** — shows you what it will do, waits for approval
47
+ - **Validates everything** — runs code, reads output, fixes errors automatically
48
+ - **Project memory** — remembers stack, decisions, and context across sessions
49
+ - **Project isolation** — auto-creates git, venv, .env, .gitignore, README
50
+ - **Approval system** — asks before dangerous operations
51
+ - **Beautiful diffs** — shows exactly what changed in every file
52
+ - **Git tools** — commit, diff, branch from natural language
53
+ - **Streaming responses** — real-time output, not a black box
54
+
55
+ ---
56
+
57
+ ## Install
58
+
59
+ ```bash
60
+ pip install aria-x
61
+ ```
62
+
63
+ Requirements: Python 3.10+, [Ollama](https://ollama.com) (for local/cloud models)
64
+
65
+ ---
66
+
67
+ ## Usage
68
+
69
+ ```bash
70
+ # Run with default model
71
+ aria
72
+
73
+ # Run with specific model
74
+ aria --model nemotron-3-super:cloud
75
+
76
+ # Run with custom workspace
77
+ aria --workspace ~/my-projects
78
+ ```
79
+
80
+ ---
81
+
82
+ ## Slash Commands
83
+
84
+ | Command | Description |
85
+ |---|---|
86
+ | `/help` | Show all commands |
87
+ | `/status` | Session info — model, workspace, turns |
88
+ | `/clear` | Clear conversation history |
89
+ | `/model <name>` | Switch model mid-session |
90
+ | `/workspace <path>` | Change workspace |
91
+ | `/apikey` | Update API key |
92
+ | `/projects` | List all ARIA projects |
93
+ | `/memory` | Show persistent memory |
94
+ | `/tools` | List all agent tools |
95
+ | `/exit` | Exit ARIA |
96
+
97
+ ---
98
+
99
+ ## Models
100
+
101
+ ARIA works with any Ollama-compatible model:
102
+
103
+ ```bash
104
+ aria --model nemotron-3-super:cloud # Ollama cloud
105
+ aria --model llama3.3 # Local via Ollama
106
+ aria --model qwen2.5-coder:32b # Local coding model
107
+ ```
108
+
109
+ ---
110
+
111
+ ## How it works
112
+
113
+ Every task follows this workflow:
114
+
115
+ ```
116
+ Clarify → Plan → Approve → Execute → Validate → Remember → Report
117
+ ```
118
+
119
+ ARIA never silently modifies your system. Every action is visible, every dangerous operation requires your approval.
120
+
121
+ ---
122
+
123
+ ## Project Memory
124
+
125
+ ARIA stores project context in `~/.aria/`:
126
+
127
+ ```
128
+ ~/.aria/
129
+ ├── user_memory.json # Your preferences
130
+ └── projects/
131
+ └── my-project/
132
+ ├── meta.json # Stack, status, path
133
+ ├── memory.json # Key decisions
134
+ └── progress.md # Milestone history
135
+ ```
136
+
137
+ ---
138
+
139
+ ## Built by
140
+
141
+ **Sumit** — independent developer
142
+
143
+ ---
144
+
145
+ ## Contact
146
+
147
+ For feedback, collaborations, or business inquiries:
148
+
149
+ **Email:** samsungsumitv461@gmail.com
150
+ **GitHub:** [Lonerider007](https://github.com/Lonerider007)
151
+
152
+ ---
153
+
154
+ ## License
155
+
156
+ MIT
@@ -0,0 +1 @@
1
+ __version__ = "1.0.0"
@@ -0,0 +1,154 @@
1
+ import os
2
+ import json
3
+ from datetime import datetime
4
+
5
+ from openai import OpenAI
6
+ from aria.ui.console import console
7
+ from aria.ui.streaming import stream_response, print_response
8
+ from aria.memory.store import read_memory
9
+ from aria.prompts import SYSTEM_PROMPT
10
+
11
+ from aria.tools.files import (
12
+ read_file, write_file, edit_file,
13
+ delete_file, list_files, search_in_files
14
+ )
15
+ from aria.tools.shell import run_command, run_tests
16
+ from aria.tools.git import git_status, git_diff, git_commit, git_create_branch, git_log
17
+ from aria.tools.project import new_project, list_projects, mark_milestone
18
+ from aria.tools.interaction import (
19
+ create_plan, ask_clarification,
20
+ request_approval, notify_user
21
+ )
22
+ from aria.memory.store import save_memory, read_memory
23
+ from aria.memory.context import load_project_context
24
+
25
# Dispatch table: tool name as reported by the model -> implementing callable.
# Keys must stay in sync with the schema names advertised in TOOLS.
TOOL_MAP = {
    # File operations
    "read_file": read_file,
    "write_file": write_file,
    "edit_file": edit_file,
    "delete_file": delete_file,
    "list_files": list_files,
    "search_in_files": search_in_files,
    # Shell
    "run_command": run_command,
    "run_tests": run_tests,
    # Git
    "git_status": git_status,
    "git_diff": git_diff,
    "git_commit": git_commit,
    "git_create_branch": git_create_branch,
    "git_log": git_log,
    # Project scaffolding and tracking
    "new_project": new_project,
    "list_projects": list_projects,
    "mark_milestone": mark_milestone,
    # User interaction
    "create_plan": create_plan,
    "ask_clarification": ask_clarification,
    "request_approval": request_approval,
    "notify_user": notify_user,
    # Persistent memory
    "save_memory": save_memory,
    "read_memory": read_memory,
    "load_project_context": load_project_context,
}
50
+
51
def _fn(name: str, description: str, properties: dict, required: list) -> dict:
    """Build one OpenAI function-calling tool schema entry."""
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
            },
        },
    }


# Tool schemas advertised to the model; names must match TOOL_MAP keys.
TOOLS = [
    _fn("read_file", "Read file contents",
        {"path": {"type": "string"}}, ["path"]),
    _fn("write_file", "Create or overwrite a file",
        {"path": {"type": "string"}, "content": {"type": "string"}},
        ["path", "content"]),
    _fn("edit_file", "Replace exact string in a file. Shows diff.",
        {"path": {"type": "string"}, "old_str": {"type": "string"},
         "new_str": {"type": "string"}},
        ["path", "old_str", "new_str"]),
    _fn("delete_file", "Delete a file",
        {"path": {"type": "string"}}, ["path"]),
    _fn("list_files", "List files in directory",
        {"path": {"type": "string"}, "recursive": {"type": "boolean"}}, []),
    _fn("search_in_files", "Search pattern in files (grep)",
        {"pattern": {"type": "string"}, "path": {"type": "string"},
         "file_pattern": {"type": "string"}},
        ["pattern"]),
    _fn("run_command", "Run a shell command",
        {"command": {"type": "string"}, "cwd": {"type": "string"}}, ["command"]),
    _fn("run_tests", "Run test suite",
        {"command": {"type": "string"}, "cwd": {"type": "string"}}, []),
    _fn("git_status", "Git working tree status",
        {"cwd": {"type": "string"}, "path": {"type": "string"}}, []),
    _fn("git_diff", "Show git diff",
        {"cwd": {"type": "string"}, "path": {"type": "string"}}, []),
    _fn("git_commit", "Stage all and commit",
        {"message": {"type": "string"}, "cwd": {"type": "string"},
         "path": {"type": "string"}},
        ["message"]),
    _fn("git_create_branch", "Create and checkout a new branch",
        {"name": {"type": "string"}, "cwd": {"type": "string"},
         "path": {"type": "string"}},
        ["name"]),
    _fn("git_log", "Show recent git commits",
        {"n": {"type": "integer"}, "cwd": {"type": "string"},
         "path": {"type": "string"}}, []),
    _fn("new_project",
        "Scaffold new project: folder, git, venv, .env, README. ALWAYS use for new projects.",
        {"name": {"type": "string"}, "description": {"type": "string"},
         "stack": {"type": "string"}, "path": {"type": "string"}},
        ["name", "description", "stack"]),
    _fn("list_projects", "List all known ARIA projects", {}, []),
    _fn("mark_milestone", "Record project milestone with status",
        {"project": {"type": "string"}, "milestone": {"type": "string"},
         "status": {"type": "string", "enum": ["done", "in_progress", "blocked"]},
         "notes": {"type": "string"}},
        ["project", "milestone", "status"]),
    _fn("create_plan",
        "REQUIRED: Show plan to user and get approval before executing anything.",
        {"goal": {"type": "string"},
         "steps": {"type": "array", "items": {"type": "string"}}},
        ["goal", "steps"]),
    _fn("ask_clarification", "Ask user a clarifying question when task is ambiguous.",
        {"question": {"type": "string"}}, ["question"]),
    _fn("request_approval",
        "Ask user approval before dangerous operations (delete, rm -rf, force push, etc).",
        {"action": {"type": "string"}}, ["action"]),
    _fn("notify_user", "Send status update to user during long tasks.",
        {"message": {"type": "string"},
         "level": {"type": "string", "enum": ["info", "success", "warning", "error"]}},
        ["message"]),
    _fn("save_memory", "Persist a key fact or decision for future sessions.",
        {"key": {"type": "string"}, "value": {"type": "string"},
         "project": {"type": "string"}},
        ["key", "value"]),
    _fn("read_memory", "Load persisted memory from previous sessions.",
        {"project": {"type": "string"}}, []),
    _fn("load_project_context",
        "Load full project memory, history, and context. Call at start of any project task.",
        {"project": {"type": "string"}}, ["project"]),
]
76
+
77
# Tools that render their own UI (plans, questions, notifications); the agent
# loop suppresses the numbered "N. tool_name" step line for these.
SILENT_TOOLS = {
    "create_plan",
    "ask_clarification",
    "request_approval",
    "notify_user",
    "mark_milestone",
}
81
+
82
+
83
class Agent:
    """Conversation-driven tool-calling agent.

    Streams model output, executes any requested tools via TOOL_MAP, feeds
    the results back to the model, and loops until the model replies with
    plain text (or the user rejects a plan / approval request).
    """

    def __init__(self, client: OpenAI, model: str):
        self.client = client
        self.model = model
        self.turn = 0  # number of user turns handled this session
        self.reset_messages()

    def reset_messages(self):
        """Reset history to a fresh system prompt (cwd, time, user memory)."""
        user_mem = read_memory()
        self.messages = [{
            "role": "system",
            "content": SYSTEM_PROMPT.format(
                cwd=os.getcwd(),
                time=datetime.now().strftime("%Y-%m-%d %H:%M"),
                user_memory=user_mem
            )
        }]

    def run(self, user_input: str):
        """Process one user request, executing tool calls until completion.

        Terminates when the model produces a final text answer, or as soon
        as a tool result starts with ``USER_REJECTED`` (plan declined).
        """
        self.turn += 1
        self.messages.append({"role": "user", "content": user_input})
        step_num = 0

        while True:
            msg_dict, tool_calls, text = stream_response(
                self.client, self.model, self.messages, TOOLS
            )
            self.messages.append(msg_dict)

            # No tool calls -> final answer; print it and stop looping.
            if not tool_calls:
                print_response(text)
                break

            rejected = False
            for tc in tool_calls:
                name = tc["function"]["name"]
                try:
                    args = json.loads(tc["function"]["arguments"])
                except json.JSONDecodeError:
                    # Model emitted malformed JSON args; call with none rather
                    # than aborting the whole turn.
                    args = {}

                if name not in SILENT_TOOLS:
                    step_num += 1
                    # Preview the first argument value (truncated) next to the step.
                    key_arg = next(iter(args.values()), "") if args else ""
                    preview = repr(key_arg)[:70] if isinstance(key_arg, str) else ""
                    console.print(
                        f" [aria.step]{step_num}.[/aria.step] "
                        f"[aria.tool]{name}[/aria.tool] "
                        f"[aria.dim]{preview}[/aria.dim]"
                    )

                fn = TOOL_MAP.get(name)
                if fn is None:
                    result = f"ERROR: unknown tool '{name}'"
                else:
                    # BUG FIX: a tool raising (bad args, I/O error, git failure)
                    # previously crashed the whole REPL session. Capture the
                    # exception and report it to the model so it can recover.
                    try:
                        result = fn(**args)
                    except Exception as e:
                        result = f"ERROR: {name} failed: {e}"

                # Echo up to 10 lines of shell output inline for visibility.
                if name == "run_command" and str(result).strip():
                    for line in str(result).strip().splitlines()[:10]:
                        console.print(f" [aria.dim]│ {line}[/aria.dim]")

                self.messages.append({
                    "role": "tool",
                    "tool_call_id": tc["id"],
                    "content": str(result)
                })

                # Hard stop if user rejected the plan
                if str(result).startswith("USER_REJECTED"):
                    console.print("\n [aria.warning]◉[/aria.warning] [aria.dim]Task cancelled.[/aria.dim]")
                    rejected = True
                    break

            if rejected:
                break
@@ -0,0 +1,41 @@
1
+ import json
2
+ import os
3
+ from pathlib import Path
4
+
5
# On-disk location of the user's persisted ARIA configuration.
CONFIG_FILE = Path.home() / ".aria" / "config.json"

# Baseline settings; values stored in CONFIG_FILE override these key by key.
DEFAULTS = {
    "default_model": "llama3.3",                  # model used when --model absent
    "base_url": "http://localhost:11434/v1",      # local Ollama OpenAI-compat endpoint
    "api_key": "",                                # empty = not needed for local Ollama
    "theme": "purple",
    "stream": True,
    "max_retries": 3,
    "auto_approve": False,                        # never skip approval prompts by default
}
16
+
17
+
18
def load_config() -> dict:
    """Return the effective config: DEFAULTS overlaid with the stored file.

    BUG FIX: a corrupt, unreadable, or non-object config.json used to raise
    at startup; now any such problem silently falls back to DEFAULTS.
    """
    if CONFIG_FILE.exists():
        try:
            stored = json.loads(CONFIG_FILE.read_text())
        except (OSError, json.JSONDecodeError):
            return dict(DEFAULTS)
        if isinstance(stored, dict):
            return {**DEFAULTS, **stored}
    return dict(DEFAULTS)
23
+
24
+
25
def save_config(cfg: dict):
    """Persist *cfg* to CONFIG_FILE as pretty-printed JSON.

    BUG FIX: mkdir now uses parents=True so saving works even when the
    full ~/.aria directory chain does not exist yet.
    """
    CONFIG_FILE.parent.mkdir(parents=True, exist_ok=True)
    CONFIG_FILE.write_text(json.dumps(cfg, indent=2))
28
+
29
+
30
def get(key: str, fallback=None):
    """Look up a single config value, with environment-variable overrides.

    For base_url / api_key / default_model, an ARIA_* variable (or the
    matching OLLAMA_* variable) takes precedence over the stored file;
    otherwise the merged config is consulted, then *fallback*.
    """
    cfg = load_config()
    env_names = {
        "base_url": "ARIA_BASE_URL",
        "api_key": "ARIA_API_KEY",
        "default_model": "ARIA_MODEL",
    }
    aria_var = env_names.get(key)
    if aria_var is not None:
        override = os.getenv(aria_var) or os.getenv(f"OLLAMA_{key.upper()}")
        if override:
            return override
    return cfg.get(key, fallback)
@@ -0,0 +1,107 @@
1
+ import os
2
+ import sys
3
+ import argparse
4
+ from pathlib import Path
5
+
6
+ from openai import OpenAI
7
+ from rich.panel import Panel
8
+ from rich.prompt import Prompt
9
+ from prompt_toolkit import PromptSession
10
+ from prompt_toolkit.history import InMemoryHistory
11
+ from prompt_toolkit.styles import Style as PTStyle
12
+
13
+ from aria.ui.console import console
14
+ from aria.ui.banner import show_intro, show_tnc, show_banner, VERSION
15
+ from aria.tools.interaction import masked_input
16
+ from aria.config import get, load_config, save_config
17
+ from aria.agent import Agent
18
+ from aria.ui.commands import handle
19
+
20
# Colors for the interactive REPL prompt: bold purple marker, grey arguments.
_PROMPT_COLORS = {
    "prompt": "#7C3AED bold",
    "prompt.arg": "#6B7280",
}

# prompt_toolkit style object applied to every session.prompt() call.
PT_STYLE = PTStyle.from_dict(_PROMPT_COLORS)
24
+
25
+
26
def onboarding(args) -> tuple:
    """Interactive first-run flow: T&C consent, API key, workspace, model.

    Returns ``(api_key, workspace, model)``. Exits the process when the
    user types 'disagree'. Also chdirs into the chosen workspace, creating
    it if necessary.
    """
    show_intro()
    console.print()
    show_tnc()
    console.print()

    # Block until an explicit consent decision is typed.
    while True:
        answer = Prompt.ask("[bold]agree / disagree[/bold]").strip().lower()
        if answer == "agree":
            console.print("[aria.success]✓[/aria.success] Accepted.\n")
            break
        if answer == "disagree":
            console.print("[aria.error]Exiting.[/aria.error]")
            sys.exit(0)
        console.print("[aria.dim]Type 'agree' or 'disagree'.[/aria.dim]")

    # API key: CLI flag > stored config > typed value > "aria" placeholder.
    saved_key = args.api_key or get("api_key") or ""
    hint = (" (Enter to use saved key)" if saved_key
            else " (Enter to skip — not needed for local Ollama)")
    console.print(f"[aria.warning]API Key[/aria.warning][aria.dim]{hint}[/aria.dim]")
    api_key = masked_input("Key: ").strip() or saved_key or "aria"

    # Workspace: resolve, create on demand, and make it the cwd.
    ws_raw = Prompt.ask("[aria.warning]Workspace[/aria.warning]", default=os.getcwd()).strip()
    workspace = str(Path(ws_raw or os.getcwd()).expanduser().resolve())
    ws_path = Path(workspace)
    if not ws_path.exists():
        ws_path.mkdir(parents=True, exist_ok=True)
        console.print(f"[aria.dim]Created:[/aria.dim] {workspace}")
    os.chdir(workspace)
    console.print(f"[aria.success]✓[/aria.success] Workspace: [aria.cyan]{workspace}[/aria.cyan]")

    # Model: typed value, falling back to the CLI/default model.
    model = Prompt.ask("[aria.warning]Model[/aria.warning]", default=args.model).strip() or args.model
    console.print(f"[aria.success]✓[/aria.success] Model: [aria.cyan]{model}[/aria.cyan]\n")

    return api_key, workspace, model
61
+
62
+
63
def main():
    """CLI entry point: parse flags, run onboarding, then the REPL loop."""
    parser = argparse.ArgumentParser(description="ARIA — Autonomous Reasoning and Intelligent Agent")
    parser.add_argument("--model", default=get("default_model", "llama3.3"))
    parser.add_argument("--base-url", default=get("base_url", "http://localhost:11434/v1"))
    parser.add_argument("--api-key", default=get("api_key", ""))
    args = parser.parse_args()

    api_key, workspace, model = onboarding(args)

    client = OpenAI(base_url=args.base_url, api_key=api_key)
    agent = Agent(client, model)
    # Mutable session state shared with the slash-command handler.
    state = {
        "model": model,
        "workspace": workspace,
        "api_key": api_key,
        "base_url": args.base_url,
    }

    show_banner(model, workspace)

    session = PromptSession(history=InMemoryHistory())

    while True:
        try:
            # Prompt shows the current directory's basename as context.
            prompt_text = f"\n◉ aria({Path(os.getcwd()).name}) › "
            user_input = session.prompt(prompt_text, style=PT_STYLE).strip()
        except (KeyboardInterrupt, EOFError):
            console.print("\n[aria.dim]Session ended.[/aria.dim]")
            break

        if not user_input:
            continue
        if user_input.startswith("/"):
            # Slash commands are handled locally, never sent to the model.
            handle(user_input, agent, state)
            continue
        if user_input.lower() in {"exit", "quit", "q"}:
            console.print("[aria.dim]Session ended.[/aria.dim]")
            break

        agent.run(user_input)
104
+
105
+
106
+ # Script entry point: running this module directly launches the ARIA CLI.
+ if __name__ == "__main__":
107
+     main()
File without changes
@@ -0,0 +1,29 @@
1
+ import json
2
+ from aria.memory.store import project_dir
3
+
4
+
5
def load_project_context(project: str) -> str:
    """Assemble a text summary of a project's persisted context.

    Concatenates three optional sources found in the project's data dir:
    meta.json (name/stack/status/path), memory.json (saved decisions), and
    the tail of progress.md.

    BUG FIX: corrupt JSON files and memory entries that are not
    ``{"value": ...}`` dicts used to raise (JSONDecodeError / TypeError /
    KeyError) and crash the agent loop; such data is now skipped or used
    as-is instead.

    Returns "(no project context found)" when nothing usable exists.
    """
    pd = project_dir(project)
    parts = []

    meta_f = pd / "meta.json"
    if meta_f.exists():
        try:
            meta = json.loads(meta_f.read_text())
        except (OSError, json.JSONDecodeError):
            meta = None
        if isinstance(meta, dict):
            parts.append(
                f"Project: {meta.get('name')} | Stack: {meta.get('stack')} "
                f"| Status: {meta.get('status')} | Path: {meta.get('path')}"
            )

    mem_f = pd / "memory.json"
    if mem_f.exists():
        try:
            data = json.loads(mem_f.read_text())
        except (OSError, json.JSONDecodeError):
            data = None
        if isinstance(data, dict) and data:
            # Entries are expected to look like {"value": ...}; tolerate
            # plain values written by older versions.
            lines = []
            for k, v in data.items():
                value = v.get("value") if isinstance(v, dict) else v
                lines.append(f" {k}: {value}")
            parts.append("Memory:\n" + "\n".join(lines))

    prog_f = pd / "progress.md"
    if prog_f.exists():
        try:
            content = prog_f.read_text().strip()
        except OSError:
            content = ""
        if content:
            # Only the most recent ~600 chars, to bound prompt size.
            parts.append("Progress (recent):\n" + content[-600:])

    return "\n\n".join(parts) if parts else "(no project context found)"