akitallm 1.0.3__tar.gz → 1.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. {akitallm-1.0.3 → akitallm-1.1.0}/PKG-INFO +1 -1
  2. akitallm-1.1.0/akita/__init__.py +1 -0
  3. {akitallm-1.0.3 → akitallm-1.1.0}/akita/cli/main.py +78 -14
  4. {akitallm-1.0.3 → akitallm-1.1.0}/akita/core/config.py +12 -2
  5. akitallm-1.1.0/akita/core/providers.py +181 -0
  6. {akitallm-1.0.3 → akitallm-1.1.0}/akita/models/base.py +12 -7
  7. akitallm-1.1.0/akita/tools/git.py +79 -0
  8. {akitallm-1.0.3 → akitallm-1.1.0}/akitallm.egg-info/PKG-INFO +1 -1
  9. {akitallm-1.0.3 → akitallm-1.1.0}/akitallm.egg-info/SOURCES.txt +2 -0
  10. {akitallm-1.0.3 → akitallm-1.1.0}/pyproject.toml +1 -1
  11. akitallm-1.0.3/akita/__init__.py +0 -1
  12. {akitallm-1.0.3 → akitallm-1.1.0}/LICENSE +0 -0
  13. {akitallm-1.0.3 → akitallm-1.1.0}/README.md +0 -0
  14. {akitallm-1.0.3 → akitallm-1.1.0}/akita/core/ast_utils.py +0 -0
  15. {akitallm-1.0.3 → akitallm-1.1.0}/akita/core/indexing.py +0 -0
  16. {akitallm-1.0.3 → akitallm-1.1.0}/akita/core/plugins.py +0 -0
  17. {akitallm-1.0.3 → akitallm-1.1.0}/akita/core/trace.py +0 -0
  18. {akitallm-1.0.3 → akitallm-1.1.0}/akita/plugins/__init__.py +0 -0
  19. {akitallm-1.0.3 → akitallm-1.1.0}/akita/plugins/files.py +0 -0
  20. {akitallm-1.0.3 → akitallm-1.1.0}/akita/reasoning/engine.py +0 -0
  21. {akitallm-1.0.3 → akitallm-1.1.0}/akita/reasoning/session.py +0 -0
  22. {akitallm-1.0.3 → akitallm-1.1.0}/akita/schemas/review.py +0 -0
  23. {akitallm-1.0.3 → akitallm-1.1.0}/akita/tools/base.py +0 -0
  24. {akitallm-1.0.3 → akitallm-1.1.0}/akita/tools/context.py +0 -0
  25. {akitallm-1.0.3 → akitallm-1.1.0}/akita/tools/diff.py +0 -0
  26. {akitallm-1.0.3 → akitallm-1.1.0}/akitallm.egg-info/dependency_links.txt +0 -0
  27. {akitallm-1.0.3 → akitallm-1.1.0}/akitallm.egg-info/entry_points.txt +0 -0
  28. {akitallm-1.0.3 → akitallm-1.1.0}/akitallm.egg-info/requires.txt +0 -0
  29. {akitallm-1.0.3 → akitallm-1.1.0}/akitallm.egg-info/top_level.txt +0 -0
  30. {akitallm-1.0.3 → akitallm-1.1.0}/setup.cfg +0 -0
  31. {akitallm-1.0.3 → akitallm-1.1.0}/tests/test_ast.py +0 -0
  32. {akitallm-1.0.3 → akitallm-1.1.0}/tests/test_basic.py +0 -0
  33. {akitallm-1.0.3 → akitallm-1.1.0}/tests/test_diff.py +0 -0
  34. {akitallm-1.0.3 → akitallm-1.1.0}/tests/test_interactive.py +0 -0
  35. {akitallm-1.0.3 → akitallm-1.1.0}/tests/test_plugins.py +0 -0
  36. {akitallm-1.0.3 → akitallm-1.1.0}/tests/test_review_mock.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: akitallm
3
- Version: 1.0.3
3
+ Version: 1.1.0
4
4
  Summary: AkitaLLM: An open-source local-first AI system for programming.
5
5
  Author: KerubinDev
6
6
  License: MIT
@@ -0,0 +1 @@
1
+ __version__ = "1.1.0"
@@ -1,4 +1,5 @@
1
1
  import typer
2
+ from typing import Optional, List, Dict, Any
2
3
  from rich.console import Console
3
4
  from rich.panel import Panel
4
5
  from akita.reasoning.engine import ReasoningEngine
@@ -10,6 +11,8 @@ from rich.markdown import Markdown
10
11
  from rich.syntax import Syntax
11
12
  from dotenv import load_dotenv
12
13
  from akita.tools.diff import DiffApplier
14
+ from akita.tools.git import GitTool
15
+ from akita.core.providers import detect_provider
13
16
 
14
17
  # Load environment variables from .env file
15
18
  load_dotenv()
@@ -42,27 +45,65 @@ def main(
42
45
 
43
46
def run_onboarding():
    """Interactive first-run wizard: detect the provider from an API key,
    let the user pick a model, and persist the resulting config."""
    console.print(Panel(
        "[bold cyan]AkitaLLM Configuration[/]\n\n[italic]API-first setup...[/]",
        title="Onboarding"
    ))

    raw_key = typer.prompt("🔑 Paste your API Key (or type 'ollama' for local)", hide_input=False)

    provider = detect_provider(raw_key)
    if not provider:
        console.print("[bold red]❌ Could not detect provider from the given key.[/]")
        console.print("Make sure you are using a valid OpenAI (sk-...) or Anthropic (sk-ant-...) key.")
        raise typer.Abort()

    console.print(f"[bold green]✅ Detected Provider:[/] {provider.name.upper()}")

    with console.status(f"[bold blue]Consulting {provider.name} API for available models..."):
        try:
            models = provider.list_models(raw_key)
        except Exception as e:
            console.print(f"[bold red]❌ Failed to list models:[/] {e}")
            raise typer.Abort()

    if not models:
        console.print("[bold yellow]⚠️ No models found for this provider.[/]")
        raise typer.Abort()

    console.print("\n[bold]Select a model:[/]")
    for position, model in enumerate(models, start=1):
        name_display = f" ({model.name})" if model.name else ""
        console.print(f"{position}) [cyan]{model.id}[/]{name_display}")

    choice = typer.prompt("\nChoose a model number", type=int, default=1)
    # Guard clause: abort on an out-of-range selection.
    if not (1 <= choice <= len(models)):
        console.print("[bold red]Invalid choice.[/]")
        raise typer.Abort()
    selected_model = models[choice - 1].id

    # Offer to store an env-var reference instead of the raw secret.
    use_env = typer.confirm("Would you like to use an environment variable for the API key? (Recommended)", default=True)

    final_key_ref = raw_key
    if use_env and provider.name != "ollama":
        env_var_name = f"{provider.name.upper()}_API_KEY"
        console.print(f"[dim]Please ensure you set [bold]{env_var_name}[/] in your .env or shell.[/]")
        final_key_ref = f"env:{env_var_name}"

    save_config({
        "model": {
            "provider": provider.name,
            "name": selected_model,
            "api_key": final_key_ref,
        }
    })

    console.print("\n[bold green]✨ Configuration saved![/]")
    console.print(f"Model: [bold]{selected_model}[/]")
    console.print(f"Key reference: [dim]{final_key_ref}[/]")
    console.print("\n[dim]Configuration stored at ~/.akita/config.toml[/]\n")
66
107
 
67
108
  @app.command()
68
109
  def review(
@@ -193,6 +234,29 @@ def plan(
193
234
  console.print(f"[bold red]Planning failed:[/] {e}")
194
235
  raise typer.Exit(code=1)
195
236
 
237
@app.command()
def clone(
    url: str = typer.Argument(..., help="Git repository URL to clone."),
    branch: Optional[str] = typer.Option(None, "--branch", "-b", help="Specific branch to clone."),
    depth: Optional[int] = typer.Option(None, "--depth", "-d", help="Create a shallow clone with a history truncated to the specified number of commits.")
):
    """
    Clone a remote Git repository into the Akita workspace (~/.akita/repos/).
    """
    console.print(Panel(f"🌐 [bold blue]Akita[/] is cloning: [yellow]{url}[/]", title="Clone Mode"))

    try:
        with console.status("[bold green]Cloning repository..."):
            local_path = GitTool.clone_repo(url, branch=branch, depth=depth)
    except FileExistsError as e:
        # Target directory already populated — warn, but do not fail the CLI.
        console.print(f"[bold yellow]⚠️ {e}[/]")
    except Exception as e:
        console.print(f"[bold red]❌ Clone failed:[/] {e}")
        raise typer.Exit(code=1)
    else:
        console.print("\n[bold green]✅ Repository cloned successfully![/]")
        console.print(f"📍 Local path: [cyan]{local_path}[/]")
259
+
196
260
  @app.command()
197
261
  def index(
198
262
  path: str = typer.Argument(".", help="Path to index for RAG.")
@@ -43,9 +43,19 @@ def reset_config():
43
43
  if CONFIG_FILE.exists():
44
44
  CONFIG_FILE.unlink()
45
45
 
46
+ def resolve_config_value(value: Any) -> Any:
47
+ """
48
+ Resolves values like 'env:VAR_NAME' to their environment variable content.
49
+ """
50
+ if isinstance(value, str) and value.startswith("env:"):
51
+ env_var = value[4:]
52
+ return os.getenv(env_var, value)
53
+ return value
54
+
46
55
  def get_config_value(section: str, key: str, default: Any = None) -> Any:
47
- """Get a specific value from the config."""
56
+ """Get a specific value from the config and resolve env refs."""
48
57
  config = load_config()
49
58
  if not config:
50
59
  return default
51
- return config.get(section, {}).get(key, default)
60
+ val = config.get(section, {}).get(key, default)
61
+ return resolve_config_value(val)
@@ -0,0 +1,181 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import List, Optional
3
+ import requests
4
+ from pydantic import BaseModel
5
+
6
class ModelInfo(BaseModel):
    """Lightweight descriptor for one selectable LLM.

    ``id`` is the provider-side model identifier used in API calls;
    ``name`` is an optional human-friendly display label.
    """

    id: str
    name: Optional[str] = None
9
+
10
class BaseProvider(ABC):
    """Abstract interface that every LLM provider integration implements."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Short lowercase provider identifier (e.g. ``"openai"``)."""
        ...

    @abstractmethod
    def validate_key(self, api_key: str) -> bool:
        """Return True when *api_key* looks usable for this provider."""
        ...

    @abstractmethod
    def list_models(self, api_key: str) -> List[ModelInfo]:
        """Return the models selectable with *api_key*."""
        ...
23
+
24
class OpenAIProvider(BaseProvider):
    """Provider backed by the OpenAI REST API."""

    @property
    def name(self) -> str:
        return "openai"

    def validate_key(self, api_key: str) -> bool:
        """Check the key prefix, then probe the models endpoint with it."""
        if not api_key.startswith("sk-"):
            return False
        try:
            probe = requests.get(
                "https://api.openai.com/v1/models",
                headers={"Authorization": f"Bearer {api_key}"},
                timeout=5,
            )
        except Exception:
            # Network failure or timeout — treat as an unusable key.
            return False
        return probe.status_code == 200

    def list_models(self, api_key: str) -> List[ModelInfo]:
        """Fetch GPT/o-series chat models, filtering out specialty variants."""
        resp = requests.get(
            "https://api.openai.com/v1/models",
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=10,
        )
        resp.raise_for_status()
        # Keywords marking non-chat model families we never want to offer.
        blocked = ["vision", "instruct", "audio", "realtime", "tts", "dall-e",
                   "embedding", "moderation", "davinci", "babbage", "curie", "ada"]
        chat_models = [
            ModelInfo(id=entry["id"])
            for entry in resp.json()["data"]
            if entry["id"].startswith(("gpt-", "o1", "o3"))
            and not any(kw in entry["id"].lower() for kw in blocked)
        ]
        return sorted(chat_models, key=lambda m: m.id)
60
+
61
class AnthropicProvider(BaseProvider):
    """Provider for Anthropic's Claude models (static catalogue)."""

    @property
    def name(self) -> str:
        return "anthropic"

    def validate_key(self, api_key: str) -> bool:
        # A real check would need a billable request, so only the
        # "sk-ant-" key prefix is verified here.
        return api_key.startswith("sk-ant-")

    def list_models(self, api_key: str) -> List[ModelInfo]:
        # Anthropic exposes no public model-listing endpoint comparable to
        # OpenAI's, so a curated static catalogue is returned instead.
        catalogue = [
            ("claude-3-5-sonnet-latest", "Claude 3.5 Sonnet (Latest)"),
            ("claude-3-5-haiku-latest", "Claude 3.5 Haiku (Latest)"),
            ("claude-3-opus-20240229", "Claude 3 Opus"),
            ("claude-3-sonnet-20240229", "Claude 3 Sonnet"),
            ("claude-3-haiku-20240307", "Claude 3 Haiku"),
        ]
        return [ModelInfo(id=model_id, name=label) for model_id, label in catalogue]
82
+
83
class OllamaProvider(BaseProvider):
    """Provider for a locally running Ollama daemon (keyless)."""

    @property
    def name(self) -> str:
        return "ollama"

    def validate_key(self, api_key: str) -> bool:
        # Ollama needs no key by default; "valid" just means the local
        # daemon answers on its default port.
        try:
            probe = requests.get("http://localhost:11434/api/tags", timeout=2)
        except Exception:
            return False
        return probe.status_code == 200

    def list_models(self, api_key: str) -> List[ModelInfo]:
        """Return every model the local daemon reports as installed."""
        resp = requests.get("http://localhost:11434/api/tags", timeout=5)
        resp.raise_for_status()
        return [ModelInfo(id=entry["name"]) for entry in resp.json()["models"]]
101
+
102
class GeminiProvider(BaseProvider):
    """Provider for Google's Gemini (Generative Language) API."""

    @property
    def name(self) -> str:
        return "gemini"

    def validate_key(self, api_key: str) -> bool:
        # Google API keys carry an "AIza" prefix; no remote call is made.
        return api_key.startswith("AIza")

    def list_models(self, api_key: str) -> List[ModelInfo]:
        """List Gemini models that support text generation."""
        resp = requests.get(
            f"https://generativelanguage.googleapis.com/v1beta/models?key={api_key}",
            timeout=10,
        )
        resp.raise_for_status()

        # Keyword filter for specialty/non-chat variants.
        blocked = ["nano", "banana", "vision", "embedding", "aqa", "learnlm"]

        usable = []
        for entry in resp.json()["models"]:
            model_id = entry["name"].split("/")[-1]
            label = entry["displayName"]

            # Keep only models that can generate content...
            if "generateContent" not in entry["supportedGenerationMethods"]:
                continue
            # ...and whose id/display name carries no excluded keyword.
            haystacks = (model_id.lower(), label.lower())
            if any(kw in text for kw in blocked for text in haystacks):
                continue
            usable.append(ModelInfo(id=model_id, name=label))

        return usable
132
+
133
class GroqProvider(BaseProvider):
    """Provider for Groq's OpenAI-compatible inference API."""

    @property
    def name(self) -> str:
        return "groq"

    def validate_key(self, api_key: str) -> bool:
        # Groq keys carry a "gsk_" prefix; no remote validation is done.
        return api_key.startswith("gsk_")

    def list_models(self, api_key: str) -> List[ModelInfo]:
        """List text-capable models via Groq's OpenAI-style endpoint."""
        resp = requests.get(
            "https://api.groq.com/openai/v1/models",
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=10,
        )
        resp.raise_for_status()

        # Multimodal variants are filtered out; only text models remain.
        blocked = ("vision", "audio")
        text_models = [
            ModelInfo(id=entry["id"])
            for entry in resp.json()["data"]
            if not any(kw in entry["id"].lower() for kw in blocked)
        ]
        return sorted(text_models, key=lambda m: m.id)
161
+
162
def detect_provider(api_key: str) -> Optional[BaseProvider]:
    """Guess the provider from the shape of *api_key*.

    Returns a provider instance, or ``None`` when the key matches no
    known format. The literal ``"ollama"`` selects the local daemon.
    Ordering matters: ``sk-ant-`` must be tried before the generic
    OpenAI ``sk-`` prefix.
    """
    if api_key.lower() == "ollama":
        return OllamaProvider()

    prefix_dispatch = [
        ("sk-ant-", AnthropicProvider),
        ("gsk_", GroqProvider),
        ("sk-", OpenAIProvider),
        ("AIza", GeminiProvider),
    ]
    for prefix, provider_cls in prefix_dispatch:
        if api_key.startswith(prefix):
            return provider_cls()

    return None
@@ -35,14 +35,19 @@ def get_model(model_name: Optional[str] = None) -> AIModel:
35
35
  """
36
36
  Get an AIModel instance based on config or provided name.
37
37
  """
38
+ provider = get_config_value("model", "provider", "openai")
39
+ api_key = get_config_value("model", "api_key")
40
+
38
41
  if model_name is None:
39
42
  model_name = get_config_value("model", "name", "gpt-4o-mini")
40
43
 
41
- provider = get_config_value("model", "provider", "openai")
42
-
43
- # LiteLLM usually wants "provider/model_name" for some providers
44
- # but for OpenAI it handles "gpt-3.5-turbo" directly.
45
- # If it's a custom provider, we might need to prepend it.
46
- full_model_name = f"{provider}/{model_name}" if provider != "openai" else model_name
44
+ # LiteLLM wants "provider/model_name" for non-OpenAI providers
45
+ if provider == "openai":
46
+ full_model_name = model_name
47
+ elif provider == "gemini":
48
+ full_model_name = f"gemini/{model_name}"
49
+ else:
50
+ full_model_name = f"{provider}/{model_name}"
47
51
 
48
- return AIModel(model_name=full_model_name)
52
+ # For Ollama, we might need a base_url, but for now we assume default
53
+ return AIModel(model_name=full_model_name, api_key=api_key)
@@ -0,0 +1,79 @@
1
+ import os
2
+ import pathlib
3
+ import subprocess
4
+ from typing import Tuple, Optional
5
+ from urllib.parse import urlparse
6
+ from akita.core.config import CONFIG_DIR
7
+
8
class GitTool:
    """
    Utility for managing Git repositories within the Akita ecosystem.
    """
    # All clones live under ~/.akita/repos/<owner>/<repo>.
    REPOS_DIR = CONFIG_DIR / "repos"

    @staticmethod
    def parse_repo_url(url: str) -> Tuple[str, str]:
        """
        Extract (owner, repo) from a git URL.

        Supports HTTPS URLs and SCP-like SSH syntax
        (git@host:owner/repo), with or without a trailing ".git".

        Raises:
            ValueError: if no owner/repo pair can be derived.
        """
        # Normalize: strip whitespace, trailing slashes and a ".git" suffix.
        clean_url = url.strip().rstrip("/")
        if clean_url.endswith(".git"):
            clean_url = clean_url[:-4]

        host = None
        path_part = None

        # Handle SCP-like SSH syntax: git@github.com:owner/repo
        if "@" in clean_url and ":" in clean_url.split("@", 1)[1]:
            user_host, path_part = clean_url.split("@", 1)[1].split(":", 1)
            host = user_host
        else:
            parsed = urlparse(clean_url)
            host = parsed.hostname
            path_part = parsed.path.lstrip("/") if parsed.path else ""

        parts = [p for p in path_part.split("/") if p]

        if host and (host == "github.com" or host.endswith(".github.com")):
            if len(parts) >= 2:
                return parts[0], parts[1]

        # Fallback for other providers: use the last two path segments so a
        # nested group path (e.g. GitLab subgroups) still yields the repo name.
        # Fix: split the parsed path, not the raw URL — splitting clean_url
        # leaked "git@host:owner" into the owner segment for SSH URLs.
        if len(parts) >= 2:
            return parts[-2], parts[-1]

        raise ValueError(f"Could not parse repository owner and name from URL: {url}")

    @classmethod
    def clone_repo(cls, url: str, branch: Optional[str] = None, depth: Optional[int] = None) -> str:
        """
        Clone *url* to ~/.akita/repos/<owner>/<repo> and return the local path.

        Args:
            url: Repository URL (HTTPS or SSH).
            branch: Optional branch to check out.
            depth: Optional shallow-clone depth.

        Raises:
            FileExistsError: if the target directory already exists.
            RuntimeError: if the underlying ``git clone`` fails.
        """
        owner, repo = cls.parse_repo_url(url)
        target_path = cls.REPOS_DIR / owner / repo

        if target_path.exists():
            raise FileExistsError(f"Repository already exists at: {target_path}. Please delete it first if you want to re-clone.")

        target_path.parent.mkdir(parents=True, exist_ok=True)

        cmd = ["git", "clone"]
        if branch:
            cmd += ["--branch", branch]
        if depth:
            cmd += ["--depth", str(depth)]

        cmd += [url, str(target_path)]

        # Fix: the original passed a nonexistent `capture_all` kwarg, which
        # makes subprocess.run() raise TypeError before git even starts.
        # capture_output=True is the real parameter and also populates
        # result.stderr for the error message below.
        result = subprocess.run(cmd, capture_output=True, text=True, check=False)

        if result.returncode != 0:
            # Remove the just-created <owner> dir if the failed clone left it empty.
            if not any(target_path.parent.iterdir()):
                target_path.parent.rmdir()
            raise RuntimeError(f"Git clone failed: {result.stderr or 'Unknown error'}")

        return str(target_path)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: akitallm
3
- Version: 1.0.3
3
+ Version: 1.1.0
4
4
  Summary: AkitaLLM: An open-source local-first AI system for programming.
5
5
  Author: KerubinDev
6
6
  License: MIT
@@ -7,6 +7,7 @@ akita/core/ast_utils.py
7
7
  akita/core/config.py
8
8
  akita/core/indexing.py
9
9
  akita/core/plugins.py
10
+ akita/core/providers.py
10
11
  akita/core/trace.py
11
12
  akita/models/base.py
12
13
  akita/plugins/__init__.py
@@ -17,6 +18,7 @@ akita/schemas/review.py
17
18
  akita/tools/base.py
18
19
  akita/tools/context.py
19
20
  akita/tools/diff.py
21
+ akita/tools/git.py
20
22
  akitallm.egg-info/PKG-INFO
21
23
  akitallm.egg-info/SOURCES.txt
22
24
  akitallm.egg-info/dependency_links.txt
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "akitallm"
3
- version = "1.0.3"
3
+ version = "1.1.0"
4
4
  description = "AkitaLLM: An open-source local-first AI system for programming."
5
5
  authors = [{ name = "KerubinDev" }]
6
6
  readme = "README.md"
@@ -1 +0,0 @@
1
- __version__ = "0.1.1"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes