akitallm 1.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
akita/__init__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "1.1.0"
1
+ __version__ = "1.2.0"
akita/cli/main.py CHANGED
@@ -13,6 +13,7 @@ from dotenv import load_dotenv
13
13
  from akita.tools.diff import DiffApplier
14
14
  from akita.tools.git import GitTool
15
15
  from akita.core.providers import detect_provider
16
+ from akita.core.i18n import t
16
17
 
17
18
  # Load environment variables from .env file
18
19
  load_dotenv()
@@ -24,7 +25,7 @@ app = typer.Typer(
24
25
  )
25
26
  console = Console()
26
27
 
27
- @app.callback()
28
+ @app.callback(invoke_without_command=True)
28
29
  def main(
29
30
  ctx: typer.Context,
30
31
  dry_run: bool = typer.Option(False, "--dry-run", help="Run without making any changes.")
@@ -39,17 +40,31 @@ def main(
39
40
  if dry_run:
40
41
  console.print("[bold yellow]⚠️ Running in DRY-RUN mode. No changes will be applied.[/]")
41
42
 
42
- # Onboarding check
43
+ # Proactive Onboarding / Welcome Logic
43
44
  if not CONFIG_FILE.exists():
45
+ # If no config, always run onboarding (except for help/version which are handled by Typer earlier)
44
46
  run_onboarding()
47
+ # After onboarding, if it was a bare command, show welcome
48
+ if ctx.invoked_subcommand is None:
49
+ console.print(Panel(
50
+ f"{t('welcome.subtitle')}\n\n{t('welcome.commands')}\n\n{t('welcome.help_hint')}",
51
+ title=t("welcome.title")
52
+ ))
53
+ elif ctx.invoked_subcommand is None:
54
+ # Config exists, bare command -> Show Welcome Banner
55
+ console.print(Panel(
56
+ f"{t('welcome.subtitle')}\n\n{t('welcome.commands')}\n\n{t('welcome.help_hint')}",
57
+ title=t("welcome.title")
58
+ ))
59
+
45
60
 
46
61
  def run_onboarding():
47
62
  console.print(Panel(
48
- "[bold cyan]AkitaLLM Configuration[/]\n\n[italic]API-first setup...[/]",
63
+ t("onboarding.welcome"),
49
64
  title="Onboarding"
50
65
  ))
51
66
 
52
- api_key = typer.prompt("🔑 Paste your API Key (or type 'ollama' for local)", hide_input=False)
67
+ api_key = typer.prompt(t("onboarding.api_key_prompt"), hide_input=False)
53
68
 
54
69
  provider = detect_provider(api_key)
55
70
  if not provider:
@@ -57,53 +72,61 @@ def run_onboarding():
57
72
  console.print("Make sure you are using a valid OpenAI (sk-...) or Anthropic (sk-ant-...) key.")
58
73
  raise typer.Abort()
59
74
 
60
- console.print(f"[bold green]✅ Detected Provider:[/] {provider.name.upper()}")
75
+ console.print(t("onboarding.provider_detected", provider=provider.name.upper()))
61
76
 
62
- with console.status(f"[bold blue]Consulting {provider.name} API for available models..."):
77
+ with console.status(t("onboarding.models_consulting", provider=provider.name)):
63
78
  try:
64
79
  models = provider.list_models(api_key)
65
80
  except Exception as e:
66
- console.print(f"[bold red]❌ Failed to list models:[/] {e}")
81
+ console.print(t("onboarding.models_failed", error=e))
67
82
  raise typer.Abort()
68
83
 
69
84
  if not models:
70
- console.print("[bold yellow]⚠️ No models found for this provider.[/]")
85
+ console.print(t("onboarding.no_models"))
71
86
  raise typer.Abort()
72
87
 
73
- console.print("\n[bold]Select a model:[/]")
88
+ console.print(t("onboarding.select_model"))
74
89
  for i, model in enumerate(models):
75
90
  name_display = f" ({model.name})" if model.name else ""
76
91
  console.print(f"{i+1}) [cyan]{model.id}[/]{name_display}")
77
92
 
78
- choice = typer.prompt("\nChoose a model number", type=int, default=1)
93
+ choice = typer.prompt(t("onboarding.choice_prompt"), type=int, default=1)
79
94
  if 1 <= choice <= len(models):
80
95
  selected_model = models[choice-1].id
81
96
  else:
82
- console.print("[bold red]Invalid choice.[/]")
97
+ console.print(t("onboarding.invalid_choice"))
83
98
  raise typer.Abort()
84
99
 
100
+ # New: Preferred Language for UI (Visual Sync Test)
101
+ lang_choice = typer.prompt(t("onboarding.lang_choice"), default="en")
102
+
103
+ # New: Creativity (Temperature) setting
104
+ creativity = typer.prompt(t("onboarding.creativity_prompt"), default=0.7, type=float)
105
+
85
106
  # Determine if we should save the key or use an env ref
86
- use_env = typer.confirm("Would you like to use an environment variable for the API key? (Recommended)", default=True)
107
+ use_env = typer.confirm(t("onboarding.env_confirm"), default=True)
87
108
 
88
109
  final_key_ref = api_key
89
110
  if use_env and provider.name != "ollama":
90
111
  env_var_name = f"{provider.name.upper()}_API_KEY"
91
- console.print(f"[dim]Please ensure you set [bold]{env_var_name}[/] in your .env or shell.[/]")
112
+ console.print(t("onboarding.env_instruction", env_var=env_var_name))
92
113
  final_key_ref = f"env:{env_var_name}"
93
114
 
94
115
  config = {
95
116
  "model": {
96
117
  "provider": provider.name,
97
118
  "name": selected_model,
98
- "api_key": final_key_ref
119
+ "api_key": final_key_ref,
120
+ "language": lang_choice,
121
+ "temperature": creativity
99
122
  }
100
123
  }
101
124
 
102
125
  save_config(config)
103
- console.print(f"\n[bold green]✨ Configuration saved![/]")
126
+ console.print(t("onboarding.saved"))
104
127
  console.print(f"Model: [bold]{selected_model}[/]")
105
128
  console.print(f"Key reference: [dim]{final_key_ref}[/]")
106
- console.print("\n[dim]Configuration stored at ~/.akita/config.toml[/]\n")
129
+ console.print(t("onboarding.saved_location", path="~/.akita/config.toml"))
107
130
 
108
131
  @app.command()
109
132
  def review(
@@ -161,7 +184,7 @@ def review(
161
184
 
162
185
  @app.command()
163
186
  def solve(
164
- query: str,
187
+ query: Optional[str] = typer.Argument(None, help="The task for Akita to solve."),
165
188
  interactive: bool = typer.Option(False, "--interactive", "-i", help="Run in interactive mode to refine the solution."),
166
189
  trace: bool = typer.Option(False, "--trace", help="Show the internal reasoning trace."),
167
190
  dry_run: bool = typer.Option(False, "--dry-run", help="Run in dry-run mode.")
@@ -169,50 +192,71 @@ def solve(
169
192
  """
170
193
  Generate and apply a solution for the given query.
171
194
  """
195
+ # Interactive Input if no query provided
196
+ if not query:
197
+ console.print(t("solve.input_prompt"))
198
+ lines = []
199
+ try:
200
+ while True:
201
+ line = input()
202
+ lines.append(line)
203
+ except EOFError:
204
+ pass
205
+ query = "\n".join(lines).strip()
206
+
207
+ if not query:
208
+ console.print("[yellow]Empty query. Exiting.[/]")
209
+ raise typer.Exit()
210
+
172
211
  model = get_model()
173
212
  engine = ReasoningEngine(model)
174
- console.print(Panel(f"[bold blue]Akita[/] is thinking about: [italic]{query}[/]", title="Solve Mode"))
213
+ console.print(Panel(f"[bold blue]Akita[/] is thinking about: [italic]{query}[/]", title=t("solve.mode_title")))
175
214
 
176
215
  current_query = query
177
216
  session = None
178
217
 
179
218
  try:
180
219
  while True:
220
+ # Pass interactive session if reusing context
181
221
  diff_output = engine.run_solve(current_query, session=session)
182
222
  session = engine.session
183
223
 
184
224
  if trace:
185
- console.print(Panel(str(engine.trace), title="[bold cyan]Reasoning Trace[/]", border_style="cyan"))
186
- console.print(Panel("[bold green]Suggested Code Changes (Unified Diff):[/]"))
225
+ console.print(Panel(str(engine.trace), title=t("solve.trace_title"), border_style="cyan"))
226
+
227
+ console.print(Panel(t("solve.diff_title")))
187
228
  syntax = Syntax(diff_output, "diff", theme="monokai", line_numbers=True)
188
229
  console.print(syntax)
189
230
 
231
+ # The refinement loop is offered only when the --interactive flag is set;
232
+ # a query captured via stdin does not by itself enable refinement.
233
+ # This matches the spec's "interactive solve" behavior.
190
234
  if interactive:
191
- action = typer.prompt("\n[A]pprove, [R]efine with feedback, or [C]ancel?", default="A").upper()
235
+ action = typer.prompt(t("solve.interactive_prompt"), default="A").upper()
192
236
  if action == "A":
193
237
  break
194
238
  elif action == "R":
195
- current_query = typer.prompt("Enter your feedback/refinement")
239
+ current_query = typer.prompt(t("solve.refine_prompt"))
196
240
  continue
197
241
  else:
198
- console.print("[yellow]Operation cancelled.[/]")
242
+ console.print(t("solve.cancelled"))
199
243
  return
200
244
  else:
201
245
  break
202
246
 
203
247
  if not dry_run:
204
- confirm = typer.confirm("\nDo you want to apply these changes?")
248
+ confirm = typer.confirm(t("solve.confirm_apply"))
205
249
  if confirm:
206
- console.print("[bold yellow]🚀 Applying changes...[/]")
250
+ console.print(t("solve.applying"))
207
251
  success = DiffApplier.apply_unified_diff(diff_output)
208
252
  if success:
209
- console.print("[bold green]✅ Changes applied successfully![/]")
253
+ console.print(t("solve.success"))
210
254
  else:
211
- console.print("[bold red]❌ Failed to apply changes.[/]")
255
+ console.print(t("solve.failed"))
212
256
  else:
213
- console.print("[bold yellow]Changes discarded.[/]")
257
+ console.print(t("solve.discarded"))
214
258
  except Exception as e:
215
- console.print(f"[bold red]Solve failed:[/] {e}")
259
+ console.print(t("error.solve_failed", error=e))
216
260
  raise typer.Exit(code=1)
217
261
 
218
262
  @app.command()
@@ -313,34 +357,59 @@ def docs():
313
357
  config_app = typer.Typer(help="Manage AkitaLLM configuration.")
314
358
  app.add_typer(config_app, name="config")
315
359
 
316
- @config_app.command("model")
317
- def config_model(
318
- reset: bool = typer.Option(False, "--reset", help="Reset configuration to defaults.")
319
- ):
360
+ @config_app.callback(invoke_without_command=True)
361
+ def main_config(ctx: typer.Context):
320
362
  """
321
- View or change the model configuration.
363
+ Manage AkitaLLM configuration via interactive menu.
322
364
  """
323
- if reset:
324
- if typer.confirm("Are you sure you want to delete your configuration?"):
325
- reset_config()
326
- console.print("[bold green]✅ Configuration reset. Onboarding will run on next command.[/]")
365
+ if ctx.invoked_subcommand:
327
366
  return
328
367
 
329
- config = load_config()
330
- if not config:
331
- console.print("[yellow]No configuration found. Running setup...[/]")
332
- run_onboarding()
333
- config = load_config()
368
+ while True:
369
+ console.print(Panel(
370
+ f"{t('config.menu.option.model')}\n"
371
+ f"{t('config.menu.option.language')}\n"
372
+ f"{t('config.menu.option.show')}\n"
373
+ f"{t('config.menu.option.exit')}",
374
+ title=t("config.menu.title")
375
+ ))
376
+
377
+ choice = typer.prompt(t("config.menu.prompt"), default="3")
378
+
379
+ if choice == "1":
380
+ # Model
381
+ if typer.confirm("Re-run model setup?"):
382
+ run_onboarding()
383
+ elif choice == "2":
384
+ # Language
385
+ lang = typer.prompt(t("onboarding.lang_choice"), default="en")
386
+ config = load_config() or {}
387
+ if "model" not in config: config["model"] = {}
388
+ config["model"]["language"] = lang
389
+ save_config(config)
390
+ console.print(t("onboarding.saved"))
391
+ elif choice == "3":
392
+ # Show
393
+ config = load_config()
394
+ if not config:
395
+ console.print("[yellow]No config.[/]")
396
+ continue
397
+ console.print(Panel(
398
+ f"Provider: [yellow]{config.get('model', {}).get('provider')}[/]\n"
399
+ f"Name: [yellow]{config.get('model', {}).get('name')}[/]\n"
400
+ f"Language: [yellow]{config.get('model', {}).get('language')}[/]",
401
+ title=t("config.current_title")
402
+ ))
403
+ typer.prompt("Press Enter to continue")
404
+ elif choice == "4":
405
+ break
406
+ else:
407
+ console.print(t("onboarding.invalid_choice"))
334
408
 
335
- console.print(Panel(
336
- f"[bold blue]Current Model Configuration[/]\n\n"
337
- f"Provider: [yellow]{config['model']['provider']}[/]\n"
338
- f"Name: [yellow]{config['model']['name']}[/]",
339
- title="Settings"
340
- ))
341
-
342
- if typer.confirm("Do you want to change these settings?"):
343
- run_onboarding()
409
+ @config_app.command("model")
410
+ def config_model_legacy():
411
+ """Legacy command for scripting."""
412
+ run_onboarding()
344
413
 
345
414
  if __name__ == "__main__":
346
415
  app()
akita/core/config.py CHANGED
@@ -14,6 +14,7 @@ DEFAULT_CONFIG = {
14
14
  "model": {
15
15
  "provider": "openai",
16
16
  "name": "gpt-4o-mini",
17
+ "language": "en",
17
18
  }
18
19
  }
19
20
 
akita/core/i18n.py ADDED
@@ -0,0 +1,123 @@
1
+ from typing import Dict, Any, Optional
2
+ from akita.core.config import get_config_value
3
+
4
+ TRANSLATIONS = {
5
+ "en": {
6
+ "onboarding.welcome": "[bold cyan]AkitaLLM Configuration[/]\n\n[italic]API-first setup...[/]",
7
+ "onboarding.api_key_prompt": "🔑 Paste your API Key (or type 'ollama' for local)",
8
+ "onboarding.provider_detected": "[bold green]✅ Detected Provider:[/] {provider}",
9
+ "onboarding.models_consulting": "[bold blue]Consulting {provider} API for available models...",
10
+ "onboarding.models_failed": "[bold red]❌ Failed to list models:[/] {error}",
11
+ "onboarding.no_models": "[bold yellow]⚠️ No models found for this provider.[/]",
12
+ "onboarding.select_model": "\n[bold]Select a model:[/]",
13
+ "onboarding.choice_prompt": "\nChoose a model number",
14
+ "onboarding.invalid_choice": "[bold red]Invalid choice.[/]",
15
+ "onboarding.lang_choice": "🌍 Select preferred UI language (en/pt/es)",
16
+ "onboarding.creativity_prompt": "🎨 Creativity level (0.0=precise, 1.0=creative)",
17
+ "onboarding.env_confirm": "Would you like to use an environment variable for the API key? (Recommended)",
18
+ "onboarding.env_instruction": "[dim]Please ensure you set [bold]{env_var}[/] in your .env or shell.[/]",
19
+ "onboarding.saved": "\n[bold green]✨ Configuration saved![/]",
20
+ "onboarding.saved_location": "\n[dim]Configuration stored at {path}[/]\n",
21
+
22
+ "solve.thinking": "🤖 [bold green]Thinking...[/]",
23
+ "solve.mode_title": "Solve Mode",
24
+ "solve.trace_title": "[bold cyan]Reasoning Trace[/]",
25
+ "solve.diff_title": "[bold green]Suggested Code Changes (Unified Diff):[/]",
26
+ "solve.interactive_prompt": "\n[A]pprove, [R]efine with feedback, or [C]ancel?",
27
+ "solve.refine_prompt": "Enter your feedback/refinement",
28
+ "solve.cancelled": "[yellow]Operation cancelled.[/]",
29
+ "solve.applying": "[bold yellow]🚀 Applying changes...[/]",
30
+ "solve.success": "[bold green]✅ Changes applied successfully![/]",
31
+ "solve.failed": "[bold red]❌ Failed to apply changes.[/]",
32
+ "solve.discarded": "[bold yellow]Changes discarded.[/]",
33
+ "solve.confirm_apply": "\nDo you want to apply these changes?",
34
+ "solve.input_prompt": "[bold cyan]Describe your task (Ctrl+D to finish):[/]",
35
+
36
+ "error.solve_failed": "[bold red]Solve failed:[/] {error}",
37
+ "error.validation": "Solve aborted: Model returned invalid content ({type}).",
38
+
39
+ "config.menu.title": "Configuration Menu",
40
+ "config.menu.option.model": "1. Change Model",
41
+ "config.menu.option.language": "2. Change Language",
42
+ "config.menu.option.show": "3. Show Current Config",
43
+ "config.menu.option.exit": "4. Exit",
44
+ "config.menu.prompt": "Select an option",
45
+ "config.current_title": "Current Configuration",
46
+
47
+ "welcome.title": "[bold cyan]Welcome to AkitaLLM[/]",
48
+ "welcome.subtitle": "A deterministic AI orchestrator for programmers.",
49
+ "welcome.help_hint": "[dim]Run [bold]akita --help[/] to see all commands.[/]",
50
+ "welcome.commands": "[bold]Common Commands:[/]\n- [cyan]akita solve[/]: Solve a coding task\n- [cyan]akita review[/]: Audit current directory\n- [cyan]akita config[/]: Manage settings",
51
+ },
52
+ "pt": {
53
+ "onboarding.welcome": "[bold cyan]Configuração do AkitaLLM[/]\n\n[italic]Configuração API-first...[/]",
54
+ "onboarding.api_key_prompt": "🔑 Cole sua API Key (ou digite 'ollama' para local)",
55
+ "onboarding.provider_detected": "[bold green]✅ Provedor Detectado:[/] {provider}",
56
+ "onboarding.models_consulting": "[bold blue]Consultando API {provider} para modelos disponíveis...",
57
+ "onboarding.models_failed": "[bold red]❌ Falha ao listar modelos:[/] {error}",
58
+ "onboarding.no_models": "[bold yellow]⚠️ Nenhum modelo encontrado para este provedor.[/]",
59
+ "onboarding.select_model": "\n[bold]Selecione um modelo:[/]",
60
+ "onboarding.choice_prompt": "\nEscolha o número do modelo",
61
+ "onboarding.invalid_choice": "[bold red]Escolha inválida.[/]",
62
+ "onboarding.lang_choice": "🌍 Escolha o idioma da UI (en/pt/es)",
63
+ "onboarding.creativity_prompt": "🎨 Nível de criatividade (0.0=preciso, 1.0=criativo)",
64
+ "onboarding.env_confirm": "Deseja usar uma variável de ambiente para a API key? (Recomendado)",
65
+ "onboarding.env_instruction": "[dim]Por favor, certifique-se de que [bold]{env_var}[/] está definida no seu .env ou shell.[/]",
66
+ "onboarding.saved": "\n[bold green]✨ Configuração salva![/]",
67
+ "onboarding.saved_location": "\n[dim]Configuração salva em {path}[/]\n",
68
+
69
+ "solve.thinking": "🤖 [bold green]Pensando...[/]",
70
+ "solve.mode_title": "Modo Solução",
71
+ "solve.trace_title": "[bold cyan]Rastro de Raciocínio[/]",
72
+ "solve.diff_title": "[bold green]Mudanças Sugeridas (Unified Diff):[/]",
73
+ "solve.interactive_prompt": "\n[A]provar, [R]efinar com feedback, ou [C]ancelar?",
74
+ "solve.refine_prompt": "Digite seu feedback/refinamento",
75
+ "solve.cancelled": "[yellow]Operação cancelada.[/]",
76
+ "solve.applying": "[bold yellow]🚀 Aplicando mudanças...[/]",
77
+ "solve.success": "[bold green]✅ Mudanças aplicadas com sucesso![/]",
78
+ "solve.failed": "[bold red]❌ Falha ao aplicar mudanças.[/]",
79
+ "solve.discarded": "[bold yellow]Mudanças descartadas.[/]",
80
+ "solve.confirm_apply": "\nDeseja aplicar essas mudanças?",
81
+ "solve.input_prompt": "[bold cyan]Descreva sua tarefa (Ctrl+D para finalizar):[/]",
82
+
83
+ "error.solve_failed": "[bold red]Solução falhou:[/] {error}",
84
+ "error.validation": "Solução abortada: Modelo retornou conteúdo inválido ({type}).",
85
+
86
+ "config.menu.title": "Menu de Configuração",
87
+ "config.menu.option.model": "1. Alterar Modelo",
88
+ "config.menu.option.language": "2. Alterar Idioma",
89
+ "config.menu.option.show": "3. Mostrar Config Atual",
90
+ "config.menu.option.exit": "4. Sair",
91
+ "config.menu.prompt": "Selecione uma opção",
92
+ "config.current_title": "Configuração Atual",
93
+
94
+ "welcome.title": "[bold cyan]Bem-vindo ao AkitaLLM[/]",
95
+ "welcome.subtitle": "Um orquestrador de IA determinístico para programadores.",
96
+ "welcome.help_hint": "[dim]Execute [bold]akita --help[/] para ver todos os comandos.[/]",
97
+ "welcome.commands": "[bold]Comandos Comuns:[/]\n- [cyan]akita solve[/]: Resolver uma tarefa\n- [cyan]akita review[/]: Auditar diretório\n- [cyan]akita config[/]: Gerenciar configurações",
98
+ }
99
+ }
100
+
101
+ def t(key: str, **kwargs) -> str:
102
+ """
103
+ Get a translated string for the given key.
104
+ Uses 'model.language' from config, defaulting to 'en'.
105
+ Falls back to 'en' if key is missing in target language.
106
+ """
107
+ lang = get_config_value("model", "language", default="en")
108
+
109
+ # Support 'es' mapping to 'en' or 'pt' or its own if added later.
110
+ # For now, let's map unknown langs to 'en'.
111
+ if lang not in TRANSLATIONS:
112
+ lang = "en"
113
+
114
+ text = TRANSLATIONS.get(lang, {}).get(key)
115
+
116
+ # Fallback to English
117
+ if text is None:
118
+ text = TRANSLATIONS["en"].get(key, key)
119
+
120
+ try:
121
+ return text.format(**kwargs)
122
+ except KeyError:
123
+ return text
akita/models/base.py CHANGED
@@ -12,21 +12,26 @@ class ModelResponse(BaseModel):
12
12
  raw: Any
13
13
 
14
14
  class AIModel:
15
- def __init__(self, model_name: str, api_key: Optional[str] = None, base_url: Optional[str] = None):
15
+ def __init__(self, model_name: str, api_key: Optional[str] = None, base_url: Optional[str] = None, temperature: float = 0.7):
16
16
  self.model_name = model_name
17
17
  self.api_key = api_key
18
18
  self.base_url = base_url
19
+ self.temperature = temperature
19
20
 
20
21
  def chat(self, messages: List[Dict[str, str]], **kwargs) -> ModelResponse:
21
22
  """
22
23
  Send a chat completion request.
23
24
  """
25
+ # Merge global temperature with specific kwargs if provided
26
+ request_kwargs = {"temperature": self.temperature}
27
+ request_kwargs.update(kwargs)
28
+
24
29
  response = litellm.completion(
25
30
  model=self.model_name,
26
31
  messages=messages,
27
32
  api_key=self.api_key,
28
33
  base_url=self.base_url,
29
- **kwargs
34
+ **request_kwargs
30
35
  )
31
36
  content = response.choices[0].message.content
32
37
  return ModelResponse(content=content, raw=response)
@@ -37,6 +42,7 @@ def get_model(model_name: Optional[str] = None) -> AIModel:
37
42
  """
38
43
  provider = get_config_value("model", "provider", "openai")
39
44
  api_key = get_config_value("model", "api_key")
45
+ temperature = get_config_value("model", "temperature", 0.7)
40
46
 
41
47
  if model_name is None:
42
48
  model_name = get_config_value("model", "name", "gpt-4o-mini")
@@ -49,5 +55,5 @@ def get_model(model_name: Optional[str] = None) -> AIModel:
49
55
  else:
50
56
  full_model_name = f"{provider}/{model_name}"
51
57
 
52
- # For Ollama, we might need a base_url, but for now we assume default
53
- return AIModel(model_name=full_model_name, api_key=api_key)
58
+ # Pass temperature to the model
59
+ return AIModel(model_name=full_model_name, api_key=api_key, temperature=temperature)
akita/reasoning/engine.py CHANGED
@@ -7,6 +7,8 @@ from akita.schemas.review import ReviewResult
7
7
  from akita.core.trace import ReasoningTrace
8
8
  from akita.reasoning.session import ConversationSession
9
9
  import json
10
+ from akita.core.i18n import t
11
+ import pydantic
10
12
  from rich.console import Console
11
13
 
12
14
  console = Console()
@@ -133,8 +135,37 @@ class ReasoningEngine:
133
135
  else:
134
136
  session.add_message("user", query)
135
137
 
136
- console.print("🤖 [bold green]Thinking...[/]")
137
- response = self.model.chat(session.get_messages_dict())
138
+ # --- ROBUST EXECUTION ---
139
+ try:
140
+ console.print(t("solve.thinking"))
141
+ response = self.model.chat(session.get_messages_dict())
142
+ except pydantic.ValidationError as e:
143
+ # Friendly error for output contract violations
144
+ error_msg = t("error.validation", type=str(e))
145
+ console.print(f"[bold red]{error_msg}[/]")
146
+ # Log debug info if needed
147
+ raise ValueError(error_msg)
148
+ except Exception as e:
149
+ if "validation error" in str(e).lower():
150
+ error_msg = t("error.validation", type="ModelResponse")
151
+ console.print(f"[bold red]{error_msg}[/]")
152
+ raise ValueError(error_msg)
153
+ raise e
154
+
155
+ # --- CONTRACT ENFORCEMENT ---
156
+ if not isinstance(response.content, str):
157
+ error_msg = t("error.validation", type=type(response.content))
158
+ console.print(f"[bold red]{error_msg}[/]")
159
+ raise ValueError(error_msg)
160
+
161
+ if "+++" not in response.content or "---" not in response.content:
162
+ # Strictly require unified-diff headers (+++/---): a response without
163
+ # them cannot be applied as a patch, so abort early for safety.
164
+ error_msg = "Solve aborted: Model returned content without Unified Diff headers (+++/---)."
165
+ console.print(f"[bold red]{error_msg}[/]")
166
+ console.print(f"[dim]Output start: {response.content[:100]}...[/]")
167
+ raise ValueError(error_msg)
168
+
138
169
  session.add_message("assistant", response.content)
139
170
 
140
171
  self.trace.add_step("LLM Response", "Received solution from model")
akita/tools/diff.py CHANGED
@@ -3,107 +3,164 @@ import shutil
3
3
  import pathlib
4
4
  from pathlib import Path
5
5
  import whatthepatch
6
- from typing import List, Tuple, Optional
6
+ from typing import List, Tuple, Optional, Any
7
7
 
8
8
  class DiffApplier:
9
+ @staticmethod
10
+ def validate_diff_context(patch: Any, file_content: str) -> bool:
11
+ """
12
+ Strictly validates that the context lines in the patch exist in the file content
13
+ at the expected locations.
14
+ """
15
+ if not patch.changes:
16
+ return True
17
+
18
+ file_lines = file_content.splitlines()
19
+
20
+ # We need to simulate the patch application to check context
21
+ # whatthepatch.apply_diff does this, returning None if context doesn't match
22
+ try:
23
+ result = whatthepatch.apply_diff(patch, file_lines)
24
+ return result is not None
25
+ except Exception:
26
+ return False
27
+
9
28
  @staticmethod
10
29
  def apply_unified_diff(diff_text: str, base_path: str = ".") -> bool:
11
30
  """
12
31
  Applies a unified diff to files in the base_path.
13
- Includes backup and rollback logic for atomicity.
32
+ Includes PRE-FLIGHT DRY-RUN, strict context checking, and then atomic application.
14
33
  """
15
- patches = list(whatthepatch.parse_patch(diff_text))
34
+ try:
35
+ patches = list(whatthepatch.parse_patch(diff_text))
36
+ except Exception as e:
37
+ print(f"❌ ERROR: Failed to parse diff: {e}")
38
+ return False
39
+
16
40
  if not patches:
17
- print("ERROR: No valid patches found in the diff text.")
41
+ print("ERROR: No valid patches found in the diff text.")
18
42
  return False
19
43
 
20
- backups: List[Tuple[Path, Path]] = []
21
44
  base = Path(base_path)
22
45
  backup_dir = base / ".akita" / "backups"
23
- backup_dir.mkdir(parents=True, exist_ok=True)
46
+
47
+ # --- PHASE 1: PRE-FLIGHT VALIDATION (DRY RUN) ---
48
+ print("🛡️ Running Structural Guard (Dry Run)...")
49
+ pending_changes: List[Tuple[Path, List[str]]] = []
50
+
51
+ for patch in patches:
52
+ if not patch.header:
53
+ continue
54
+
55
+ # Resolve path
56
+ rel_path = patch.header.new_path
57
+ is_new = (patch.header.old_path == "/dev/null")
58
+ is_delete = (patch.header.new_path == "/dev/null")
24
59
 
25
- try:
26
- for patch in patches:
27
- if not patch.header:
28
- continue
29
-
30
- # whatthepatch identifies the target file in the header
31
- # We usually want the 'new' filename (the +++ part)
60
+ if is_new:
32
61
  rel_path = patch.header.new_path
33
- is_new = (patch.header.old_path == "/dev/null")
34
- is_delete = (patch.header.new_path == "/dev/null")
62
+ elif is_delete:
63
+ rel_path = patch.header.old_path
64
+ else:
65
+ rel_path = patch.header.new_path or patch.header.old_path
35
66
 
36
- if is_new:
37
- rel_path = patch.header.new_path
38
- elif is_delete:
39
- rel_path = patch.header.old_path
40
- else:
41
- rel_path = patch.header.new_path or patch.header.old_path
42
-
43
- if not rel_path or rel_path == "/dev/null":
44
- continue
45
-
46
- # Clean up path (sometimes they have a/ or b/ prefixes)
47
- if rel_path.startswith("a/") or rel_path.startswith("b/"):
48
- rel_path = rel_path[2:]
67
+ if not rel_path or rel_path == "/dev/null":
68
+ continue
69
+
70
+ # Clean up path
71
+ if rel_path.startswith("a/") or rel_path.startswith("b/"):
72
+ rel_path = rel_path[2:]
73
+
74
+ target_file = (base / rel_path).resolve()
75
+
76
+ # Check existence scenarios
77
+ if not is_new and not target_file.exists():
78
+ print(f" ERROR: Target file {target_file} does not exist.")
79
+ return False
49
80
 
50
- target_file = (base / rel_path).resolve()
81
+ # Read content
82
+ content = ""
83
+ if target_file.exists():
84
+ try:
85
+ with open(target_file, "r", encoding="utf-8") as f:
86
+ content = f.read()
87
+ except UnicodeDecodeError:
88
+ print(f"❌ ERROR: Could not verify context for binary/non-utf8 file: {rel_path}")
89
+ return False
90
+
91
+ # Strict Context Check & Dry Apply
92
+ if not is_new:
93
+ # This uses whatthepatch's internal context verification
94
+ # If it raises HunkApplyException or returns None, it means context mismatch
95
+ try:
96
+ new_lines = whatthepatch.apply_diff(patch, content.splitlines())
97
+ except Exception as e:
98
+ new_lines = None # Treat exception as failure
51
99
 
52
- if not is_new and not target_file.exists():
53
- print(f"ERROR: Target file {target_file} does not exist for patching.")
100
+ if new_lines is None:
101
+ print(f"ERROR: Context Mismatch in {rel_path}.")
102
+ print(" The code the AI 'saw' does not match the file on disk.")
103
+ print(" Action aborted to prevent corruption.")
54
104
  return False
105
+ pending_changes.append((target_file, new_lines))
106
+ elif is_new:
107
+ # valid new file
108
+ # reconstruct from patch changes for new file
109
+ new_lines = []
110
+ for change in patch.changes:
111
+ if change.line is not None:
112
+ new_lines.append(change.line)
113
+ pending_changes.append((target_file, new_lines))
114
+ elif is_delete:
115
+ # We mark for deletion by setting new_lines to None
116
+ pending_changes.append((target_file, None))
117
+
118
+ print("✅ Structural Guard Passed. Applying changes...")
55
119
 
56
- # 1. Create backup
120
+ # --- PHASE 2: ATOMIC APPLICATION ---
121
+ backups: List[Tuple[Path, Path]] = []
122
+ backup_dir.mkdir(parents=True, exist_ok=True)
123
+
124
+ try:
125
+ for target_file, new_lines in pending_changes:
126
+ # 1. Backup
57
127
  if target_file.exists():
58
128
  backup_file = backup_dir / f"{target_file.name}.bak"
59
129
  shutil.copy2(target_file, backup_file)
60
130
  backups.append((target_file, backup_file))
61
131
  else:
62
- backups.append((target_file, None)) # Mark for deletion on rollback if it's a new file
132
+ backups.append((target_file, None))
63
133
 
64
- # 2. Apply patch
65
- content = ""
66
- if target_file.exists():
67
- with open(target_file, "r", encoding="utf-8") as f:
68
- content = f.read()
69
-
70
- lines = content.splitlines()
71
- # whatthepatch apply_diff returns a generator of lines
72
- patched_lines = whatthepatch.apply_diff(patch, lines)
73
-
74
- if patched_lines is None:
75
- print(f"ERROR: Failed to apply patch to {rel_path}.")
76
- raise Exception(f"Patch failure on {rel_path}")
77
-
78
- # 3. Write new content
79
- target_file.parent.mkdir(parents=True, exist_ok=True)
80
- with open(target_file, "w", encoding="utf-8") as f:
81
- f.write("\n".join(patched_lines) + "\n")
82
-
83
- print(f"SUCCESS: Applied {len(patches)} patches successfully.")
134
+ # 2. Write (or delete)
135
+ if new_lines is None: # Delete
136
+ target_file.unlink()
137
+ else:
138
+ target_file.parent.mkdir(parents=True, exist_ok=True)
139
+ with open(target_file, "w", encoding="utf-8") as f:
140
+ f.write("\n".join(new_lines) + "\n")
141
+
142
+ print(f"SUCCESS: Applied changes to {len(pending_changes)} files.")
84
143
 
85
- # 4. Pre-flight Validation
86
- # Run tests to ensure the patch didn't break anything
144
+ # 3. Post-flight Validation (Tests)
87
145
  if (base / "tests").exists():
88
- print("🧪 Running pre-flight validation (pytest)...")
146
+ print("🧪 Running post-flight validation (pytest)...")
89
147
  import subprocess
90
- # Run pytest in the base_path
91
148
  result = subprocess.run(["pytest"], cwd=str(base), capture_output=True, text=True)
92
149
  if result.returncode != 0:
93
150
  print(f"❌ Validation FAILED:\n{result.stdout}")
94
- raise Exception("Pre-flight validation failed. Tests are broken.")
151
+ raise Exception("Post-flight validation failed. Tests are broken.")
95
152
  else:
96
- print("✅ Pre-flight validation passed!")
153
+ print("✅ Post-flight validation passed!")
97
154
 
98
155
  return True
99
156
 
100
157
  except Exception as e:
101
- print(f"CRITICAL ERROR: {e}. Starting rollback...")
158
+ print(f"CRITICAL ERROR during write: {e}. Starting rollback...")
102
159
  for target, backup in backups:
103
160
  if backup and backup.exists():
104
161
  shutil.move(str(backup), str(target))
105
162
  elif not backup and target.exists():
106
- target.unlink() # Delete newly created file
163
+ target.unlink()
107
164
  return False
108
165
 
109
166
  @staticmethod
@@ -0,0 +1,217 @@
1
+ Metadata-Version: 2.4
2
+ Name: akitallm
3
+ Version: 1.2.0
4
+ Summary: AkitaLLM: An open-source local-first AI system for programming.
5
+ Author: KerubinDev
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/KerubinDev/AkitaLLM
8
+ Project-URL: Repository, https://github.com/KerubinDev/AkitaLLM
9
+ Project-URL: Issues, https://github.com/KerubinDev/AkitaLLM/issues
10
+ Keywords: ai,cli,programming,local-first,llm
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.10
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Requires-Python: >=3.10
19
+ Description-Content-Type: text/markdown
20
+ License-File: LICENSE
21
+ Requires-Dist: typer[all]
22
+ Requires-Dist: litellm
23
+ Requires-Dist: pydantic
24
+ Requires-Dist: rich
25
+ Requires-Dist: python-dotenv
26
+ Requires-Dist: pytest
27
+ Requires-Dist: pytest-mock
28
+ Requires-Dist: gitpython
29
+ Requires-Dist: tomli-w
30
+ Requires-Dist: tomli
31
+ Requires-Dist: whatthepatch>=1.0.5
32
+ Requires-Dist: tree-sitter>=0.21.3
33
+ Requires-Dist: tree-sitter-python>=0.21.0
34
+ Dynamic: license-file
35
+
36
+ # AkitaLLM
37
+
38
+ ![PyPI](https://img.shields.io/pypi/v/akitallm)
39
+ ![Python](https://img.shields.io/pypi/pyversions/akitallm)
40
+ ![License](https://img.shields.io/github/license/KerubinDev/AkitaLLM)
41
+ [![Tests](https://img.shields.io/github/actions/workflow/status/KerubinDev/AkitaLLM/tests.yml)](https://github.com/KerubinDev/AkitaLLM/actions)
42
+ ![Downloads](https://img.shields.io/pypi/dm/akitallm)
43
+
44
+
45
+ ```
46
+
47
+ Analyze → Plan → Execute → Validate
48
+
49
+ ```
50
+
51
+ **A deterministic, local-first AI orchestrator for software engineers.**
52
+
53
+ AkitaLLM is not a chat interface.
54
+ It is not autocomplete.
55
+ It is not “AI magic”.
56
+
57
+ It is an engineering tool.
58
+
59
+ ---
60
+
61
+ ## What AkitaLLM is (and what it is not)
62
+
63
+ AkitaLLM treats Large Language Models as **non-deterministic execution engines** that must operate inside a **strict, auditable pipeline**.
64
+
65
+ Instead of asking an AI *“please fix my code”*, you force it to:
66
+
67
+ 1. **Analyze** the real project structure
68
+ 2. **Plan** concrete technical steps
69
+ 3. **Execute** changes as reviewable diffs
70
+ 4. **Validate** results with real tooling
71
+
72
+ No hidden prompts.
73
+ No blind edits.
74
+ No guessing.
75
+
76
+ ---
77
+
78
+ ## Why this project exists
79
+
80
+ Most AI coding tools optimize for **speed of output**.
81
+
82
+ Software engineering optimizes for:
83
+ - correctness
84
+ - predictability
85
+ - debuggability
86
+ - long-term maintainability
87
+
88
+ That mismatch causes real problems:
89
+
90
+ - Code is generated without understanding the project
91
+ - Developers approve changes they don’t fully understand
92
+ - Bugs are pushed faster, not fewer
93
+
94
+ AkitaLLM exists to **slow AI down** and force it to behave like a junior engineer working under strict supervision.
95
+
96
+ ---
97
+
98
+ ## The core difference
99
+
100
+ | Aspect | Typical AI Tools | AkitaLLM |
101
+ |------|-----------------|----------|
102
+ | Interaction | Chat / Autocomplete | Structured pipeline |
103
+ | Control | Implicit | Explicit and reviewable |
104
+ | Output | Raw code | Unified diffs |
105
+ | Context | Prompt-limited | Project-aware |
106
+ | Validation | Manual | Automated |
107
+ | Philosophy | “Trust the model” | “Trust the process” |
108
+
109
+ ---
110
+
111
+ ## Design principles
112
+
113
+ **Local-first**
114
+ Your code stays on your machine. AkitaLLM runs locally and only sends what is strictly necessary to the model.
115
+
116
+ **No magic**
117
+ Every decision is logged. Every step is inspectable. Every change is explicit.
118
+
119
+ **Tool-driven**
120
+ The AI uses tools (AST parsing, tests, linters). It does not replace them.
121
+
122
+ **Human-in-the-loop**
123
+ Nothing is applied without your approval.
124
+
125
+ ---
126
+
127
+ ## What AkitaLLM can do today
128
+
129
+ - 🔍 **Structural code reviews**
130
+ Detect bugs, architectural risks, performance issues, and security problems.
131
+
132
+ - 🧭 **Technical planning**
133
+ Generate step-by-step implementation plans in Markdown.
134
+
135
+ - 🧩 **Diff-based solutions**
136
+ Propose changes as standard unified diffs — no direct file mutation.
137
+
138
+ - 🧪 **Local validation**
139
+ Run tests and tooling before applying changes.
140
+
141
+ - 🔌 **Extensible architecture**
142
+ Plugin system for custom tools and workflows.
143
+
144
+ - 🤖 **Model agnostic**
145
+ Works with OpenAI, Anthropic, Ollama, and any LiteLLM-compatible provider.
146
+
147
+ ---
148
+
149
+ ## Installation
150
+
151
+ ```bash
152
+ pip install akitallm
153
+ ```
154
+
155
+ Python 3.10+ required.
156
+
157
+ ---
158
+
159
+ ## Basic usage
160
+
161
+ ### Initialize / Review a project
162
+
163
+ ```bash
164
+ akita review .
165
+ ```
166
+
167
+ ### Generate a technical plan
168
+
169
+ ```bash
170
+ akita plan "Refactor authentication to use JWT with refresh tokens"
171
+ ```
172
+
173
+ ### Solve a concrete problem
174
+
175
+ ```bash
176
+ akita solve "Fix silent failures in the reasoning engine error handling"
177
+ ```
178
+
179
+ All commands follow the same pipeline:
180
+
181
+ ```
182
+ Analyze → Plan → Execute → Validate
183
+ ```
184
+
185
+ ---
186
+
187
+ ## Extending AkitaLLM
188
+
189
+ AkitaLLM is designed to be extended by engineers.
190
+
191
+ * Custom tools
192
+ * Custom validators
193
+ * Custom reasoning steps
194
+
195
+ See the [Plugin Development Guide](PLUGINS.md).
196
+
197
+ ---
198
+
199
+ ## Contributing
200
+
201
+ AkitaLLM is not looking for volume.
202
+ It is looking for **engineering-quality contributions**.
203
+
204
+ If you care about:
205
+
206
+ * clean abstractions
207
+ * predictable systems
208
+ * readable diffs
209
+ * testable behavior
210
+
211
+ You’ll fit right in.
212
+
213
+ See [CONTRIBUTING.md](CONTRIBUTING.md).
214
+
215
+ ---
216
+
217
+ > “Understanding the internals is the first step to excellence.”
@@ -1,24 +1,25 @@
1
- akita/__init__.py,sha256=LGVQyDsWifdACo7qztwb8RWWHds1E7uQ-ZqD8SAjyw4,22
2
- akita/cli/main.py,sha256=6jR4Dfg0HpDGkrr8kdpF6AfzLm6-5nStvsPN1VZd5as,12820
1
+ akita/__init__.py,sha256=MpAT5hgNoHnTtG1XRD_GV_A7QrHVU6vJjGSw_8qMGA4,22
2
+ akita/cli/main.py,sha256=qjBRMVgdM4sNmE05llUYlKStAhiyun84kmCc-JMgne8,15313
3
3
  akita/core/ast_utils.py,sha256=8JrTZgfWjIvbzY5KzV2G9PuyOi8IxVdLMjDCPPLiz_I,3127
4
- akita/core/config.py,sha256=0nlA5AiGjm0Kv5yUxqaHV2zA4Ld1rU_j2s2IjnYBB_Y,1656
4
+ akita/core/config.py,sha256=JlROCAxV3uHgKexClr3vr0Zrfmjg5BUa46bnCi_1K-M,1682
5
+ akita/core/i18n.py,sha256=BQlzJ38DkHClCeeiQ1jad_yAfGhv2FbgfjQ9eI_A5s0,7446
5
6
  akita/core/indexing.py,sha256=2j_NK8buZ1ugH3foa9KFQEtGOD-Lgoo2Se3Lx6Q7ZO4,3686
6
7
  akita/core/plugins.py,sha256=P3azOFJ-yTw-kDdvjmHfNiU7nfvXQFadVPRnp1O7h-c,2951
7
8
  akita/core/providers.py,sha256=SwCb2aTYJ-iNtWdah9IYzLf3vfTDA472i5nhJmsaZLs,6168
8
9
  akita/core/trace.py,sha256=AxXUVZ7P8a0l5QTK5w9iSnUncUe62FGfRzDNN9xG5dg,692
9
- akita/models/base.py,sha256=GC3WB9kMXpg1GCAX5104uovgdOZBIVIHJBq2SO8aU00,1723
10
+ akita/models/base.py,sha256=qIP7FxI8D5osZCHvosgd_64QgGoS_9bN4PJoBOs9NSM,2013
10
11
  akita/plugins/__init__.py,sha256=kfjmQqBhzhqQrH-Rd0jh0KxXyIT9T5DtEh-BETQD0FM,28
11
12
  akita/plugins/files.py,sha256=Ha4YxmCz2G7iafqdr2TRE_xRlq1oeOBo6By3_S86jkE,1113
12
- akita/reasoning/engine.py,sha256=w1gB-Y_Tzoan66T-EFd04X7V5mDcVOkSP2G6X3lmvxU,8634
13
+ akita/reasoning/engine.py,sha256=GXw-2829O_ePnOKhGeiFw415ZUW3_C6-9Irw8hF7m1M,10081
13
14
  akita/reasoning/session.py,sha256=rcJxcJXNjObjRwfuCY8NQKpKCqxeIppqkUpN-3mVRpE,472
14
15
  akita/schemas/review.py,sha256=zzjLzTuiEpJfu4etS0NUBWfS3wyNobNDmDMhb5amWTI,905
15
16
  akita/tools/base.py,sha256=jDA3jTP2qo6TjoTF6BSIb71BSfCJGSqbueIQz6lxuCM,1235
16
17
  akita/tools/context.py,sha256=i6QjKMsKCZMIdCx82hkhMUzBQJolrcch2v1x-6nLy8U,5008
17
- akita/tools/diff.py,sha256=bVH6_vHWoC9oYoS1RU4eOEnZHh6eFNtt6HCCzeGb6wY,4805
18
+ akita/tools/diff.py,sha256=d5Q3yRvCKvaCTHW3PibAu1n4AXuFq1zcEeIFD_jxhbo,6942
18
19
  akita/tools/git.py,sha256=58ZCI2ZL7NYUQdRIe3969t6gRpVmCPD8B-UbP7cPBNY,2798
19
- akitallm-1.1.0.dist-info/licenses/LICENSE,sha256=WE7_tfGR-IzkulSh6Pos02gucCXKboaXguAdr0bI9V0,1067
20
- akitallm-1.1.0.dist-info/METADATA,sha256=rpPJHCMGPciP2apqy6Z8Vaho9XTChTiP_IfrbJK3BLo,5500
21
- akitallm-1.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
22
- akitallm-1.1.0.dist-info/entry_points.txt,sha256=Au1aAXCO2lX4kgElgknSVDpq7BcN5xAJJ0WvOAkhLzU,105
23
- akitallm-1.1.0.dist-info/top_level.txt,sha256=duGU-i6qCRLqjo_b1XUqfhlSQky3QIO0Hlvfn2OV3hU,6
24
- akitallm-1.1.0.dist-info/RECORD,,
20
+ akitallm-1.2.0.dist-info/licenses/LICENSE,sha256=WE7_tfGR-IzkulSh6Pos02gucCXKboaXguAdr0bI9V0,1067
21
+ akitallm-1.2.0.dist-info/METADATA,sha256=Z1U-G8xzeOOrNakY_aVO7kbqbn-Q5QuYWiaB4RrrOIQ,5274
22
+ akitallm-1.2.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
23
+ akitallm-1.2.0.dist-info/entry_points.txt,sha256=Au1aAXCO2lX4kgElgknSVDpq7BcN5xAJJ0WvOAkhLzU,105
24
+ akitallm-1.2.0.dist-info/top_level.txt,sha256=duGU-i6qCRLqjo_b1XUqfhlSQky3QIO0Hlvfn2OV3hU,6
25
+ akitallm-1.2.0.dist-info/RECORD,,
@@ -1,140 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: akitallm
3
- Version: 1.1.0
4
- Summary: AkitaLLM: An open-source local-first AI system for programming.
5
- Author: KerubinDev
6
- License: MIT
7
- Project-URL: Homepage, https://github.com/KerubinDev/AkitaLLM
8
- Project-URL: Repository, https://github.com/KerubinDev/AkitaLLM
9
- Project-URL: Issues, https://github.com/KerubinDev/AkitaLLM/issues
10
- Keywords: ai,cli,programming,local-first,llm
11
- Classifier: Development Status :: 4 - Beta
12
- Classifier: Intended Audience :: Developers
13
- Classifier: License :: OSI Approved :: MIT License
14
- Classifier: Programming Language :: Python :: 3
15
- Classifier: Programming Language :: Python :: 3.10
16
- Classifier: Programming Language :: Python :: 3.11
17
- Classifier: Programming Language :: Python :: 3.12
18
- Requires-Python: >=3.10
19
- Description-Content-Type: text/markdown
20
- License-File: LICENSE
21
- Requires-Dist: typer[all]
22
- Requires-Dist: litellm
23
- Requires-Dist: pydantic
24
- Requires-Dist: rich
25
- Requires-Dist: python-dotenv
26
- Requires-Dist: pytest
27
- Requires-Dist: pytest-mock
28
- Requires-Dist: gitpython
29
- Requires-Dist: tomli-w
30
- Requires-Dist: tomli
31
- Requires-Dist: whatthepatch>=1.0.5
32
- Requires-Dist: tree-sitter>=0.21.3
33
- Requires-Dist: tree-sitter-python>=0.21.0
34
- Dynamic: license-file
35
-
36
- # AkitaLLM
37
- ### A deterministic, local-first AI orchestrator for software engineers.
38
-
39
- ---
40
-
41
- ## What is AkitaLLM?
42
-
43
- AkitaLLM is not another "AI wrapper." It is a command-line utility designed for developers who value engineering rigor over generative "magic." It treats Large Language Models as non-deterministic execution engines that must be constrained within a strict, auditable pipeline: **Analyze → Plan → Execute → Validate**.
44
-
45
- Built as a local-first tool, it provides you with an AI-augmented workflow that respects your project's context, follows security best practices, and prioritizes structured output over conversational noise.
46
-
47
- ---
48
-
49
- ## Why AkitaLLM exists
50
-
51
- Most current AI tools (ChatGPT, Copilot, Cursor) operate in a "black-box" conversational mode. They are excellent at text generation but often fail at **software engineering**, which requires:
52
- - **Project-Level Context**: Understanding how a change in `utils.py` affects `main.py`.
53
- - **Previsibilty**: Knowing exactly what the AI intends to do before it modifies a single byte.
54
- - **Verification**: Automatically ensuring that proposed changes don't break existing logic.
55
-
56
- AkitaLLM was built to bridge this gap, treating AI as a component of a larger, human-controlled engineering process.
57
-
58
- ---
59
-
60
- ## The Engineering Difference
61
-
62
- | Feature | Generic AI Tools | AkitaLLM |
63
- | :--- | :--- | :--- |
64
- | **Logic** | Conversational / Guesswork | Analyze → Plan → Execute → Validate |
65
- | **Control** | Autocomplete / Chat | Explicit technical plans & reviewable Diffs |
66
- | **Security** | Cloud-heavy | Local-first, respects `.gitignore` and `.env` |
67
- | **Validation** | Post-facto manual review | Automated local test execution |
68
- | **Philosophy** | "It just works" (Hype) | "Understand the internals" (Engineering) |
69
-
70
- ---
71
-
72
- ## Core Principles
73
-
74
- 1. **Local-First**: Your code remains on your machine. AkitaLLM orchestrates local models (via Ollama) or remote APIs (via LiteLLM) through encrypted, controlled channels.
75
- 2. **Contextual Awareness**: It uses recursive file scanning and structure analysis to build a high-fidelity map of your project before making suggestions.
76
- 3. **No Magic**: No hidden prompts, no mysterious "thinking" phases. All actions are logged, auditable, and based on standard engineering patterns.
77
- 4. **Tool-Driven**: AI is a user of tools (linters, test runners, AST parsers), not a replacement for them.
78
-
79
- ---
80
-
81
- ## Key Features
82
-
83
- - **Structural Code Review**: Detailed analysis of bugs, style, performance, and security risks with prioritized severity levels.
84
- - **Technical Planning**: Generation of step-by-step implementation plans in Markdown for complex feature requests.
85
- - **Actionable Diffs**: Proposed changes are generated as standard Unified Diffs for human review before application.
86
- - **Environment Isolation**: Supports `.env` and local configuration storage (`~/.akita/`) to keep secrets safe.
87
- - **Model Agnostic**: Seamlessly switch between GPT-4o, Claude 3.5, Llama 3, and more.
88
-
89
- ---
90
-
91
- ## Installation
92
-
93
- AkitaLLM is available on PyPI. You can install it directly using pip:
94
-
95
- ```bash
96
- pip install akitallm
97
- ```
98
-
99
- ---
100
-
101
- ## Usage
102
-
103
- ### 1. Project Initialization
104
- Run any command to trigger the initial configuration and onboarding.
105
- ```bash
106
- akita review .
107
- ```
108
-
109
- ### 2. Strategic Code Review
110
- Analyze a directory for potential architectural risks and bugs.
111
- ```bash
112
- akita review src/
113
- ```
114
-
115
- ### 3. Implementation Planning
116
- Generate a technical plan for a specific goal.
117
- ```bash
118
- akita plan "Implement JWT authentication with Redis-based session storage"
119
- ```
120
-
121
- ### 4. Code Problem Solving
122
- Generate a diff to solve a precise issue or refactor a module.
123
- ```bash
124
- akita solve "Improve error handling in the reasoning engine to prevent silent failures"
125
- ```
126
-
127
- ---
128
-
129
- ### 🔌 Extensibility
130
- AkitaLLM is built to be extended. You can create your own tools and plugins. Check the [Plugin Development Guide](PLUGINS.md) for more details.
131
-
132
- ## 🤝 Contributing
133
-
134
- We are looking for engineers, not just coders. If you value robust abstractions, clean code, and predictable systems, your contribution is welcome.
135
-
136
- Review our [CONTRIBUTING.md](CONTRIBUTING.md) to understand our engineering standards and PR workflow. High-quality PRs with test coverage are prioritized.
137
-
138
- ---
139
-
140
- *“Understanding the internals is the first step to excellence.”*