@voria/cli 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67) hide show
  1. package/README.md +439 -0
  2. package/bin/voria +730 -0
  3. package/docs/ARCHITECTURE.md +419 -0
  4. package/docs/CHANGELOG.md +189 -0
  5. package/docs/CONTRIBUTING.md +447 -0
  6. package/docs/DESIGN_DECISIONS.md +380 -0
  7. package/docs/DEVELOPMENT.md +535 -0
  8. package/docs/EXAMPLES.md +434 -0
  9. package/docs/INSTALL.md +335 -0
  10. package/docs/IPC_PROTOCOL.md +310 -0
  11. package/docs/LLM_INTEGRATION.md +416 -0
  12. package/docs/MODULES.md +470 -0
  13. package/docs/PERFORMANCE.md +346 -0
  14. package/docs/PLUGINS.md +432 -0
  15. package/docs/QUICKSTART.md +184 -0
  16. package/docs/README.md +133 -0
  17. package/docs/ROADMAP.md +346 -0
  18. package/docs/SECURITY.md +334 -0
  19. package/docs/TROUBLESHOOTING.md +565 -0
  20. package/docs/USER_GUIDE.md +700 -0
  21. package/package.json +63 -0
  22. package/python/voria/__init__.py +8 -0
  23. package/python/voria/__pycache__/__init__.cpython-312.pyc +0 -0
  24. package/python/voria/__pycache__/engine.cpython-312.pyc +0 -0
  25. package/python/voria/core/__init__.py +1 -0
  26. package/python/voria/core/__pycache__/__init__.cpython-312.pyc +0 -0
  27. package/python/voria/core/__pycache__/setup.cpython-312.pyc +0 -0
  28. package/python/voria/core/agent/__init__.py +9 -0
  29. package/python/voria/core/agent/__pycache__/__init__.cpython-312.pyc +0 -0
  30. package/python/voria/core/agent/__pycache__/loop.cpython-312.pyc +0 -0
  31. package/python/voria/core/agent/loop.py +343 -0
  32. package/python/voria/core/executor/__init__.py +19 -0
  33. package/python/voria/core/executor/__pycache__/__init__.cpython-312.pyc +0 -0
  34. package/python/voria/core/executor/__pycache__/executor.cpython-312.pyc +0 -0
  35. package/python/voria/core/executor/executor.py +431 -0
  36. package/python/voria/core/github/__init__.py +33 -0
  37. package/python/voria/core/github/__pycache__/__init__.cpython-312.pyc +0 -0
  38. package/python/voria/core/github/__pycache__/client.cpython-312.pyc +0 -0
  39. package/python/voria/core/github/client.py +438 -0
  40. package/python/voria/core/llm/__init__.py +55 -0
  41. package/python/voria/core/llm/__pycache__/__init__.cpython-312.pyc +0 -0
  42. package/python/voria/core/llm/__pycache__/base.cpython-312.pyc +0 -0
  43. package/python/voria/core/llm/__pycache__/claude_provider.cpython-312.pyc +0 -0
  44. package/python/voria/core/llm/__pycache__/gemini_provider.cpython-312.pyc +0 -0
  45. package/python/voria/core/llm/__pycache__/modal_provider.cpython-312.pyc +0 -0
  46. package/python/voria/core/llm/__pycache__/model_discovery.cpython-312.pyc +0 -0
  47. package/python/voria/core/llm/__pycache__/openai_provider.cpython-312.pyc +0 -0
  48. package/python/voria/core/llm/base.py +152 -0
  49. package/python/voria/core/llm/claude_provider.py +188 -0
  50. package/python/voria/core/llm/gemini_provider.py +148 -0
  51. package/python/voria/core/llm/modal_provider.py +228 -0
  52. package/python/voria/core/llm/model_discovery.py +289 -0
  53. package/python/voria/core/llm/openai_provider.py +146 -0
  54. package/python/voria/core/patcher/__init__.py +9 -0
  55. package/python/voria/core/patcher/__pycache__/__init__.cpython-312.pyc +0 -0
  56. package/python/voria/core/patcher/__pycache__/patcher.cpython-312.pyc +0 -0
  57. package/python/voria/core/patcher/patcher.py +375 -0
  58. package/python/voria/core/planner/__init__.py +1 -0
  59. package/python/voria/core/setup.py +201 -0
  60. package/python/voria/core/token_manager/__init__.py +29 -0
  61. package/python/voria/core/token_manager/__pycache__/__init__.cpython-312.pyc +0 -0
  62. package/python/voria/core/token_manager/__pycache__/manager.cpython-312.pyc +0 -0
  63. package/python/voria/core/token_manager/manager.py +241 -0
  64. package/python/voria/engine.py +1185 -0
  65. package/python/voria/plugins/__init__.py +1 -0
  66. package/python/voria/plugins/python/__init__.py +1 -0
  67. package/python/voria/plugins/typescript/__init__.py +1 -0
@@ -0,0 +1,416 @@
1
+ # LLM Integration Guide
2
+
3
+ How to add support for new LLM providers in voria.
4
+
5
+ ## Adding a New LLM Provider
6
+
7
+ voria supports multiple LLM providers. Here's how to add a new one like Kimi, MiniMax, or custom APIs.
8
+
9
+ ## Choosing an LLM to Add
10
+
11
+ **Current Providers** (supported out of box):
12
+ - **Modal** - Free 745B GLM model (until Apr 30)
13
+ - **OpenAI** - GPT-5.4 series (frontier models)
14
+ - **Google Gemini** - Gemini 3.x (fast & cheap)
15
+ - **Anthropic Claude** - Claude 4.6 (highest quality)
16
+
17
+ **Candidates to Add**:
18
+ - **Kimi (Moonshot)** - Chinese LLM
19
+ - **MiniMax** - Cost-effective option
20
+ - **Aleph Alpha** - Private deployment option
21
+ - **Together** - Distributed inference
22
+ - **LocalAI** - Self-hosted option
23
+ - **Ollama** - Local models
24
+
25
+ ## Implementation (Example: Kimi)
26
+
27
+ ### Step 1: Create Provider Class
28
+
29
+ Create `python/voria/core/llm/providers/kimi.py`:
30
+
31
+ ```python
32
+ from .base import BaseLLMProvider, LLMResponse, ModelInfo
33
+ import httpx
34
+ from dataclasses import dataclass
35
+ from typing import List, Optional
36
+
37
+ @dataclass
38
+ class KimiModelInfo:
39
+ """Kimi model information"""
40
+ name: str
41
+ display_name: str
42
+ max_tokens: int = 4096
43
+
44
+ class KimiProvider(BaseLLMProvider):
45
+ """Kimi (Moonshot) LLM provider"""
46
+
47
+ BASE_URL = "https://api.moonshot.cn/v1"
48
+
49
+ def __init__(self, api_key: str, model: str = "moonshot-v1-32k"):
50
+ self.api_key = api_key
51
+ self.model = model
52
+ self.client = httpx.AsyncClient(
53
+ base_url=self.BASE_URL,
54
+ headers={
55
+ "Authorization": f"Bearer {api_key}",
56
+ "Content-Type": "application/json"
57
+ }
58
+ )
59
+
60
+ async def plan(self, issue_description: str) -> str:
61
+ """Generate a fix plan for the issue"""
62
+ prompt = f"""Analyze this GitHub issue and create a detailed fix plan:
63
+
64
+ {issue_description}
65
+
66
+ Provide:
67
+ 1. Root cause analysis
68
+ 2. Files that need changes
69
+ 3. Changes needed (point form)
70
+ 4. Potential edge cases
71
+ 5. Testing strategy"""
72
+
73
+ response = await self._call_api(prompt, "planning")
74
+ return response.content
75
+
76
+ async def generate_patch(self, issue_context: dict, plan: str) -> str:
77
+ """Generate a unified diff based on the plan"""
78
+ prompt = f"""Based on this plan, generate a unified diff:
79
+
80
+ Plan:
81
+ {plan}
82
+
83
+ Context:
84
+ {issue_context}
85
+
86
+ Generate a valid unified diff starting with:
87
+ --- a/filename
88
+ +++ b/filename
89
+ @@..."""
90
+
91
+ response = await self._call_api(prompt, "patching")
92
+ return response.content
93
+
94
+ async def analyze_test_failure(self, test_output: str, code: str) -> str:
95
+ """Analyze test failures and suggest fixes"""
96
+ prompt = f"""The code changes failed tests:
97
+
98
+ Test Output:
99
+ {test_output}
100
+
101
+ Modified Code:
102
+ {code}
103
+
104
+ Analyze why tests failed and suggest fixes. Be specific about what changed."""
105
+
106
+ response = await self._call_api(prompt, "analysis")
107
+ return response.content
108
+
109
+ async def _call_api(self, prompt: str, task_type: str = "default") -> LLMResponse:
110
+ """Call Kimi API"""
111
+
112
+ # System prompt based on task
113
+ system_prompts = {
114
+ "planning": "You are an expert code analyzer. Provide detailed, actionable plans.",
115
+ "patching": "You are an expert code generator. Generate valid, working code patches.",
116
+ "analysis": "You are an expert debugger. Analyze failures and suggest fixes."
117
+ }
118
+
119
+ payload = {
120
+ "model": self.model,
121
+ "messages": [
122
+ {"role": "system", "content": system_prompts.get(task_type, "")},
123
+ {"role": "user", "content": prompt}
124
+ ],
125
+ "temperature": 0.7,
126
+ "top_p": 0.95,
127
+ "max_tokens": 4096
128
+ }
129
+
130
+ response = await self.client.post("/chat/completions", json=payload)
131
+ response.raise_for_status()
132
+
133
+ data = response.json()
134
+ content = data["choices"][0]["message"]["content"]
135
+ tokens_used = data.get("usage", {}).get("total_tokens", 0)
136
+
137
+ return LLMResponse(
138
+ content=content,
139
+ tokens_used=tokens_used,
140
+ finish_reason=data["choices"][0].get("finish_reason", "stop")
141
+ )
142
+
143
+ @staticmethod
144
+ async def discover_models(api_key: str) -> List[ModelInfo]:
145
+ """Discover available models from Kimi"""
146
+ try:
147
+ client = httpx.AsyncClient(
148
+ headers={"Authorization": f"Bearer {api_key}"}
149
+ )
150
+ resp = await client.get("https://api.moonshot.cn/v1/models")
151
+ data = resp.json()
152
+
153
+ models = []
154
+ for model in data.get("data", []):
155
+ models.append(ModelInfo(
156
+ name=model["id"],
157
+ display_name=f"Kimi - {model['id']}",
158
+ tokens_per_hour=None,  # None = unlimited
159
+ max_tokens=model.get("context_window", 4096),
160
+ description="Moonshot Kimi model"
161
+ ))
162
+ return models
163
+ except Exception:
164
+ # Fallback to known models
165
+ return [
166
+ ModelInfo(
167
+ name="moonshot-v1-32k",
168
+ display_name="Kimi (32K Context)",
169
+ max_tokens=32768,
170
+ description="Moonshot Kimi with 32K context"
171
+ ),
172
+ ModelInfo(
173
+ name="moonshot-v1-128k",
174
+ display_name="Kimi (128K Context)",
175
+ max_tokens=131072,
176
+ description="Moonshot Kimi with 128K context"
177
+ )
178
+ ]
179
+ ```
180
+
181
+ ### Step 2: Register Provider
182
+
183
+ Edit `python/voria/core/llm/__init__.py`:
184
+
185
+ ```python
186
+ from .providers.kimi import KimiProvider
187
+
188
+ class LLMProviderFactory:
189
+ PROVIDERS = {
190
+ "modal": ModalProvider,
191
+ "openai": OpenAIProvider,
192
+ "gemini": GeminiProvider,
193
+ "claude": ClaudeProvider,
194
+ "kimi": KimiProvider, # ← Add this
195
+ }
196
+
197
+ @classmethod
198
+ async def discover_models(cls, provider_name: str, api_key: str):
199
+ """Discover available models"""
200
+ provider_class = cls.PROVIDERS.get(provider_name)
201
+ if not provider_class:
202
+ raise ValueError(f"Unknown provider: {provider_name}")
203
+ return await provider_class.discover_models(api_key)
204
+ ```
205
+
206
+ ### Step 3: Add to Setup
207
+
208
+ Edit `python/voria/core/setup.py`:
209
+
210
+ ```python
211
+ class ProviderSetup:
212
+ KNOWN_PROVIDERS = {
213
+ "modal": {
214
+ "name": "Modal Z.ai",
215
+ "env_key": "MODAL_API_KEY",
216
+ "url": "https://modal.com"
217
+ },
218
+ "openai": {
219
+ "name": "OpenAI",
220
+ "env_key": "OPENAI_API_KEY",
221
+ "url": "https://platform.openai.com"
222
+ },
223
+ "gemini": {
224
+ "name": "Google Gemini",
225
+ "env_key": "GOOGLE_API_KEY",
226
+ "url": "https://makersuite.google.com"
227
+ },
228
+ "claude": {
229
+ "name": "Anthropic Claude",
230
+ "env_key": "ANTHROPIC_API_KEY",
231
+ "url": "https://console.anthropic.com"
232
+ },
233
+ "kimi": { # ← Add this
234
+ "name": "Kimi (Moonshot)",
235
+ "env_key": "KIMI_API_KEY",
236
+ "url": "https://platform.moonshot.cn"
237
+ },
238
+ }
239
+ ```
240
+
241
+ ### Step 4: Add Token Pricing
242
+
243
+ Edit `python/voria/core/token_manager/pricing.py`:
244
+
245
+ ```python
246
+ PROVIDER_PRICING = {
247
+ "modal": {
248
+ "input_price": 0.00, # Free until Apr 30
249
+ "output_price": 0.00,
250
+ "currency": "USD"
251
+ },
252
+ "openai": {
253
+ "input_price": 0.0025, # Per 1K tokens
254
+ "output_price": 0.010,
255
+ "currency": "USD"
256
+ },
257
+ "gemini": {
258
+ "input_price": 0.000075,
259
+ "output_price": 0.00030,
260
+ "currency": "USD"
261
+ },
262
+ "claude": {
263
+ "input_price": 0.003,
264
+ "output_price": 0.015,
265
+ "currency": "USD"
266
+ },
267
+ "kimi": { # ← Add this
268
+ "input_price": 0.0006, # Per 1K tokens
269
+ "output_price": 0.0018,
270
+ "currency": "Yuan (CNY)"
271
+ },
272
+ }
273
+ ```
274
+
275
+ ### Step 5: Test
276
+
277
+ Create `tests/test_kimi.py`:
278
+
279
+ ```python
280
+ import pytest
281
+ from voria.core.llm import LLMProviderFactory
282
+
283
+ @pytest.mark.asyncio
284
+ async def test_kimi_provider_creation():
285
+ """Test creating Kimi provider"""
286
+ provider = LLMProviderFactory.create(
287
+ "kimi",
288
+ "fake-key-for-testing",
289
+ "moonshot-v1-32k"
290
+ )
291
+ assert provider is not None
292
+ assert provider.model == "moonshot-v1-32k"
293
+
294
+ @pytest.mark.asyncio
295
+ async def test_kimi_model_discovery():
296
+ """Test discovering Kimi models"""
297
+ models = await LLMProviderFactory.discover_models("kimi", "fake-key")
298
+ assert len(models) > 0
299
+ assert any("Kimi" in m.display_name for m in models)
300
+
301
+ @pytest.mark.asyncio
302
+ async def test_kimi_api_call():
303
+ """Test actual API call (skipped if no real key)"""
304
+ import os
305
+ api_key = os.getenv("KIMI_API_KEY")
306
+ if not api_key:
307
+ pytest.skip("KIMI_API_KEY not set")
308
+
309
+ provider = LLMProviderFactory.create("kimi", api_key)
310
+ response = await provider.plan("Fix a bug")
311
+ assert response # Should return something
312
+ ```
313
+
314
+ ### Step 6: Update Documentation
315
+
316
+ Add to relevant docs:
317
+ - [USER_GUIDE.md](USER_GUIDE.md) - Usage with Kimi
318
+ - [EXAMPLES.md](EXAMPLES.md) - Example with Kimi
319
+ - Add pricing to comparison table
320
+
321
+ ## API Key Configuration
322
+
323
+ Users can configure new provider via:
324
+
325
+ **Option 1: Interactive Setup**
326
+ ```bash
327
+ python3 -m voria.core.setup
328
+ # Choose: kimi
329
+ # Enter API key: xxx
330
+ ```
331
+
332
+ **Option 2: Environment Variable**
333
+ ```bash
334
+ export KIMI_API_KEY="xxx"
335
+ ```
336
+
337
+ **Option 3: Manual Config**
338
+ ```json
339
+ {
340
+ "providers": {
341
+ "kimi": {
342
+ "api_key": "xxx",
343
+ "model": "moonshot-v1-32k"
344
+ }
345
+ }
346
+ }
347
+ ```
348
+
349
+ ## Checklist for New Provider
350
+
351
+ - [ ] Create provider class inheriting from `BaseLLMProvider`
352
+ - [ ] Implement `plan()`, `generate_patch()`, `analyze_test_failure()`
353
+ - [ ] Implement `discover_models()` static method
354
+ - [ ] Add to `LLMProviderFactory.PROVIDERS` dict
355
+ - [ ] Add to `ProviderSetup.KNOWN_PROVIDERS`
356
+ - [ ] Add pricing to `token_manager`
357
+ - [ ] Write unit tests
358
+ - [ ] Test with real API key (if available)
359
+ - [ ] Update documentation with examples
360
+ - [ ] Submit PR or document for users
361
+
362
+ ## Provider Comparison
363
+
364
+ | Provider | Cost | Speed | Quality | Notes |
365
+ |----------|------|-------|---------|-------|
366
+ | Modal | FREE | Fast | Good | Limited to GLM-5.1FP8 |
367
+ | OpenAI | $5/hr | 2-3min | Excellent | GPT-5.4 |
368
+ | Gemini | $1/hr | 1-2min | Good | Cost-effective |
369
+ | Claude | $3/hr | 3-4min | Excellent | Takes longer |
370
+ | Kimi | $$/hr | Fast | Good | Chinese LLM |
371
+
372
+ ## Security Notes
373
+
374
+ When implementing new providers:
375
+ - Never log API keys
376
+ - Use environment variables for local testing
377
+ - Validate API responses
378
+ - Handle rate limiting
379
+ - Implement exponential backoff
380
+ - Use secure HTTPS connections
381
+
382
+ ## Example: Local Model (Ollama)
383
+
384
+ ```python
385
+ class OllamaProvider(BaseLLMProvider):
386
+ """Local Ollama model provider"""
387
+
388
+ def __init__(self, model: str = "llama2", base_url: str = "http://localhost:11434"):
389
+ self.model = model
390
+ self.base_url = base_url
391
+
392
+ async def _call_api(self, prompt: str, **kwargs) -> LLMResponse:
393
+ """Call local Ollama server"""
394
+ async with httpx.AsyncClient() as client:
395
+ response = await client.post(
396
+ f"{self.base_url}/api/generate",
397
+ json={
398
+ "model": self.model,
399
+ "prompt": prompt,
400
+ "stream": False
401
+ }
402
+ )
403
+ data = response.json()
404
+ return LLMResponse(
405
+ content=data["response"],
406
+ tokens_used=data.get("tokens", 0),
407
+ finish_reason="stop"
408
+ )
409
+ ```
410
+
411
+ ---
412
+
413
+ **See Also:**
414
+ - [MODULES.md](MODULES.md) - `llm/` module documentation
415
+ - [DEVELOPMENT.md](DEVELOPMENT.md) - Development setup
416
+ - [PLUGINS.md](PLUGINS.md) - Plugin development (for test executors, etc)