onecoder 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. onecoder/agent.py +95 -0
  2. onecoder/agentic_tool_search/__init__.py +0 -0
  3. onecoder/agentic_tool_search/dynamic_tool_search.py +64 -0
  4. onecoder/agentic_tool_search/registry.py +33 -0
  5. onecoder/agents/__init__.py +7 -0
  6. onecoder/agents/documentation_agent.py +12 -0
  7. onecoder/agents/file_reader_agent.py +19 -0
  8. onecoder/agents/file_writer_agent.py +19 -0
  9. onecoder/agents/orchestrator_agent.py +51 -0
  10. onecoder/agents/refactoring_agent.py +12 -0
  11. onecoder/agents/research_agent.py +31 -0
  12. onecoder/agents/task_suggestion_agent.py +88 -0
  13. onecoder/alignment.py +236 -0
  14. onecoder/api.py +162 -0
  15. onecoder/api_client.py +112 -0
  16. onecoder/backends/base.py +22 -0
  17. onecoder/backends/local_tui.py +65 -0
  18. onecoder/blackboard.py +102 -0
  19. onecoder/cli.py +108 -0
  20. onecoder/commands/__init__.py +1 -0
  21. onecoder/commands/auth.py +78 -0
  22. onecoder/commands/ci.py +29 -0
  23. onecoder/commands/delegate.py +557 -0
  24. onecoder/commands/doctor.py +40 -0
  25. onecoder/commands/issue.py +136 -0
  26. onecoder/commands/logs.py +45 -0
  27. onecoder/commands/project.py +270 -0
  28. onecoder/commands/server.py +170 -0
  29. onecoder/config_manager.py +87 -0
  30. onecoder/constants.py +9 -0
  31. onecoder/diagnostics/__init__.py +2 -0
  32. onecoder/diagnostics/env_scan.py +207 -0
  33. onecoder/discovery.py +101 -0
  34. onecoder/distillation.py +236 -0
  35. onecoder/evaluation/__init__.py +1 -0
  36. onecoder/evaluation/ttu.py +176 -0
  37. onecoder/governance/__init__.py +0 -0
  38. onecoder/governance/probllm.py +91 -0
  39. onecoder/hooks.py +74 -0
  40. onecoder/ipc_auth.py +200 -0
  41. onecoder/issues.py +188 -0
  42. onecoder/jules_client.py +343 -0
  43. onecoder/knowledge.py +106 -0
  44. onecoder/llm.py +61 -0
  45. onecoder/logger.py +42 -0
  46. onecoder/metrics.py +129 -0
  47. onecoder/models/delegation.py +46 -0
  48. onecoder/onboarding.py +264 -0
  49. onecoder/review.py +233 -0
  50. onecoder/services/delegation_service.py +209 -0
  51. onecoder/services/validation_service.py +104 -0
  52. onecoder/sessions.py +186 -0
  53. onecoder/sprint_collector.py +165 -0
  54. onecoder/sync.py +167 -0
  55. onecoder/tmux.py +86 -0
  56. onecoder/tools/__init__.py +10 -0
  57. onecoder/tools/executor.py +53 -0
  58. onecoder/tools/external_tools.py +106 -0
  59. onecoder/tools/file_tools.py +77 -0
  60. onecoder/tools/interface.py +25 -0
  61. onecoder/tools/jules_tools.py +122 -0
  62. onecoder/tools/kit_tools.py +122 -0
  63. onecoder/tools/registry.py +32 -0
  64. onecoder/tui/__init__.py +5 -0
  65. onecoder/tui/app.py +263 -0
  66. onecoder/tui/commands.py +150 -0
  67. onecoder/tui/widgets.py +92 -0
  68. onecoder/worktree.py +186 -0
  69. onecoder-0.0.2.dist-info/METADATA +17 -0
  70. onecoder-0.0.2.dist-info/RECORD +73 -0
  71. onecoder-0.0.2.dist-info/WHEEL +5 -0
  72. onecoder-0.0.2.dist-info/entry_points.txt +2 -0
  73. onecoder-0.0.2.dist-info/top_level.txt +1 -0
onecoder/agent.py ADDED
@@ -0,0 +1,95 @@
1
+ import os
2
+ from google.adk.agents import LlmAgent
3
+ from google.adk.models import Gemini, LiteLlm
4
+ from dotenv import load_dotenv
5
+
6
+ from .agents import (
7
+ create_documentation_agent,
8
+ create_orchestrator_agent,
9
+ create_refactoring_agent,
10
+ create_file_reader_agent,
11
+ create_file_writer_agent,
12
+ create_research_agent
13
+ )
14
+ from .config_manager import config_manager
15
+
16
+ load_dotenv(dotenv_path=os.path.join(os.path.dirname(__file__), "..", ".env"))
17
+
18
# --- LLM Configuration ---
def get_model():
    """Resolve which LLM backend to use.

    Resolution order:
      1. Explicit user configuration stored via ``config_manager``.
      2. GEMINI_API_KEY (default backend).
      3. OPENAI_API_KEY.
      4. OLLAMA_API_KEY (hosted qwen coder model).
      5. OPENROUTER_API_KEY (free-tier fallback).

    Returns:
        A configured ``Gemini`` or ``LiteLlm`` instance.

    Raises:
        ValueError: If no configuration or API key can be found.
    """
    # 1. Explicit user configuration always wins over environment variables.
    user_cfg = config_manager.get_model_config()
    if user_cfg and user_cfg.get("model_name"):
        return LiteLlm(
            model=user_cfg.get("model_name"),
            api_key=user_cfg.get("api_key"),
            base_url=user_cfg.get("base_url"),
        )

    # 2. Gemini is the preferred default.
    key = os.getenv("GEMINI_API_KEY")
    if key:
        return Gemini(model="gemini-2.0-flash-exp", api_key=key)

    # 3. OpenAI via LiteLLM.
    key = os.getenv("OPENAI_API_KEY")
    if key:
        return LiteLlm(model="openai/gpt-4o", api_key=key)

    # 4. Ollama cloud, defaulting to the hosted qwen coder model.
    key = os.getenv("OLLAMA_API_KEY")
    if key:
        return LiteLlm(
            model="ollama/qwen3-coder:480b-cloud",
            api_key=key,
            base_url=os.getenv("OLLAMA_BASE_URL", "https://ollama.com"),
        )

    # 5. Final fallback: OpenRouter free tier.
    key = os.getenv("OPENROUTER_API_KEY")
    if key:
        return LiteLlm(
            model="openrouter/xiaomi/mimo-v2-flash:free",
            api_key=key,
            base_url="https://openrouter.ai/api/v1",
        )

    raise ValueError(
        "No API key found. Please set GEMINI_API_KEY, OPENAI_API_KEY, OLLAMA_API_KEY, or OPENROUTER_API_KEY, "
        "or configure the model using 'onecoder config model'."
    )
68
+
69
# --- Agent Instances ---
_root_agent = None

def get_root_agent():
    """Return the singleton orchestrator agent, building it on first use.

    Construction is lazy so that importing this module never triggers
    API-key resolution via ``get_model``.
    """
    global _root_agent
    if _root_agent is not None:
        return _root_agent

    model = get_model()

    # All specialists share the same underlying model instance.
    specialists = [
        create_refactoring_agent(model),
        create_documentation_agent(model),
        create_file_reader_agent(model),
        create_file_writer_agent(model),
        create_research_agent(model),
    ]

    # The orchestrator routes user requests to the specialists above.
    _root_agent = create_orchestrator_agent(model, sub_agents=specialists)
    return _root_agent
File without changes
@@ -0,0 +1,64 @@
1
# onecoder/agentic_tool_search/dynamic_tool_search.py

import inspect
import numpy as np

class DynamicToolExecutor:
    """
    Manages the dynamic finding and execution of tools based on semantic search.
    """
    def __init__(self, model, registry):
        # model: any object exposing encode(text) -> embedding vector
        #   (and encode(list[str]) -> list of vectors) — TODO confirm against caller.
        # registry: a ToolRegistry providing descriptions and tool lookups.
        self.model = model
        self.registry = registry

    def _cosine_similarity(self, v1, v2):
        """Calculates the cosine similarity between two vectors.

        Returns 0.0 when either vector has zero norm, avoiding a
        division by zero for degenerate embeddings.
        """
        dot_product = np.dot(v1, v2)
        norm_v1 = np.linalg.norm(v1)
        norm_v2 = np.linalg.norm(v2)
        return dot_product / (norm_v1 * norm_v2) if norm_v1 > 0 and norm_v2 > 0 else 0.0

    async def find_and_execute_tool(self, query: str, tool_args: dict = None) -> dict:
        """
        Dynamically finds and executes the most appropriate tool for a given query.

        This function performs a semantic search over the descriptions of all
        registered tools to find the best match for the user's query. Once the
        tool is identified, it is executed with the provided arguments.

        Args:
            query: The natural language query describing the task.
            tool_args: A dictionary of arguments to be passed to the selected tool.

        Returns:
            The result of the executed tool, or an error message if no
            suitable tool is found.
        """
        if tool_args is None:
            tool_args = {}

        # 1. Find the best tool using semantic search.
        tool_descriptions = self.registry.get_all_tool_descriptions()
        # FIX: np.argmax raises ValueError on an empty sequence, so an empty
        # registry must fail gracefully with the standard error payload.
        if not tool_descriptions:
            return {"error": "Could not find a suitable tool for the query."}

        query_embedding = self.model.encode(query)
        description_embeddings = self.model.encode(tool_descriptions)

        similarities = [
            self._cosine_similarity(query_embedding, desc_emb)
            for desc_emb in description_embeddings
        ]
        best_tool_index = int(np.argmax(similarities))
        best_tool_description = tool_descriptions[best_tool_index]
        best_tool_name = self.registry.get_tool_name_from_description(best_tool_description)

        # 2. Execute the best tool.
        tool_function = self.registry.get_tool_function(best_tool_name)
        if not tool_function:
            return {"error": "Could not find a suitable tool for the query."}

        print(f"Agent decided to use tool: '{best_tool_name}' for query: '{query}'")

        # Forward only the arguments the selected tool actually accepts.
        sig = inspect.signature(tool_function)
        valid_args = {key: value for key, value in tool_args.items() if key in sig.parameters}

        # Tools that declare a 'query' parameter always receive the raw query.
        if 'query' in sig.parameters:
            valid_args['query'] = query

        result = await tool_function(**valid_args)
        return result
@@ -0,0 +1,33 @@
1
# onecoder/agentic_tool_search/registry.py

class ToolRegistry:
    """A simple, in-memory registry for discoverable tools."""

    def __init__(self):
        # Maps tool name -> {"function": callable, "data_source": optional}.
        self._tools = {}
        # Ordered metadata used for semantic search: [{"name", "description"}].
        self.tool_metadata = []

    def register(self, tool_function, name, description, data_source=None):
        """Registers a tool, making it available for search and execution."""
        self._tools[name] = {"function": tool_function, "data_source": data_source}
        self.tool_metadata.append({"name": name, "description": description})

    def get_tool_function(self, name):
        # Unknown names yield None rather than raising.
        return self._tools.get(name, {}).get("function")

    def get_tool_data_source(self, name):
        return self._tools.get(name, {}).get("data_source")

    def get_all_tool_descriptions(self):
        return [meta["description"] for meta in self.tool_metadata]

    def get_tool_name_from_description(self, description):
        # Reverse lookup; the first registration wins on duplicate descriptions.
        return next(
            (meta["name"] for meta in self.tool_metadata if meta["description"] == description),
            None,
        )
@@ -0,0 +1,7 @@
1
+ from .refactoring_agent import create_refactoring_agent
2
+ from .documentation_agent import create_documentation_agent
3
+ from .orchestrator_agent import create_orchestrator_agent
4
+ from .file_reader_agent import create_file_reader_agent
5
+ from .file_writer_agent import create_file_writer_agent
6
+ from .research_agent import create_research_agent
7
+ from .task_suggestion_agent import TaskSuggester
@@ -0,0 +1,12 @@
1
+ from google.adk.agents import LlmAgent
2
+ from google.adk.models.lite_llm import LiteLlm
3
+
4
+
5
def create_documentation_agent(model: LiteLlm) -> LlmAgent:
    """Create a documentation agent.

    The agent writes documentation for the code it is given; its answer
    is stored under the ``documentation`` output key.
    """
    writer_instruction = (
        "You are an expert technical writer. Your task is to write clear and concise documentation for the given code."
    )
    return LlmAgent(
        name="documentation_agent",
        model=model,
        instruction=writer_instruction,
        output_key="documentation",
    )
@@ -0,0 +1,19 @@
1
+ from google.adk.agents import LlmAgent
2
+ from google.adk.models.lite_llm import LiteLlm
3
+ from ..tools.file_tools import read_file_tool, list_directory_tool
4
+
5
+
6
def create_file_reader_agent(model: LiteLlm) -> LlmAgent:
    """Create a file reader agent.

    A narrow specialist that only invokes the file-reading tools and
    relays their output verbatim under the ``file_content`` output key.
    """
    strict_relay_instruction = (
        "You are a specialized file reader agent. Your ONLY job is to execute the `read_file_tool` or `list_directory_tool`. "
        "When you receive a request, you must call the appropriate tool and return its output EXACTLY. "
        "DO NOT add any conversational filler, explanations, or labels. "
        "Your entire response MUST be just the content returned by the tool."
    )
    return LlmAgent(
        name="file_reader_agent",
        model=model,
        instruction=strict_relay_instruction,
        tools=[read_file_tool, list_directory_tool],
        output_key="file_content",
    )
@@ -0,0 +1,19 @@
1
+ from google.adk.agents import LlmAgent
2
+ from google.adk.models.lite_llm import LiteLlm
3
+ from ..tools.file_tools import write_file_tool
4
+
5
+
6
def create_file_writer_agent(model: LiteLlm) -> LlmAgent:
    """Create a file writer agent.

    A narrow specialist that only invokes ``write_file_tool`` and relays
    its status string verbatim under the ``write_status`` output key.
    """
    strict_relay_instruction = (
        "You are a specialized file writer agent. Your ONLY job is to execute the `write_file_tool`. "
        "When you receive a request, you must call the tool and return its output EXACTLY. "
        "DO NOT add any conversational filler, explanations, or confirmation messages. "
        "Your entire response MUST be just the string returned by the tool."
    )
    return LlmAgent(
        name="file_writer_agent",
        model=model,
        instruction=strict_relay_instruction,
        tools=[write_file_tool],
        output_key="write_status",
    )
@@ -0,0 +1,51 @@
1
+ from typing import List, Optional
2
+ from google.adk.agents import LlmAgent, BaseAgent
3
+ from google.adk.models.lite_llm import LiteLlm
4
+ from ..tools import registry
5
+ from ..knowledge import ProjectKnowledge
6
+
7
def create_orchestrator_agent(model: LiteLlm, sub_agents: Optional[List[BaseAgent]] = None) -> LlmAgent:
    """Create an orchestrator agent that routes to specialist agents or uses tools.

    Args:
        model: The LLM backend shared by the orchestrator.
        sub_agents: Specialist agents the orchestrator may delegate to.

    Returns:
        The configured root ``LlmAgent``.
    """

    # Get available tools from registry for the prompt
    tool_descriptions = registry.get_tool_descriptions()

    # Load Project Knowledge (Governance & Context)
    pk = ProjectKnowledge()
    durable_context = pk.get_durable_context()
    agents_guidelines = durable_context.get("agents_guidelines", "")

    governance_section = ""
    if agents_guidelines:
        governance_section = f"\n\nGOVERNANCE & POLICY (Must Follow):\n{agents_guidelines}\n"

    return LlmAgent(
        name="orchestrator_agent",
        model=model,
        sub_agents=sub_agents or [],
        instruction=(
            "You are the OneCoder Orchestrator, a high-level coordination agent for a multi-agent coding system.\n"
            f"{governance_section}\n"
            "YOUR MISSION:\n"
            "Analyze user requests and either solve them directly using your available tools or DELEGATE to a specialist sub-agent.\n\n"
            "SPECIALIST SUB-AGENTS:\n"
            # FIX: the prompt must reference sub-agents by their registered
            # `name` ('refactoring_agent' / 'documentation_agent'), not the
            # previous '*_specialist' aliases, or delegation by name fails.
            "- 'refactoring_agent': Code improvements, optimizations, and modernizing legacy code.\n"
            "- 'documentation_agent': Writing docstrings, READMEs, and technical documentation.\n"
            "- 'file_reader_agent': Essential for exploring the codebase and reading file contents.\n"
            "- 'file_writer_agent': Used to apply changes or create new files.\n"
            "- 'research_agent': Optimized for broad codebase research, indexing, and structural analysis.\n\n"
            "EXTERNAL CAPABILITIES:\n"
            "- 'shell_executor': Can run any shell command. Use this for system queries or if a tool is missing.\n"
            "- 'gemini_ask': Use this to query the Gemini CLI for assistance.\n"
            "- **IMAGE GENERATION**: Image generation with 'nanobanana' requires the interactive Gemini TUI. If the user asks for images, instruct them to 'run gemini' in their terminal.\n\n"
            "AVAILABLE TOOLS:\n"
            f"{tool_descriptions}\n\n"
            "STRATEGY:\n"
            "1. **Analyze**: Understand if the task is deep research, focused refactoring, or simple file manipulation.\n"
            "2. **Delegate**: If a specialist exists for the task, call that sub-agent.\n"
            "3. **Synthesize**: When a sub-agent returns, summarize the findings and present them clearly to the user.\n\n"
            "RESPONSE STYLE:\n"
            "Maintain a helpful, professional engineer tone. Use markdown for code blocks and reports."
        ),
        output_key="final_response",
    )
@@ -0,0 +1,12 @@
1
+ from google.adk.agents import LlmAgent
2
+ from google.adk.models.lite_llm import LiteLlm
3
+
4
+
5
def create_refactoring_agent(model: LiteLlm) -> LlmAgent:
    """Create a refactoring agent.

    The agent rewrites code for readability, performance, and
    maintainability; its answer is stored under ``refactored_code``.
    """
    return LlmAgent(
        model=model,
        name="refactoring_agent",
        instruction=(
            "You are an expert software engineer. Your task is to refactor the given code "
            "to improve its readability, performance, and maintainability."
        ),
        output_key="refactored_code",
    )
@@ -0,0 +1,31 @@
1
+ # onecoder/agents/research_agent.py
2
+
3
+ from google.adk.agents import LlmAgent
4
+ from google.adk.models.lite_llm import LiteLlm
5
+ from ..tools.executor import executor
6
+
7
def create_research_agent(model: LiteLlm) -> LlmAgent:
    """
    Create a research agent that uses the 'Agentic Search' pattern.
    It can dynamically discover and use tools to analyze the repository.
    """

    async def agentic_search(task_description: str) -> str:
        """
        Dynamically find and execute the best tool for the repository research task.
        """
        # NOTE(review): the docstring above likely doubles as the tool
        # description shown to the model, so it is kept verbatim.
        outcome = await executor.find_and_execute_tool(task_description)
        return str(outcome)

    research_instruction = (
        "You are a repository research specialist. Your goal is to provide deep analysis of the codebase. "
        "You have access to a powerful 'agentic_search' tool that can find and execute various repository tools (like indexing, symbols, file tree). "
        "When asked to analyze, index, or search the repository, use 'agentic_search' with a descriptive task. "
        "Your response should synthesize the information you find into a clear report."
    )
    return LlmAgent(
        name="research_agent",
        model=model,
        instruction=research_instruction,
        tools=[agentic_search],
        output_key="research_report",
    )
@@ -0,0 +1,88 @@
1
+ import os
2
+ import json
3
+ import logging
4
+ from typing import List, Dict, Any
5
+
6
+ try:
7
+ import litellm
8
+ except ImportError:
9
+ litellm = None
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
class TaskSuggester:
    """
    Analyzes project context and suggests actionable tasks using an LLM.
    """

    def __init__(self, model_name: str = "openrouter/xiaomi/mimo-v2-flash:free"):
        # ONECODER_MODEL overrides the default model; the OpenRouter key is
        # always read since completions are routed through OpenRouter below.
        self.model_name = os.getenv("ONECODER_MODEL", model_name)
        self.api_key = os.getenv("OPENROUTER_API_KEY")

    def suggest_next_tasks(self, context: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """
        Generates task suggestions based on sprint context.

        Returns an empty list when litellm is missing, no API key is set,
        or the completion call fails for any reason.
        """
        if litellm is None or not self.api_key:
            logger.warning("LLM dependencies not met. Returning empty suggestions.")
            return []

        prompt = self._construct_prompt(context)

        try:
            completion = litellm.completion(
                model=self.model_name,
                messages=[{"role": "user", "content": prompt}],
                api_key=self.api_key,
                base_url="https://openrouter.ai/api/v1",
                response_format={"type": "json_object"},
            )
            raw = completion.choices[0].message.content
            return self._parse_response(raw)
        except Exception as e:
            logger.error(f"Task suggestion failed: {e}")
            return []

    def _construct_prompt(self, context: List[Dict[str, Any]]) -> str:
        """Build the suggestion prompt, embedding the context as JSON."""
        sprints_str = json.dumps(context, indent=2)
        return f"""
        You are an expert Agile Technical Project Manager and Architect.
        Analyze the following context from recent sprints (goals, completed tasks, learnings, backlogs) and suggest 3-5 high-impact next tasks.

        Focus on:
        1. carrying over important incomplete work.
        2. applying 'learnings' to improved workflows or automated fixes.
        3. logically progressing towards sprint goals.

        Context:
        {sprints_str}

        Return a JSON object with a key "suggestions" containing a list of objects, each with:
        - "title": Short, actionable title (e.g., "Implement retry logic for API").
        - "rationale": Why this is important based on context.
        - "type": One of "feature", "fix", "chore", "governance".

        Example:
        {{
          "suggestions": [
            {{ "title": "...", "rationale": "...", "type": "..." }}
          ]
        }}
        """

    def _parse_response(self, content: str) -> List[Dict[str, str]]:
        """Parse the LLM reply into a list of suggestion dicts."""
        try:
            parsed = json.loads(content)
        except json.JSONDecodeError:
            # Simple fallback extraction if JSON fails
            import re
            titles = re.findall(r'"title":\s*"(.*?)"', content)
            return [
                {"title": t, "rationale": "Extracted from text", "type": "task"}
                for t in titles
            ]
        if isinstance(parsed, dict) and "suggestions" in parsed:
            return parsed["suggestions"]
        if isinstance(parsed, list):
            return parsed
        return []