codpilot-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
app/agents/feature_agent.py ADDED
@@ -0,0 +1,49 @@
+ from google.adk.agents.llm_agent import LlmAgent
+ from google.adk.agents.loop_agent import LoopAgent
+ from google.adk.tools.tool_context import ToolContext
+ from app.agents.tools.github_mcp import github_mcp
+ from app.services.build_model_service import build_model
+
+
+ def finish_task(tool_context: ToolContext):
+     """Call this ONLY when a Pull Request URL has been successfully generated."""
+     tool_context.actions.escalate = True
+     return {"status": "complete"}
+
+
+ base_feature_agent = LlmAgent(
+     name="CodeAnalyzerAgent",
+     model=build_model(),
+     instruction="""
+     You are a coding agent.
+     1. Analyze the repo. 2. Make changes. 3. Raise a Draft Pull Request.
+
+     IMPORTANT: If the chat history shows you already modified files but missed the PR,
+     skip the analysis and call the PR tool immediately.
+     """,
+     output_key="worker_output",
+     tools=[github_mcp()],
+ )
+
+ verifier_agent = LlmAgent(
+     name="CodeVerifierAgent",
+     model=build_model(),
+     instruction="""
+     Review the following output from the CodeAnalyzer:
+
+     \"\"\"
+     {{worker_output}}
+     \"\"\"
+
+     Task:
+     - If a GitHub Pull Request URL is visible above, call 'finish_task' immediately.
+     - If NO Pull Request URL is found, explicitly tell the worker: 'You failed to create a PR. Please execute the PR tool now.'
+     """,
+     tools=[finish_task],
+ )
+
+ feature_agent = LoopAgent(
+     name="PrGenerationAgent",
+     sub_agents=[base_feature_agent, verifier_agent],
+     max_iterations=3,
+ )
app/agents/pr_review_agent.py ADDED
@@ -0,0 +1,70 @@
+ from google.adk.agents.llm_agent import LlmAgent
+ from app.agents.tools.github_mcp import github_mcp
+ from app.services.build_model_service import build_model
+ from google.adk.tools.tool_context import ToolContext
+ from google.adk.agents.loop_agent import LoopAgent
+
+
+ def finish_review(tool_context: ToolContext):
+     """Call this ONLY when the final PR summary and recommendation (Approve/Request Changes) have been posted."""
+     tool_context.actions.escalate = True
+     return {"status": "review_submitted"}
+
+
+ base_review_agent = LlmAgent(
+     name="PullRequestReviewWorker",
+     model=build_model(),
+     instruction="""
+     You are a senior software engineer.
+
+     TASK:
+     1. Use github_mcp to get the PR diff.
+     2. Review files one by one.
+     3. Identify issues in:
+     - Correctness
+     - Edge cases
+     - Security
+     - Performance
+     - Readability & maintainability
+     - Architecture & consistency
+     - Tests (missing or insufficient)
+     4. For each issue, use the 'post_inline_comment' tool.
+
+     RULES:
+     - NO POSITIVE FEEDBACK: Never post "Good," "Consistent," or "Correct." If code is good, skip it.
+     - NO CHATTER: Do not explain your process.
+     - Code improvement comments must include a ```suggestion``` block.
+
+     IMPORTANT: Check the chat history. If you have already commented on certain files,
+     DO NOT repeat them. Move to the next file in the diff.
+
+     When finished with ALL files, provide a 'Final Summary' and call the tool to submit the overall review.
+     """,
+     output_key="review_output",
+     tools=[github_mcp()],
+ )
+
+ review_verifier_agent = LlmAgent(
+     name="ReviewVerifierAgent",
+     model=build_model(),
+     instruction="""
+     You are an auditor. Examine the 'review_output' provided below:
+
+     REVIEW_LOG:
+     {{review_output}}
+
+     CHECKLIST:
+     - Did the worker post a final summary? (Yes/No)
+     - Did the worker submit a formal recommendation (Approve/Reject)? (Yes/No)
+
+     IF BOTH ARE YES: Call 'finish_review' immediately.
+     IF NO: Tell the worker exactly what is missing (e.g., 'You reviewed the files but forgot to submit the final Approval').
+     """,
+     tools=[finish_review],
+ )
+
+ pr_review_agent = LoopAgent(
+     name="AutonomousPrReviewer",
+     sub_agents=[base_review_agent, review_verifier_agent],
+     max_iterations=5,
+ )
app/agents/suggestion_agent.py ADDED
@@ -0,0 +1,59 @@
+ from google.adk.agents.llm_agent import LlmAgent
+ from google.adk.agents.loop_agent import LoopAgent
+ from google.adk.tools.tool_context import ToolContext
+ from app.agents.tools.github_mcp import github_mcp
+ from app.services.build_model_service import build_model
+
+
+ def finish_discussion(tool_context: ToolContext):
+     """Signals that the agent has successfully contributed to the discussion."""
+     tool_context.actions.escalate = True
+     return {"status": "contribution_posted"}
+
+
+ base_agent = LlmAgent(
+     name="IssueAdvisorAgent",
+     model=build_model(),
+     instruction="""
+     You are a Technical Advisor specializing in GitHub Issues. Your goal is to provide deep technical insights on reported problems or feature requests.
+
+     STEP 1: GATHER CONTEXT
+     - Fetch the Issue description and ALL existing comments.
+     - Search the codebase using relevant keywords from the issue to identify the specific files or functions involved.
+
+     STEP 2: ANALYZE
+     - Evaluate if the reported issue is a bug, a performance bottleneck, or a feature request.
+     - Propose a technical strategy or root cause analysis based on the codebase search.
+     - Acknowledge any previous comments to maintain a collaborative tone.
+
+     STEP 3: POST COMMENT
+     - Use the GitHub MCP tool to post your technical findings as a comment.
+
+     STRICT RULES:
+     - Do not look at Pull Requests. Never modify files or create branches.
+     - Do not offer to draft code changes.
+     """,
+     output_key="worker_output",
+     tools=[github_mcp()],
+ )
+
+ verifier_agent = LlmAgent(
+     name="ActionVerifier",
+     model=build_model(),
+     instruction="""
+     Review the 'worker_output'.
+
+     If the output mentions it is a Pull Request:
+     - Immediately call 'finish_discussion' to stop the loop.
+     If the text indicates a GitHub comment was successfully submitted via a tool call, run 'finish_discussion'.
+     If the text contains a technical analysis but NO evidence of a tool call (like 'comment_posted'),
+     explicitly command the agent: 'I see your analysis. You must now use the GitHub MCP tool to post this as a comment on the issue.'
+     """,
+     tools=[finish_discussion],
+ )
+
+ suggestion_agent = LoopAgent(
+     name="DiscussionAgent",
+     sub_agents=[base_agent, verifier_agent],
+     max_iterations=2,
+ )
app/agents/tools/github_mcp.py ADDED
@@ -0,0 +1,23 @@
+ import keyring
+ from google.adk.tools.mcp_tool import McpToolset
+ from google.adk.tools.mcp_tool.mcp_session_manager import StreamableHTTPServerParams
+
+ MCP_URL = "https://api.githubcopilot.com/mcp/"
+ SERVICE_NAME = "github-agent"
+ TOKEN_KEY = "github_token"
+
+
+ def github_mcp():
+     token = keyring.get_password(SERVICE_NAME, TOKEN_KEY)
+
+     return McpToolset(
+         connection_params=StreamableHTTPServerParams(
+             timeout=120,
+             url=MCP_URL,
+             headers={
+                 "Authorization": f"Bearer {token}",
+                 "X-MCP-Toolsets": "all",
+                 "X-MCP-Readonly": "false",
+             },
+         ),
+     )
app/cli/commands/run.py ADDED
@@ -0,0 +1,44 @@
+ from app.workflows.agent_workflow import agent_workflow
+ import typer
+ import asyncio
+ from app.cli.inputs import (
+     ask_repo_url,
+     ask_agent_type,
+     ask_llm_model,
+     ask_github_token,
+ )
+ from app.cli.llm import ask_llm_token
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich import box
+ from importlib.metadata import version
+ from app.cli.spinner import run_with_spinner
+
+ console = Console()
+
+
+ def run():
+     console.print(
+         Panel(
+             f"[bold cyan]CodPilot[/bold cyan] [green]v{version('codpilot-cli')}[/green]\n"
+             "[dim]Autonomous coding agent for GitHub[/dim]",
+             box=box.DOUBLE,
+             border_style="cyan",
+             padding=(1, 4),
+         )
+     )
+     agent_type, description = ask_agent_type()
+     repo_url = ask_repo_url(agent_type)
+     model = ask_llm_model()
+     ask_llm_token(model)
+     ask_github_token()
+
+     asyncio.run(
+         run_with_spinner(
+             agent_workflow(
+                 repo_url=repo_url, agent_type=agent_type, description=description
+             )
+         )
+     )
+
+     typer.echo("✅ Execution completed")
app/cli/commands/version.py ADDED
@@ -0,0 +1,8 @@
+ import typer
+ from importlib.metadata import version
+
+
+ def version_callback(value: bool):
+     if value:
+         typer.echo(f"codpilot v{version('codpilot-cli')}")
+         raise typer.Exit()
app/cli/inputs.py ADDED
@@ -0,0 +1,164 @@
+ import questionary
+ import keyring
+ import requests
+ import time
+ import typer
+
+ from app.cli.llm import ask_llm_token
+
+ WORKFLOW_OPTIONS = {
+     "review_pr": "Review PR",
+     "create_feature": "Create a new feature",
+     "suggest_changes": "Add your suggestions to the Issue",
+ }
+ LLM_MODELS = ["Gemini", "OpenAI", "Anthropic"]
+ SERVICE_NAME = "github-agent"
+ LLM_MODEL_KEY = "llm_model"
+ LLM_TOKEN_KEY = "llm_api_token"
+
+
+ def ask_repo_url(agent_type):
+     if agent_type == "create_feature":
+         msg = "Enter the GitHub repository URL:"
+     elif agent_type == "suggest_changes":
+         msg = "Enter the GitHub Issue URL:"
+     else:
+         msg = "Enter the GitHub PR URL:"
+
+     repo_url = questionary.text(
+         msg,
+     ).ask()
+
+     if not repo_url:
+         typer.echo("⛔️ URL is required")
+         raise SystemExit(1)
+
+     if not repo_url.startswith("https://github.com/"):
+         typer.echo("⛔️ Invalid URL")
+         raise SystemExit(1)
+
+     return repo_url
+
+
+ def ask_agent_type():
+     choice = questionary.select(
+         "Select mode:",
+         choices=[
+             questionary.Choice(title=label, value=key)
+             for key, label in WORKFLOW_OPTIONS.items()
+         ],
+     ).ask()
+
+     if not choice:
+         typer.echo("Mode is required")
+         raise SystemExit(1)
+
+     description = None
+     if choice == "create_feature":
+         description = questionary.text(
+             "Enter the details of the feature you want to create:",
+         ).ask()
+
+         if not description:
+             typer.echo("Description is required")
+             raise SystemExit(1)
+
+     return choice, description
+
+
+ def ask_llm_model():
+     selected_model = keyring.get_password(SERVICE_NAME, LLM_MODEL_KEY)
+
+     if selected_model:
+         typer.echo(f"Using LLM model {selected_model}")
+         return selected_model
+
+     model = questionary.select(
+         "Select LLM model:",
+         choices=LLM_MODELS,
+     ).ask()
+
+     if not model:
+         typer.echo("LLM model is required")
+         raise SystemExit(1)
+
+     keyring.set_password(SERVICE_NAME, LLM_MODEL_KEY, model)
+
+     return model
+
+
+ def ask_github_token():
+     token = keyring.get_password(SERVICE_NAME, "github_token")
+
+     if token:
+         return
+
+     token = questionary.password("Enter your GitHub API token:").ask()
+
+     if not token:
+         typer.echo("⛔️ GitHub token is required")
+         raise SystemExit(1)
+
+     if _validate_github_token(token):
+         keyring.set_password(SERVICE_NAME, "github_token", token)
+         time.sleep(1)
+     else:
+         typer.echo("⛔️ Invalid GitHub token")
+         raise SystemExit(1)
+
+
+ def reset_github_token():
+     existing = keyring.get_password(SERVICE_NAME, "github_token")
+
+     if not existing:
+         typer.echo("⚠️ No GitHub token found in keyring.")
+         return
+
+     confirm = questionary.confirm(
+         "Are you sure you want to reset the stored GitHub token?"
+     ).ask()
+
+     if not confirm:
+         typer.echo("ℹ️ Token reset cancelled.")
+         return
+
+     keyring.delete_password(SERVICE_NAME, "github_token")
+     typer.echo("🗑️ GitHub token removed.")
+
+     ask_github_token()
+     typer.echo("GitHub token has been reset successfully.")
+
+
+ def change_llm_model():
+     existing = keyring.get_password(SERVICE_NAME, LLM_MODEL_KEY)
+
+     if not existing:
+         typer.echo("⚠️ No LLM model found.")
+         return
+
+     confirm = questionary.confirm(
+         "Are you sure you want to change the LLM model?"
+     ).ask()
+
+     if not confirm:
+         typer.echo("ℹ️ Change LLM model cancelled.")
+         return
+
+     keyring.delete_password(SERVICE_NAME, LLM_MODEL_KEY)
+     model = ask_llm_model()
+     if keyring.get_password(SERVICE_NAME, LLM_TOKEN_KEY):
+         keyring.delete_password(SERVICE_NAME, LLM_TOKEN_KEY)
+
+     ask_llm_token(model)
+
+
+ def _validate_github_token(token: str) -> bool:
+     try:
+         resp = requests.get(
+             "https://api.github.com/user",
+             headers={"Authorization": f"Bearer {token}"},
+             timeout=5,
+         )
+         return resp.status_code == 200
+     except requests.RequestException:
+         return False
app/cli/llm.py ADDED
@@ -0,0 +1,100 @@
+ import keyring
+ import questionary
+ import os
+ import subprocess
+ import typer
+
+ SERVICE_NAME = "github-agent"
+ LLM_TOKEN_KEY = "llm_api_token"
+ LLM_MODEL_KEY = "llm_model"
+
+
+ def ask_llm_token(model):
+     token = keyring.get_password(SERVICE_NAME, LLM_TOKEN_KEY)
+
+     if token:
+         set_llm_token_to_env(token)
+         return
+
+     token = questionary.password(
+         f"🔑 Enter your {model} token:",
+     ).ask()
+
+     if not token:
+         typer.echo("❌ LLM token is required")
+         raise SystemExit(1)
+
+     if ping_model(model, token):
+         keyring.set_password(SERVICE_NAME, LLM_TOKEN_KEY, token)
+         set_llm_token_to_env(token)
+         typer.echo("🔐 Token saved securely in system keychain.")
+
+
+ def set_llm_token_to_env(token):
+     selected_model = keyring.get_password(SERVICE_NAME, LLM_MODEL_KEY)
+     if selected_model == "OpenAI":
+         os.environ["OPENAI_API_KEY"] = token
+     elif selected_model == "Anthropic":
+         os.environ["ANTHROPIC_API_KEY"] = token
+     else:
+         os.environ["GEMINI_API_KEY"] = token
+
+
+ def ping_model(model, token):
+     typer.echo("🤖 Pinging {}".format(model))
+     try:
+         if model == "OpenAI":
+             cmd = [
+                 "curl",
+                 "-s",
+                 "-o",
+                 "/dev/null",
+                 "-w",
+                 "%{http_code}",
+                 "https://api.openai.com/v1/models",
+                 "-H",
+                 f"Authorization: Bearer {token}",
+             ]
+
+         elif model == "Anthropic":
+             cmd = [
+                 "curl",
+                 "-s",
+                 "-o",
+                 "/dev/null",
+                 "-w",
+                 "%{http_code}",
+                 "https://api.anthropic.com/v1/models",
+                 "-H",
+                 f"x-api-key: {token}",
+                 "-H",
+                 "anthropic-version: 2023-06-01",
+             ]
+
+         elif model == "Gemini":
+             cmd = [
+                 "curl",
+                 "-s",
+                 "-o",
+                 "/dev/null",
+                 "-w",
+                 "%{http_code}",
+                 f"https://generativelanguage.googleapis.com/v1beta/models?key={token}",
+             ]
+
+         else:
+             typer.echo("❌ Unknown model provider")
+             raise SystemExit(1)
+
+         result = subprocess.run(cmd, capture_output=True, text=True)
+
+         if result.stdout.strip() == "200":
+             typer.echo("✅ Model ping successful.")
+             return True
+         else:
+             typer.echo("❌ Model ping failed. Invalid token.")
+             raise SystemExit(1)
+
+     except Exception as e:
+         typer.echo(f"Error: {e}")
+         raise SystemExit(1)
app/cli/main.py ADDED
@@ -0,0 +1,31 @@
+ import typer
+ from app.cli.commands.run import run as run_command
+ from app.cli.commands.version import version_callback
+ from app.cli.inputs import reset_github_token, change_llm_model
+
+ app = typer.Typer(help="🤖 CodPilot CLI")
+
+
+ @app.callback()
+ def get_version(
+     version: bool = typer.Option(
+         False,
+         "--version",
+         "-v",
+         callback=version_callback,
+         is_eager=True,
+         help="Get codpilot version.",
+     ),
+ ):
+     pass
+
+
+ app.command("run", help="📀 Run the agent.")(run_command)
+ app.command("reset-github-token", help="🔑 Reset the stored GitHub token.")(
+     reset_github_token
+ )
+ app.command("change-llm", help="🤖 Change the LLM model.")(change_llm_model)
+
+
+ if __name__ == "__main__":
+     app()
app/cli/spinner.py ADDED
@@ -0,0 +1,11 @@
+ from rich.console import Console
+ from rich.spinner import Spinner
+ from rich.live import Live
+
+ console = Console()
+
+
+ async def run_with_spinner(coro):
+     spinner = Spinner("dots", text="Running agent...")
+     with Live(spinner, console=console, refresh_per_second=10):
+         await coro
app/services/build_model_service.py ADDED
@@ -0,0 +1,14 @@
+ import keyring
+ from google.adk.models.lite_llm import LiteLlm
+
+ SERVICE_NAME = "github-agent"
+ LLM_MODEL_KEY = "llm_model"
+
+
+ def build_model():
+     selected_model = keyring.get_password(SERVICE_NAME, LLM_MODEL_KEY)
+     if selected_model == "OpenAI":
+         return LiteLlm(model="openai/gpt-5-mini")
+     elif selected_model == "Anthropic":
+         return LiteLlm(model="anthropic/claude-3-7-sonnet-20250219", temperature=0)
+     return "gemini-3-flash-preview"
app/services/runner_service.py ADDED
@@ -0,0 +1,34 @@
+ from google.genai.types import Content, Part
+
+
+ class RunnerService:
+     def __init__(self, runner, session_id):
+         self.runner = runner
+         self.session_id = session_id
+
+     async def process(self, repo_url, description=None):
+         final_text_parts = []
+
+         if description:
+             message = f"Here is the repository URL: {repo_url}\n\nHere are my instructions:\n{description}"
+         else:
+             message = f"Here is the repository URL: {repo_url}"
+
+         async for event in self.runner.run_async(
+             user_id="user",
+             session_id=self.session_id,
+             new_message=Content(parts=[Part(text=message)], role="user"),
+         ):
+             if event.content and event.content.parts:
+                 for part in event.content.parts:
+                     if part.text:
+                         final_text_parts.append(part.text)
+
+             if event.is_final_response() and event.actions and event.actions.escalate:
+                 return (
+                     f"Agent escalated: {event.error_message or 'No specific message.'}"
+                 )
+
+         return (
+             "".join(final_text_parts) if final_text_parts else "No response generated."
+         )
app/workflows/agent_workflow.py ADDED
@@ -0,0 +1,50 @@
+ from google.adk.runners import App, Runner
+ from app.services.runner_service import RunnerService
+ from google.adk.sessions import InMemorySessionService
+ import uuid
+
+ session_service = InMemorySessionService()
+
+
+ async def agent_workflow(repo_url, agent_type, description=None):
+     app = App(
+         name="CodingAgentWorkflow",
+         root_agent=get_agent_by_type(agent_type),
+     )
+
+     runner = get_runner(app)
+     session_id = uuid.uuid4().hex
+     await create_session(app, runner, session_id)
+
+     runner_service = RunnerService(runner=runner, session_id=session_id)
+     await runner_service.process(repo_url, description)
+
+
+ async def create_session(app, runner, session_id):
+     return await runner.session_service.create_session(
+         app_name=app.name, user_id="user", session_id=session_id
+     )
+
+
+ def get_runner(app: App):
+     return Runner(
+         app=app,
+         session_service=session_service,
+     )
+
+
+ def get_agent_by_type(agent_type):
+     if agent_type == "review_pr":
+         from app.agents.pr_review_agent import pr_review_agent
+
+         return pr_review_agent
+     elif agent_type == "create_feature":
+         from app.agents.feature_agent import feature_agent
+
+         return feature_agent
+     elif agent_type == "suggest_changes":
+         from app.agents.suggestion_agent import suggestion_agent
+
+         return suggestion_agent
+     else:
+         raise ValueError(f"Unknown agent type: {agent_type}")
codpilot_cli-0.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,88 @@
+ Metadata-Version: 2.4
+ Name: codpilot-cli
+ Version: 0.1.0
+ Summary: A multi-agent CLI tool that integrates with GitHub to review PRs, create features, and suggest changes.
+ License-Expression: MIT
+ Keywords: cli,codpilot,github
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Environment :: Console
+ Classifier: Intended Audience :: Developers
+ Classifier: Programming Language :: Python :: 3.14
+ Classifier: Topic :: Software Development :: Quality Assurance
+ Requires-Python: >=3.14
+ Requires-Dist: google-adk>=1.24.0
+ Requires-Dist: keyring>=25.7.0
+ Requires-Dist: litellm>=1.81.11
+ Requires-Dist: python-dotenv>=1.2.1
+ Requires-Dist: questionary>=2.1.1
+ Requires-Dist: rich>=14.3.2
+ Requires-Dist: typer>=0.23.1
+ Description-Content-Type: text/markdown
+
+ # CodPilot CLI
+
+ A multi-agent CLI tool that integrates with GitHub and can help you review pull requests, create new features, and suggest changes on your behalf — all from your terminal.
+
+ ## Features
+
+ **Review PR** — Analyzes the pull request and posts inline comments with suggestions.
+
+ **Create Feature** — Implements new features, then opens a draft pull request against the target repository.
+
+ **Suggest Changes** — Participates in GitHub Issue discussions, analyzes the codebase and conversation, and posts technical suggestions as comments.
+
+ ## Available Commands
+
+ | Command | Description |
+ | ------------------------------ | ----------------------------- |
+ | `codpilot run` | Run the agent |
+ | `codpilot reset-github-token` | Reset the stored GitHub token |
+ | `codpilot change-llm` | Change the LLM model |
+ | `codpilot --version, -v` | Show the current version |
+
+ `codpilot run` lets you choose one of the following modes:
+
+ - Review PR
+ - Create Feature
+ - Suggest Changes
49
+ ## Prerequisites
50
+
51
+ - Python 3.14+
52
+ - A GitHub personal access token
53
+ - An API key for at least one LLM provider (Gemini, OpenAI, or Anthropic)
54
+
55
+ ## Installing locally
56
+
57
+ ```bash
58
+ git clone https://github.com/<your-username>/ai-coding-agent.git
59
+ cd codepilot
60
+ pip install -e .
61
+ ```
62
+
63
+ Verify the installation:
64
+
65
+ ```bash
66
+ codepilot --version
67
+ ```
68
+
69
+ ## Usage
70
+
71
+ ```bash
72
+ codepilot run
73
+ ```
74
+
75
+ The interactive prompt will walk you through:
76
+
77
+ 1. Selecting an agent (Review PR / Create Feature / Suggest Changes)
78
+ 2. Entering the GitHub URL (PR, repository, or issue depending on the agent)
79
+ 3. Choosing an LLM provider (Gemini / OpenAI / Anthropic)
80
+ 4. Providing API credentials for github and LLM (cached in your system keychain for future runs)
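+
+ Credentials are stored with the `keyring` package under the `github-agent` service name (keys: `github_token`, `llm_api_token`, and `llm_model`). As a rough sketch, assuming the `keyring` command-line tool that ships with the package is available on your PATH, you can inspect what is cached:
+
+ ```bash
+ keyring get github-agent llm_model      # selected provider (Gemini / OpenAI / Anthropic)
+ keyring get github-agent github_token   # stored GitHub token
+ ```
+
+ To change or clear these values, prefer `codpilot reset-github-token` and `codpilot change-llm` over editing the keychain directly.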
+
+ ## Supported Models
+
+ | Provider | Model |
+ | --------- | ---------------------------- |
+ | Gemini | `gemini-3-flash-preview` |
+ | OpenAI | `gpt-5-mini` |
+ | Anthropic | `claude-3-7-sonnet-20250219` |
codpilot_cli-0.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
+ app/agents/feature_agent.py,sha256=gUJLMRlP1hRJeV9N7vZ2DADrslz_x1EeUoqV4pfD9Rk,1485
+ app/agents/pr_review_agent.py,sha256=imbQFCxqEMfDwoSLBX9h60EZZCs953rpulC9f5wOb5E,2440
+ app/agents/suggestion_agent.py,sha256=mOqENSrOQAazLy2MHp1tBUz1FwBDHuXEAeWZL8vcKeo,2291
+ app/agents/tools/github_mcp.py,sha256=VsCNeHKK43DnvqpiydH48MSk02A_Uueypr2XOAhA3z0,650
+ app/cli/inputs.py,sha256=4xwK4hYXmgfGOyp3jw_9dvvvuD2Bx2Og5DIlL2CQVN4,4110
+ app/cli/llm.py,sha256=v9SF5mIuETvhwYkZqsBvMRvvHqKubTYpMMu7ghUpRnY,2672
+ app/cli/main.py,sha256=EUtuVufnw7VNNiI03tHaHqTJFsJFAlUfAtubTZdctZc,746
+ app/cli/spinner.py,sha256=rbr8LcosjFzHXkEYqt8VUOSIzTf-qfsh5rfcmbtNcRA,288
+ app/cli/commands/run.py,sha256=XAhVTdg-E3r3Aq9Bx_SuLy6WbidKfngU9ParE-m0Taw,1110
+ app/cli/commands/version.py,sha256=wGo5mXhUygfo-xsQ17UvzdU2RTLqIDfBRwRow-ebNdk,192
+ app/services/build_model_service.py,sha256=hV-tolkuj5DqyKtf1poFk7F_1QyHs59FgVHgeyMRLMU,458
+ app/services/runner_service.py,sha256=U4AOTeIRzwxcVogSMmyANDlXEmjCeXK6SC9trxR2nyo,1212
+ app/workflows/agent_workflow.py,sha256=7MWIFSezCoLgIvS6qdEbhnP84nOp5AzKSZ1Esv0KrUM,1426
+ codpilot_cli-0.1.0.dist-info/METADATA,sha256=apWUa0Hy84rZLhE1idJwuRNlj8EV4JBHDdVJEK-ET68,2796
+ codpilot_cli-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ codpilot_cli-0.1.0.dist-info/entry_points.txt,sha256=4sv7rFPuZfAVfDPCqSog1ybGMvfRIZlVEdMDpTWXYLs,46
+ codpilot_cli-0.1.0.dist-info/RECORD,,
codpilot_cli-0.1.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.28.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
codpilot_cli-0.1.0.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ codpilot = app.cli.main:app