breadcrumb-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- breadcrumb/__init__.py +7 -0
- breadcrumb/ai/__init__.py +1 -0
- breadcrumb/ai/prompts.py +60 -0
- breadcrumb/ai/router.py +187 -0
- breadcrumb/cli.py +144 -0
- breadcrumb/commands/__init__.py +1 -0
- breadcrumb/commands/ask.py +98 -0
- breadcrumb/commands/audit.py +77 -0
- breadcrumb/commands/chat.py +123 -0
- breadcrumb/commands/commit.py +87 -0
- breadcrumb/commands/diff.py +90 -0
- breadcrumb/commands/digest.py +80 -0
- breadcrumb/commands/explain_error.py +63 -0
- breadcrumb/commands/init.py +67 -0
- breadcrumb/commands/share.py +209 -0
- breadcrumb/config.py +84 -0
- breadcrumb/history.py +110 -0
- breadcrumb/ingest.py +163 -0
- breadcrumb_cli-0.1.0.dist-info/METADATA +342 -0
- breadcrumb_cli-0.1.0.dist-info/RECORD +23 -0
- breadcrumb_cli-0.1.0.dist-info/WHEEL +4 -0
- breadcrumb_cli-0.1.0.dist-info/entry_points.txt +2 -0
- breadcrumb_cli-0.1.0.dist-info/licenses/LICENSE +23 -0
breadcrumb/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""AI package initialization."""
|
breadcrumb/ai/prompts.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
"""
|
|
2
|
+
System prompts and prompt templates for various commands.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
SYSTEM_PROMPT_DEFAULT = """You are Bread Crumb, an expert AI code reviewer and architect.
|
|
6
|
+
You analyze code repositories with clarity and precision.
|
|
7
|
+
Always be concise but thorough. Suggest improvements when relevant.
|
|
8
|
+
Format code blocks with language specifiers for syntax highlighting."""
|
|
9
|
+
|
|
10
|
+
SYSTEM_PROMPT_AUDIT = """You are a security and architecture auditor.
|
|
11
|
+
Analyze this codebase for:
|
|
12
|
+
- Security vulnerabilities (SQL injection, XSS, auth issues, etc.)
|
|
13
|
+
- Performance bottlenecks
|
|
14
|
+
- Architectural anti-patterns
|
|
15
|
+
- Dependencies vulnerabilities
|
|
16
|
+
- Code quality issues
|
|
17
|
+
|
|
18
|
+
Be specific about WHAT is wrong and HOW to fix it.
|
|
19
|
+
Prioritize critical issues first."""
|
|
20
|
+
|
|
21
|
+
SYSTEM_PROMPT_COMMIT = """Generate a conventional commit message for the staged changes.
|
|
22
|
+
Format: type(scope): description
|
|
23
|
+
Types: feat, fix, docs, style, refactor, perf, test, chore, ci
|
|
24
|
+
Keep under 72 characters.
|
|
25
|
+
Return ONLY the commit message, no explanation."""
|
|
26
|
+
|
|
27
|
+
SYSTEM_PROMPT_EXPLAIN_ERROR = """Analyze this error or stack trace and provide:
|
|
28
|
+
1. Plain English explanation of what went wrong
|
|
29
|
+
2. Root cause
|
|
30
|
+
3. Specific steps to fix it
|
|
31
|
+
Be concise and practical."""
|
|
32
|
+
|
|
33
|
+
SYSTEM_PROMPT_DIFF = """Review this git diff and provide:
|
|
34
|
+
1. Summary of changes
|
|
35
|
+
2. Any potential issues or bugs
|
|
36
|
+
3. Suggestions for improvement
|
|
37
|
+
4. Impact assessment
|
|
38
|
+
Be critical but constructive."""
|
|
39
|
+
|
|
40
|
+
SYSTEM_PROMPT_DIGEST = """Summarize these git commits into a concise daily digest.
|
|
41
|
+
What changed? What was fixed? Any notable improvements?
|
|
42
|
+
Keep it under 200 words. Use bullet points."""
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def get_system_prompt(command: str, custom: str = "") -> str:
|
|
46
|
+
"""Get system prompt for a command, with optional custom override."""
|
|
47
|
+
if custom:
|
|
48
|
+
return custom
|
|
49
|
+
|
|
50
|
+
prompts = {
|
|
51
|
+
"chat": SYSTEM_PROMPT_DEFAULT,
|
|
52
|
+
"ask": SYSTEM_PROMPT_DEFAULT,
|
|
53
|
+
"audit": SYSTEM_PROMPT_AUDIT,
|
|
54
|
+
"commit": SYSTEM_PROMPT_COMMIT,
|
|
55
|
+
"explain-error": SYSTEM_PROMPT_EXPLAIN_ERROR,
|
|
56
|
+
"diff": SYSTEM_PROMPT_DIFF,
|
|
57
|
+
"digest": SYSTEM_PROMPT_DIGEST,
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
return prompts.get(command, SYSTEM_PROMPT_DEFAULT)
|
breadcrumb/ai/router.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AI provider routing and message handling.
|
|
3
|
+
Supports Anthropic, OpenAI, Gemini, and Ollama.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from typing import Iterator, Optional
|
|
7
|
+
|
|
8
|
+
from breadcrumb.config import Config
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class AIRouter:
    """Routes requests to the appropriate AI provider.

    Provider name, API key, and model are resolved from the global Config at
    construction time; an explicit ``provider`` argument overrides the
    configured default. Supported providers: anthropic, openai, gemini, ollama.
    """

    # Names accepted by chat()/stream(); each maps to a _chat_<name> and a
    # _stream_<name> method below. Single source of truth for dispatch, so
    # chat() and stream() no longer duplicate an if/elif chain.
    _PROVIDERS = ("anthropic", "openai", "gemini", "ollama")

    def __init__(self, provider: Optional[str] = None):
        self.config = Config()
        # Explicit argument wins over the configured default provider.
        self.provider = provider or self.config.get("provider", "anthropic")
        self.api_key = self.config.get_api_key(self.provider)
        self.model = self.config.get_model(self.provider)

    def _impl(self, action: str):
        """Return the bound ``_<action>_<provider>`` method.

        Raises:
            ValueError: if ``self.provider`` is not a supported provider.
        """
        if self.provider not in self._PROVIDERS:
            raise ValueError(f"Unknown provider: {self.provider}")
        return getattr(self, f"_{action}_{self.provider}")

    def chat(self, messages: list, system: str = "", **kwargs) -> str:
        """Send a message and get a response."""
        return self._impl("chat")(messages, system, **kwargs)

    def stream(self, messages: list, system: str = "", **kwargs) -> Iterator[str]:
        """Stream a response token by token."""
        # Still a generator function (yield from), so — as before — an unknown
        # provider raises only when the caller starts iterating.
        yield from self._impl("stream")(messages, system, **kwargs)

    def count_tokens(self, text: str) -> int:
        """Estimate token count (rough approximation)."""
        # Rough estimate: 4 chars ≈ 1 token
        return len(text) // 4

    # ── Anthropic ──────────────────────────────────────────────────────────────
    def _chat_anthropic(self, messages: list, system: str = "", **kwargs) -> str:
        """Synchronous chat with Anthropic."""
        try:
            import anthropic
        except ImportError:
            raise ImportError("anthropic package required. Install: pip install anthropic")

        client = anthropic.Anthropic(api_key=self.api_key)
        response = client.messages.create(
            model=self.model,
            max_tokens=kwargs.get("max_tokens", 4096),
            system=system,
            messages=messages,
            temperature=kwargs.get("temperature", 0.7),
        )
        # First content block may not be a text block; fall back to "".
        text = getattr(response.content[0], "text", "")
        return text or ""

    def _stream_anthropic(self, messages: list, system: str = "", **kwargs) -> Iterator[str]:
        """Stream response from Anthropic."""
        try:
            import anthropic
        except ImportError:
            raise ImportError("anthropic package required. Install: pip install anthropic")

        client = anthropic.Anthropic(api_key=self.api_key)
        with client.messages.stream(
            model=self.model,
            max_tokens=kwargs.get("max_tokens", 4096),
            system=system,
            messages=messages,
            temperature=kwargs.get("temperature", 0.7),
        ) as stream:
            for text in stream.text_stream:
                yield text

    # ── OpenAI ────────────────────────────────────────────────────────────────
    def _chat_openai(self, messages: list, system: str = "", **kwargs) -> str:
        """Synchronous chat with OpenAI."""
        try:
            import openai
        except ImportError:
            raise ImportError("openai package required. Install: pip install openai")

        client = openai.OpenAI(api_key=self.api_key)
        if system:
            # OpenAI carries the system prompt as the leading message.
            messages = [{"role": "system", "content": system}] + messages
        response = client.chat.completions.create(
            model=self.model,
            messages=messages,
            max_tokens=kwargs.get("max_tokens", 4096),
            temperature=kwargs.get("temperature", 0.7),
        )
        return response.choices[0].message.content or ""

    def _stream_openai(self, messages: list, system: str = "", **kwargs) -> Iterator[str]:
        """Stream response from OpenAI."""
        try:
            import openai
        except ImportError:
            raise ImportError("openai package required. Install: pip install openai")

        client = openai.OpenAI(api_key=self.api_key)
        if system:
            messages = [{"role": "system", "content": system}] + messages
        stream = client.chat.completions.create(
            model=self.model,
            messages=messages,
            max_tokens=kwargs.get("max_tokens", 4096),
            temperature=kwargs.get("temperature", 0.7),
            stream=True,
        )
        for chunk in stream:
            if chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

    # ── Gemini ────────────────────────────────────────────────────────────────
    def _chat_gemini(self, messages: list, system: str = "", **kwargs) -> str:
        """Synchronous chat with Gemini.

        NOTE(review): message roles are discarded — only the "content" values
        are forwarded — and max_tokens/temperature kwargs are not applied here;
        confirm whether generation_config should be plumbed through.
        """
        try:
            import google.generativeai as genai
        except ImportError:
            raise ImportError(
                "google-generativeai package required. Install: pip install google-generativeai"
            )

        genai.configure(api_key=self.api_key)
        model = genai.GenerativeModel(self.model, system_instruction=system)
        response = model.generate_content([m["content"] for m in messages])
        return response.text

    def _stream_gemini(self, messages: list, system: str = "", **kwargs) -> Iterator[str]:
        """Stream response from Gemini (same role/kwargs caveats as _chat_gemini)."""
        try:
            import google.generativeai as genai
        except ImportError:
            raise ImportError(
                "google-generativeai package required. Install: pip install google-generativeai"
            )

        genai.configure(api_key=self.api_key)
        model = genai.GenerativeModel(self.model, system_instruction=system)
        response = model.generate_content([m["content"] for m in messages], stream=True)
        for chunk in response:
            yield chunk.text

    # ── Ollama ────────────────────────────────────────────────────────────────
    def _chat_ollama(self, messages: list, system: str = "", **kwargs) -> str:
        """Synchronous chat with Ollama.

        NOTE(review): max_tokens/temperature kwargs are ignored for Ollama;
        confirm whether they should be passed via the client's options.
        """
        try:
            import ollama
        except ImportError:
            raise ImportError("ollama package required. Install: pip install ollama")

        url = self.config.get("ollama_url", "http://localhost:11434")
        client = ollama.Client(host=url)
        if system:
            messages = [{"role": "system", "content": system}] + messages
        response = client.chat(model=self.model, messages=messages)
        return response["message"]["content"]

    def _stream_ollama(self, messages: list, system: str = "", **kwargs) -> Iterator[str]:
        """Stream response from Ollama (same kwargs caveat as _chat_ollama)."""
        try:
            import ollama
        except ImportError:
            raise ImportError("ollama package required. Install: pip install ollama")

        url = self.config.get("ollama_url", "http://localhost:11434")
        client = ollama.Client(host=url)
        if system:
            messages = [{"role": "system", "content": system}] + messages
        stream = client.chat(model=self.model, messages=messages, stream=True)
        for chunk in stream:
            if "message" in chunk:
                yield chunk["message"]["content"]
|
breadcrumb/cli.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Main CLI entry point using Click.
|
|
3
|
+
Wires all commands together.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import sys
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
import click
|
|
10
|
+
|
|
11
|
+
from breadcrumb import __version__
|
|
12
|
+
from breadcrumb.commands.ask import cmd_ask
|
|
13
|
+
from breadcrumb.commands.audit import cmd_audit
|
|
14
|
+
from breadcrumb.commands.chat import cmd_chat
|
|
15
|
+
from breadcrumb.commands.commit import cmd_commit
|
|
16
|
+
from breadcrumb.commands.diff import cmd_diff
|
|
17
|
+
from breadcrumb.commands.explain_error import cmd_explain_error
|
|
18
|
+
from breadcrumb.commands.init import cmd_init
|
|
19
|
+
from breadcrumb.config import Config
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@click.group(invoke_without_command=True)
@click.version_option(version=__version__)
@click.pass_context
def cli(ctx):
    """🍞 Bread Crumb — Chat with your codebase."""
    # If no command, show help
    # (invoke_without_command=True means the group body runs even with no
    # subcommand, so a bare `breadcrumb` prints usage instead of erroring.)
    if ctx.invoked_subcommand is None:
        click.echo(ctx.get_help())
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@cli.command()
@click.argument("repo_path", type=click.Path(exists=True), default=".")
@click.option("--provider", help="AI provider (anthropic, openai, gemini, ollama)")
@click.option("--session", default="default", help="Session name")
def chat(repo_path, provider, session):
    """Interactive chat with your codebase."""
    # Thin wrapper: all chat logic lives in breadcrumb.commands.chat.
    cmd_chat(Path(repo_path), provider, session)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@cli.command()
@click.argument("repo_path", type=click.Path(exists=True), default=".")
@click.argument("question", required=False)
@click.option("--provider", help="AI provider")
@click.option("--model", help="Model name")
@click.option("--format", type=click.Choice(["text", "markdown"]), default="text")
@click.option("--pipe", is_flag=True, help="Read question from stdin")
def ask(repo_path, question, provider, model, format, pipe):
    """Ask a one-shot question about the codebase."""
    # QUESTION may be omitted when --pipe is set; cmd_ask then reads stdin.
    # The `or ""` normalizes click's None for a missing optional argument.
    cmd_ask(Path(repo_path), question or "", provider, model, format, pipe)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
@cli.command()
@click.argument("repo_path", type=click.Path(exists=True), default=".")
@click.option("--provider", help="AI provider")
@click.option("--model", help="Model name")
@click.option("--format", type=click.Choice(["text", "markdown"]), default="text")
def audit(repo_path, provider, model, format):
    """Run a security and architecture audit."""
    # Thin wrapper: the audit itself is implemented in breadcrumb.commands.audit.
    cmd_audit(Path(repo_path), provider, model, format)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@cli.command()
@click.argument("repo_path", type=click.Path(exists=True), default=".")
@click.argument("revision", required=False, default="")
@click.option("--provider", help="AI provider")
def diff(repo_path, revision, provider):
    """Review a git diff with AI."""
    # NOTE(review): an empty REVISION is forwarded as-is; presumably cmd_diff
    # treats it as "diff the working tree" — confirm against commands/diff.py.
    cmd_diff(Path(repo_path), revision, provider)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
@cli.command()
@click.argument("repo_path", type=click.Path(exists=True), default=".")
@click.option("--provider", help="AI provider")
@click.option("--silent", is_flag=True, help="Only output the message")
def commit(repo_path, provider, silent):
    """Generate a conventional commit message."""
    # Thin wrapper: generation logic lives in breadcrumb.commands.commit.
    cmd_commit(Path(repo_path), provider, silent)
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@cli.command()
@click.option("--provider", help="AI provider")
@click.argument("error", required=False)
def explain_error(provider, error):
    """Explain an error or stack trace."""
    # NOTE(review): under click 8 this is exposed as `explain-error` (underscores
    # become hyphens in the command name) — confirm docs/README match.
    cmd_explain_error(error, provider)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
@cli.command()
@click.argument("repo_path", type=click.Path(exists=True), default=".")
def init(repo_path):
    """Initialize .breadcrumb.yaml in the repository."""
    # Thin wrapper: file creation lives in breadcrumb.commands.init.
    cmd_init(Path(repo_path))
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
@cli.group()
def config():
    """Manage global configuration."""
    # Container group only; subcommands (set-key, get-key, show) do the work.
    pass
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
@config.command()
@click.argument("key")
@click.argument("value")
def set_key(key, value):
    """Set a configuration value."""
    # NOTE(review): click 8 exposes this as `config set-key` — confirm that is
    # the intended user-facing name (vs. `config set`).
    cfg = Config()
    cfg.set(key, value)
    click.echo(f"✓ Set {key} = {value}")
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
@config.command()
@click.argument("key")
def get_key(key):
    """Get a configuration value."""
    # Echoes whatever Config.get returns for KEY (including a missing-key
    # fallback, if Config provides one — semantics live in breadcrumb.config).
    cfg = Config()
    value = cfg.get(key)
    click.echo(value)
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
@config.command()
def show():
    """Show all configuration, masking values whose key looks secret.

    Any entry whose key contains "key" (e.g. api_key) has its value replaced
    with "***" plus its last 4 characters so secrets never print in full.
    """
    cfg = Config()
    for key, value in sorted(cfg.to_dict().items()):
        if "key" in key.lower() and value:
            # Hide API keys. str() guards against non-string config values,
            # which previously crashed the slice operation.
            value = "***" + str(value)[-4:]
        click.echo(f"{key}: {value}")
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def main():
    """Entry point for the CLI."""
    try:
        cli()
    except KeyboardInterrupt:
        # Ctrl-C: print a blank line and exit with success status.
        click.echo("\n")
        sys.exit(0)
    except Exception as e:
        # Top-level boundary: report any unhandled error and exit non-zero.
        click.echo(f"Error: {e}", err=True)
        sys.exit(1)
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
if __name__ == "__main__":
    # Allow running this module directly (outside the installed entry point).
    main()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Commands package initialization."""
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
"""
|
|
2
|
+
One-shot non-interactive query command.
|
|
3
|
+
Ask a question and get an answer in one go.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import sys
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
from rich.markdown import Markdown
|
|
12
|
+
|
|
13
|
+
from breadcrumb.ai.prompts import get_system_prompt
|
|
14
|
+
from breadcrumb.ai.router import AIRouter
|
|
15
|
+
from breadcrumb.ingest import FileIngester
|
|
16
|
+
|
|
17
|
+
# Shared Rich console used for all output from this command module.
console = Console()
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def cmd_ask(
    repo_path: Path,
    question: str,
    provider: Optional[str] = None,
    model: Optional[str] = None,
    format: str = "text",
    pipe: bool = False,
) -> str:
    """
    Ask a one-shot question about the repository.

    Args:
        repo_path: Repository path
        question: The question to ask
        provider: AI provider (overrides config)
        model: Model name (overrides config)
        format: Output format ('text' or 'markdown')
        pipe: If True, read question from stdin if not provided

    Returns:
        The AI response (empty string on error)
    """
    # Guard clauses: resolve the question first, bail out if there is none.
    if pipe and not question:
        question = sys.stdin.read()
    if not question:
        console.print("[red]Error: No question provided[/red]")
        return ""

    # Gather repository context; an ingestion failure degrades to no context.
    context = ""
    try:
        context = FileIngester(repo_path).get_content()
        if not context:
            console.print("[yellow]Warning: No code files found in repository[/yellow]")
    except Exception as exc:
        console.print(f"[red]Error reading repository: {exc}[/red]")

    # Configure the AI router, honoring an explicit model override.
    router = AIRouter(provider)
    if model:
        router.model = model

    system = get_system_prompt("ask")
    user_prompt = f"""Repo context:
{context}

Question: {question}"""
    messages = [{"role": "user", "content": user_prompt}]

    # Accumulate the streamed answer behind a spinner.
    answer = ""
    try:
        with console.status("[bold cyan]Thinking...", spinner="dots"):
            for piece in router.stream(messages, system):
                answer += piece
    except Exception as exc:
        console.print(f"[red]Error: {exc}[/red]")
        return ""

    # Render according to the requested output format.
    console.print(Markdown(answer) if format == "markdown" else answer)

    # Rough usage estimate across prompt context, question, and answer.
    total_tokens = sum(map(router.count_tokens, (context, question, answer)))
    console.print(f"\n[dim]~{total_tokens:,} tokens used[/dim]")

    return answer
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Security and architecture audit command.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
from rich.markdown import Markdown
|
|
10
|
+
|
|
11
|
+
from breadcrumb.ai.prompts import get_system_prompt
|
|
12
|
+
from breadcrumb.ai.router import AIRouter
|
|
13
|
+
from breadcrumb.ingest import FileIngester
|
|
14
|
+
|
|
15
|
+
# Shared Rich console used for all output from this command module.
console = Console()
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def cmd_audit(
    repo_path: Path,
    provider: Optional[str] = None,
    model: Optional[str] = None,
    format: str = "text",
) -> str:
    """
    Run a security and architecture audit on the repository.

    Args:
        repo_path: Repository path
        provider: AI provider (overrides config)
        model: Model name (overrides config)
        format: Output format ('text' or 'markdown')

    Returns:
        The audit report (empty string on error)
    """
    # Gather repository context; an ingestion failure degrades to no context.
    context = ""
    try:
        context = FileIngester(repo_path).get_content()
        if not context:
            console.print("[yellow]Warning: No code files found[/yellow]")
    except Exception as exc:
        console.print(f"[red]Error reading repository: {exc}[/red]")

    # Configure the AI router, honoring an explicit model override.
    router = AIRouter(provider)
    if model:
        router.model = model

    system = get_system_prompt("audit")
    messages = [
        {
            "role": "user",
            "content": f"""Analyze this codebase for security, architecture, and quality issues.

{context}""",
        }
    ]

    try:
        # Accumulate the streamed report behind a spinner.
        report = ""
        with console.status("[bold cyan]Auditing codebase...", spinner="dots"):
            for chunk in router.stream(messages, system):
                report += chunk

        # Render according to the requested output format.
        console.print(Markdown(report) if format == "markdown" else report)

        # Rough usage estimate across prompt context and report.
        total_tokens = router.count_tokens(context) + router.count_tokens(report)
        console.print(f"\n[dim]~{total_tokens:,} tokens used[/dim]")

        return report
    except Exception as exc:
        console.print(f"[red]Error: {exc}[/red]")
        return ""
|