kittycode 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kittycode/__init__.py ADDED
@@ -0,0 +1,10 @@
1
+ """KittyCode - minimal AI coding agent."""
2
+
3
+ __version__ = "0.1.0"
4
+
5
+ from kittycode.agent import Agent
6
+ from kittycode.config import Config
7
+ from kittycode.llm import LLM
8
+ from kittycode.tools import ALL_TOOLS
9
+
10
+ __all__ = ["Agent", "Config", "LLM", "ALL_TOOLS", "__version__"]
kittycode/__main__.py ADDED
@@ -0,0 +1,3 @@
1
"""Package entry point for ``python -m kittycode``."""

from kittycode.cli import main

# Guard the call so importing this module (e.g. by tooling) does not start
# the CLI; ``python -m kittycode`` still runs main() because the module is
# executed with __name__ == "__main__".
if __name__ == "__main__":
    main()
kittycode/agent.py ADDED
@@ -0,0 +1,125 @@
1
+ """Core agent loop.
2
+
3
+ This is the heart of KittyCode.
4
+
5
+ user message -> LLM (with tools) -> tool calls? -> execute -> loop
6
+ -> text reply? -> return to user
7
+
8
+ It keeps looping until the LLM responds with plain text, which means it is
9
+ done working and ready to report back.
10
+ """
11
+
12
+ import concurrent.futures
13
+
14
+ from .context import ContextManager
15
+ from .llm import LLM
16
+ from .prompt import system_prompt
17
+ from .skills import load_skills
18
+ from .tools import ALL_TOOLS, get_tool
19
+ from .tools.agent import AgentTool
20
+ from .tools.base import Tool
21
+
22
+
23
class Agent:
    """Tool-calling agent loop.

    Repeatedly sends the conversation to the LLM with tool schemas; executes
    any requested tool calls and feeds results back, until the model answers
    with plain text (no tool calls) or ``max_rounds`` is exhausted.
    """

    def __init__(
        self,
        llm: LLM,
        tools: list[Tool] | None = None,
        max_context_tokens: int = 128_000,
        max_rounds: int = 50,
    ):
        """Create an agent bound to *llm*.

        tools: tool instances exposed to the model; defaults to ALL_TOOLS.
        max_context_tokens: budget handed to the ContextManager.
        max_rounds: cap on LLM/tool iterations per chat() call.
        """
        self.llm = llm
        self.tools = tools if tools is not None else ALL_TOOLS
        self.messages: list[dict] = []  # conversation history (system msg excluded)
        self.context = ContextManager(max_tokens=max_context_tokens)
        self.max_rounds = max_rounds
        self.skills = []
        self._system = ""
        self.refresh_skills(force_reload=True)  # also builds self._system

        # Give nested-agent tools a back-reference to this agent.
        for tool in self.tools:
            if isinstance(tool, AgentTool):
                tool._parent_agent = self

    def _full_messages(self) -> list[dict]:
        """Return history with a freshly rebuilt system prompt prepended."""
        self.refresh_skills()  # pick up skill changes on every round
        return [{"role": "system", "content": self._system}] + self.messages

    def _tool_schemas(self) -> list[dict]:
        """Return the schema description of every registered tool."""
        return [tool.schema() for tool in self.tools]

    def chat(self, user_input: str, on_token=None, on_tool=None) -> str:
        """Process one user message. May involve multiple LLM/tool rounds.

        on_token: optional callback for streamed response tokens (passed
            through to the LLM client).
        on_tool: optional callback ``on_tool(name, arguments)`` invoked
            before each tool executes.
        Returns the model's final text reply, or a fallback string when
        ``max_rounds`` is reached.
        """
        self.messages.append({"role": "user", "content": user_input})
        self.context.maybe_compress(self.messages, self.llm)

        for _ in range(self.max_rounds):
            response = self.llm.chat(
                messages=self._full_messages(),
                tools=self._tool_schemas(),
                on_token=on_token,
            )

            # Plain-text answer: the model is done — record it and return.
            if not response.tool_calls:
                self.messages.append(response.message)
                return response.content

            self.messages.append(response.message)

            if len(response.tool_calls) == 1:
                # Single call: run inline (no thread pool overhead).
                tool_call = response.tool_calls[0]
                if on_tool:
                    on_tool(tool_call.name, tool_call.arguments)
                result = self._exec_tool(tool_call)
                self.messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": result,
                    }
                )
            else:
                # Multiple calls: execute concurrently, then append results
                # in the original call order so tool_call_ids line up.
                results = self._exec_tools_parallel(response.tool_calls, on_tool)
                for tool_call, result in zip(response.tool_calls, results):
                    self.messages.append(
                        {
                            "role": "tool",
                            "tool_call_id": tool_call.id,
                            "content": result,
                        }
                    )

            self.context.maybe_compress(self.messages, self.llm)

        # NOTE(review): this fallback is returned but not appended to
        # self.messages, so history ends on tool output — confirm intended.
        return "(reached maximum tool-call rounds)"

    def _exec_tool(self, tool_call) -> str:
        """Execute a single tool call and return the result string."""
        tool = get_tool(tool_call.name)
        if tool is None:
            return f"Error: unknown tool '{tool_call.name}'"
        try:
            return tool.execute(**tool_call.arguments)
        except TypeError as exc:
            # Usually model-supplied arguments not matching the tool signature.
            return f"Error: bad arguments for {tool_call.name}: {exc}"
        except Exception as exc:
            # Tool failures are reported back to the model, never raised.
            return f"Error executing {tool_call.name}: {exc}"

    def _exec_tools_parallel(self, tool_calls, on_tool=None) -> list[str]:
        """Run multiple tool calls concurrently using threads."""
        # Announce all calls up front so the UI shows them immediately.
        for tool_call in tool_calls:
            if on_tool:
                on_tool(tool_call.name, tool_call.arguments)

        with concurrent.futures.ThreadPoolExecutor(max_workers=8) as pool:
            futures = [pool.submit(self._exec_tool, tool_call) for tool_call in tool_calls]
            # Collect in submission order, matching tool_calls.
            return [future.result() for future in futures]

    def reset(self):
        """Clear conversation history."""
        self.messages.clear()

    def refresh_skills(self, force_reload: bool = False):
        """Refresh cached skill metadata and rebuild the system prompt."""
        self.skills = load_skills(force_reload=force_reload)
        self._system = system_prompt(self.tools, self.skills)
kittycode/cli.py ADDED
@@ -0,0 +1,367 @@
1
+ """Interactive CLI for KittyCode."""
2
+
3
+ import argparse
4
+ import os
5
+ import sys
6
+
7
+ from prompt_toolkit import prompt as pt_prompt
8
+ from prompt_toolkit.completion import Completer, Completion
9
+ from prompt_toolkit.history import FileHistory
10
+ from rich.console import Console
11
+ from rich.markdown import Markdown
12
+ from rich.panel import Panel
13
+
14
+ from . import __version__
15
+ from .agent import Agent
16
+ from .config import CONFIG_PATH, Config
17
+ from .llm import LLM
18
+ from .session import list_sessions, load_session, save_session
19
+
20
# Shared Rich console for all CLI output.
console = Console()

# Builtin slash commands mapped to their help text; skills contribute
# additional "/<skill>" commands at runtime (see _slash_command_names).
_BUILTIN_COMMANDS = {
    "/help": "Show this help",
    "/reset": "Clear conversation history",
    "/skills": "Show loaded local skills",
    "/model": "Switch model mid-conversation",
    "/tokens": "Show token usage",
    "/compact": "Compress conversation context",
    "/save": "Save session to disk",
    "/sessions": "List saved sessions",
    "/quit": "Exit KittyCode",
}
33
+
34
+
35
class SlashCommandCompleter(Completer):
    """Offer slash-command completions (builtins plus loaded skills)."""

    def __init__(self, command_provider):
        # Callable returning the current command list; re-invoked on every
        # completion request so newly loaded skills appear immediately.
        self.command_provider = command_provider

    def get_completions(self, document, complete_event):
        prefix = document.text_before_cursor
        if not prefix.startswith("/"):
            return

        needle = prefix.casefold()

        for name in sorted(self.command_provider()):
            if not name.casefold().startswith(needle):
                continue
            yield Completion(
                name,
                start_position=-len(prefix),
                display=name,
                display_meta=_BUILTIN_COMMANDS.get(name, "Use this skill"),
            )
54
+
55
+
56
def _parse_args():
    """Build the kittycode argument parser and parse sys.argv."""
    cli = argparse.ArgumentParser(
        prog="kittycode",
        description="Minimal AI coding agent. Supports OpenAI-compatible and Anthropic APIs.",
    )
    cli.add_argument("-m", "--model", help="Model name (default: value from ~/.kittycode/config.json)")
    cli.add_argument("--interface", choices=["openai", "anthropic"], help="Interface type (default: value from ~/.kittycode/config.json)")
    cli.add_argument("--base-url", help="API base URL (default: value from ~/.kittycode/config.json)")
    cli.add_argument("--api-key", help="API key (default: value from ~/.kittycode/config.json)")
    cli.add_argument("-p", "--prompt", help="One-shot prompt (non-interactive mode)")
    cli.add_argument("-r", "--resume", metavar="ID", help="Resume a saved session")
    cli.add_argument("-v", "--version", action="version", version=f"%(prog)s {__version__}")
    return cli.parse_args()
69
+
70
+
71
def main():
    """CLI entry point: load config, apply CLI overrides, then run.

    Exits with status 1 on an unreadable config, a missing API key, or an
    unknown session id; otherwise runs one-shot mode (-p) or the REPL.
    """
    args = _parse_args()
    try:
        config = Config.from_file()
    except ValueError as exc:
        console.print(f"[red bold]Invalid config file:[/] {CONFIG_PATH}")
        console.print(str(exc))
        sys.exit(1)

    # Command-line flags take precedence over the config file.
    if args.model:
        config.model = args.model
    if args.interface:
        config.interface = args.interface
    if args.base_url:
        config.base_url = args.base_url
    if args.api_key:
        config.api_key = args.api_key

    if not config.api_key:
        # Without a key we cannot call any API: explain how to configure.
        console.print("[red bold]No API key found.[/]")
        console.print(
            f"Populate {CONFIG_PATH} with JSON such as:\n"
            "\n"
            "{\n"
            ' "interface": "openai",\n'
            ' "api_key": "sk-...",\n'
            ' "model": "gpt-4o",\n'
            ' "base_url": "https://api.openai.com/v1",\n'
            ' "max_tokens": 4096,\n'
            ' "temperature": 0,\n'
            ' "max_context": 128000\n'
            "}\n"
        )
        sys.exit(1)

    llm = LLM(
        model=config.model,
        api_key=config.api_key,
        interface=config.interface,
        base_url=config.base_url,
        temperature=config.temperature,
        max_tokens=config.max_tokens,
    )
    agent = Agent(llm=llm, max_context_tokens=config.max_context_tokens)

    if args.resume:
        loaded = load_session(args.resume)
        if loaded:
            # NOTE(review): the model saved with the session is discarded;
            # the current config's model is used instead — confirm intended.
            agent.messages, _loaded_model = loaded
            console.print(f"[green]Resumed session: {args.resume}[/green]")
        else:
            console.print(f"[red]Session '{args.resume}' not found.[/red]")
            sys.exit(1)

    if args.prompt:
        # One-shot mode: answer a single prompt and exit.
        _run_once(agent, args.prompt)
        return

    _repl(agent, config)
130
+
131
+
132
def _run_once(agent: Agent, prompt: str):
    """Answer a single prompt non-interactively, streaming when possible."""
    tokens_seen: list[str] = []

    def handle_token(token):
        tokens_seen.append(token)
        print(token, end="", flush=True)

    def handle_tool(name, kwargs):
        console.print(f"\n[dim]> {name}({_brief(kwargs)})[/dim]")

    reply = agent.chat(prompt, on_token=handle_token, on_tool=handle_tool)
    if not tokens_seen:
        # Nothing was streamed: render the full reply as Markdown.
        console.print(Markdown(reply))
    else:
        # Streamed output lacks a trailing newline.
        print()
147
+
148
+
149
def _repl(agent: Agent, config: Config):
    """Interactive read-eval-print loop with slash commands and skills."""
    # Banner with version, model, and optional base URL.
    console.print(
        Panel(
            f"[bold]KittyCode[/bold] v{__version__}\n"
            f"Model: [cyan]{config.model}[/cyan] Interface: [cyan]{config.interface}[/cyan]"
            + (f" Base: [dim]{config.base_url}[/dim]" if config.base_url else "")
            + "\nType [bold]/help[/bold] for commands, [bold]Ctrl+C[/bold] to cancel, [bold]/quit[/bold] to exit.",
            border_style="blue",
        )
    )

    history = FileHistory(os.path.expanduser("~/.kittycode_history"))
    completer = SlashCommandCompleter(lambda: _slash_command_names(agent.skills))
    # Skill selected via a bare "/<skill>" command, awaiting the next message.
    pending_skill = None

    while True:
        try:
            agent.refresh_skills()  # keep completions in sync with disk
            user_input = pt_prompt(
                "You > ",
                history=history,
                completer=completer,
                complete_while_typing=True,
            ).strip()
        except (EOFError, KeyboardInterrupt):
            console.print("\nBye!")
            break

        if not user_input:
            continue

        # Expand an unambiguous slash-command prefix; on ambiguity, list the
        # candidates (unless the input is itself an exact command).
        resolved_command, matches = _resolve_command_prefix(user_input, agent.skills)
        if resolved_command and resolved_command != user_input:
            user_input = resolved_command
        elif user_input.startswith("/") and matches and resolved_command is None and user_input not in matches:
            console.print("[yellow]Matching commands:[/yellow] " + ", ".join(matches))
            continue

        # ---- builtin commands ----
        if user_input == "/quit":
            break
        if user_input == "/help":
            _show_help()
            continue
        if user_input == "/reset":
            agent.reset()
            pending_skill = None
            console.print("[yellow]Conversation reset.[/yellow]")
            continue
        if user_input == "/skills":
            agent.refresh_skills()
            _show_skills(agent.skills)
            continue
        if user_input == "/tokens":
            prompt_tokens = agent.llm.total_prompt_tokens
            completion_tokens = agent.llm.total_completion_tokens
            console.print(
                f"Tokens used this session: [cyan]{prompt_tokens}[/cyan] prompt + "
                f"[cyan]{completion_tokens}[/cyan] completion = [bold]{prompt_tokens + completion_tokens}[/bold] total"
            )
            continue
        if user_input.startswith("/model "):
            new_model = user_input[7:].strip()  # text after "/model "
            if new_model:
                agent.llm.model = new_model
                config.model = new_model
                console.print(f"Switched to [cyan]{new_model}[/cyan]")
            continue
        if user_input == "/compact":
            # Imported here to avoid paying for it on normal startup.
            from .context import estimate_tokens

            before = estimate_tokens(agent.messages)
            compressed = agent.context.maybe_compress(agent.messages, agent.llm)
            after = estimate_tokens(agent.messages)
            if compressed:
                console.print(
                    f"[green]Compressed: {before} -> {after} tokens ({len(agent.messages)} messages)[/green]"
                )
            else:
                console.print(
                    f"[dim]Nothing to compress ({before} tokens, {len(agent.messages)} messages)[/dim]"
                )
            continue
        if user_input == "/save":
            session_id = save_session(agent.messages, config.model)
            console.print(f"[green]Session saved: {session_id}[/green]")
            console.print(f"Resume with: kittycode -r {session_id}")
            continue
        if user_input == "/sessions":
            sessions = list_sessions()
            if not sessions:
                console.print("[dim]No saved sessions.[/dim]")
            else:
                for session in sessions:
                    console.print(
                        f" [cyan]{session['id']}[/cyan] ({session['model']}, {session['saved_at']}) {session['preview']}"
                    )
            continue

        # ---- skill commands ----
        skill_match = _match_skill_command(user_input, agent.skills)
        if skill_match is not None:
            skill, task = skill_match
            if task:
                # "/skill some task" — wrap the task and send immediately.
                user_input = _build_skill_request(skill, task)
            else:
                # Bare "/skill" — remember it; next plain message uses it.
                pending_skill = skill
                console.print(f"[cyan]Selected skill:[/cyan] /{skill.name}")
                console.print("[dim]Your next non-command message will use this skill.[/dim]")
                continue
        elif pending_skill is not None and not user_input.startswith("/"):
            user_input = _build_skill_request(pending_skill, user_input)
            pending_skill = None

        # ---- send to the agent, streaming tokens as they arrive ----
        streamed: list[str] = []

        def on_token(token):
            streamed.append(token)
            print(token, end="", flush=True)

        def on_tool(name, kwargs):
            console.print(f"\n[dim]> {name}({_brief(kwargs)})[/dim]")

        try:
            response = agent.chat(user_input, on_token=on_token, on_tool=on_tool)
            if streamed:
                print()  # terminate the streamed line
            else:
                console.print(Markdown(response))
        except KeyboardInterrupt:
            console.print("\n[yellow]Interrupted.[/yellow]")
        except Exception as exc:
            # Keep the REPL alive on any agent/LLM failure.
            console.print(f"\n[red]Error: {exc}[/red]")
280
+
281
+
282
def _show_help():
    """Print the command-reference panel."""
    help_lines = [
        "[bold]Commands:[/bold]",
        " /help Show this help",
        " /reset Clear conversation history",
        " /skills Show loaded local skills",
        " /<skill name> Use a loaded skill",
        " /model <name> Switch model mid-conversation",
        " /tokens Show token usage",
        " /compact Compress conversation context",
        " /save Save session to disk",
        " /sessions List saved sessions",
        " /quit Exit KittyCode",
    ]
    console.print(
        Panel(
            "\n".join(help_lines),
            title="KittyCode Help",
            border_style="dim",
        )
    )
300
+
301
+
302
def _show_skills(skills):
    """Render the loaded skills inside a panel."""
    body = _format_skills(skills)
    panel = Panel(body, title="KittyCode Skills", border_style="dim")
    console.print(panel)
304
+
305
+
306
+ def _format_skills(skills) -> str:
307
+ if not skills:
308
+ return "No skills loaded from ~/.kittycode/skills"
309
+
310
+ lines = []
311
+ for index, skill in enumerate(skills, 1):
312
+ lines.append(f"{index}. {skill.name}")
313
+ lines.append(f" /{skill.name}")
314
+ lines.append(f" {skill.description}")
315
+ lines.append(f" {skill.path}")
316
+ return "\n".join(lines)
317
+
318
+
319
def _slash_command_names(skills) -> list[str]:
    """Builtin commands followed by skill commands, without duplicates."""
    commands = list(_BUILTIN_COMMANDS)
    seen = set(commands)
    for entry in skills:
        candidate = _skill_command_name(entry)
        if candidate not in seen:
            seen.add(candidate)
            commands.append(candidate)
    return commands
326
+
327
+
328
+ def _skill_command_name(skill) -> str:
329
+ return f"/{skill.name}"
330
+
331
+
332
def _resolve_command_prefix(user_input: str, skills) -> tuple[str | None, list[str]]:
    """Expand a slash-command prefix.

    Returns (resolved, matches): *resolved* is the unique matching command
    when exactly one command starts with the input, else None; *matches* is
    every command starting with the input (empty for non-slash input).
    """
    if not user_input.startswith("/"):
        return None, []

    needle = user_input.casefold()
    matches = [
        candidate
        for candidate in _slash_command_names(skills)
        if candidate.casefold().startswith(needle)
    ]
    resolved = matches[0] if len(matches) == 1 else None
    return resolved, matches
341
+
342
+
343
def _match_skill_command(user_input: str, skills):
    """Match input against skill commands; return (skill, task) or None.

    Longest command names are tried first so "/foo-bar x" prefers a skill
    named "foo-bar" over one named "foo". The task is the text after the
    command, stripped; it is "" for a bare command.
    """
    folded_input = user_input.casefold()
    by_length = sorted(skills, key=lambda entry: len(_skill_command_name(entry)), reverse=True)
    for entry in by_length:
        command = _skill_command_name(entry)
        folded_command = command.casefold()
        if folded_input == folded_command:
            return entry, ""
        if folded_input.startswith(folded_command + " "):
            return entry, user_input[len(command):].strip()
    return None
353
+
354
+
355
+ def _build_skill_request(skill, task: str) -> str:
356
+ return (
357
+ f'Use the local skill "{skill.name}" for this request.\n'
358
+ f"Skill description: {skill.description}\n"
359
+ f"Skill path: {skill.path}\n"
360
+ "Before doing other work, read its SKILL.md and any related files under that path.\n\n"
361
+ f"Task:\n{task}"
362
+ )
363
+
364
+
365
+ def _brief(kwargs: dict, maxlen: int = 80) -> str:
366
+ summary = ", ".join(f"{key}={repr(value)[:40]}" for key, value in kwargs.items())
367
+ return summary[:maxlen] + ("..." if len(summary) > maxlen else "")
kittycode/config.py ADDED
@@ -0,0 +1,67 @@
1
+ """Configuration loaded from ~/.kittycode/config.json."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from dataclasses import dataclass
7
+ from pathlib import Path
8
+
9
# Default location of the user configuration file.
CONFIG_PATH = Path.home() / ".kittycode" / "config.json"
10
+
11
+
12
+ def _pick(data: dict, *names: str, default=None):
13
+ for name in names:
14
+ value = data.get(name)
15
+ if value not in (None, ""):
16
+ return value
17
+ return default
18
+
19
+
20
@dataclass
class Config:
    """Runtime configuration, normally loaded from ~/.kittycode/config.json."""

    interface: str = "openai"  # "openai" or "anthropic"
    model: str = "gpt-4o"
    api_key: str = ""
    base_url: str | None = None  # None -> provider default endpoint
    max_tokens: int = 4096  # per-response completion budget
    temperature: float = 0.0
    max_context_tokens: int = 128_000  # context budget used for compression

    @classmethod
    def from_file(cls, config_path: Path | str | None = None) -> "Config":
        """Load configuration from *config_path* (default: CONFIG_PATH).

        Returns all-default Config when the file does not exist.
        Raises ValueError for invalid JSON, a non-object top level, or an
        interface other than 'openai'/'anthropic'.
        """
        path = Path(config_path).expanduser() if config_path is not None else CONFIG_PATH

        if not path.exists():
            return cls()

        try:
            raw = json.loads(path.read_text())
        except json.JSONDecodeError as exc:
            raise ValueError(f"invalid JSON in {path}: {exc}") from exc

        if not isinstance(raw, dict):
            raise ValueError(f"{path} must contain a JSON object")

        # NOTE(review): the KITTYCODE_* names below are alternative keys
        # looked up in this JSON object via _pick, not environment
        # variables — confirm that is intended.
        interface = str(_pick(raw, "interface", "KITTYCODE_INTERFACE", default="openai")).lower()
        if interface not in {"openai", "anthropic"}:
            raise ValueError("interface must be 'openai' or 'anthropic'")

        # Default model depends on the chosen interface.
        default_model = "gpt-4o" if interface == "openai" else "claude-3-7-sonnet-latest"

        return cls(
            interface=interface,
            model=str(_pick(raw, "model", "KITTYCODE_MODEL", default=default_model)),
            api_key=str(_pick(raw, "api_key", "KITTYCODE_API_KEY", default="")),
            base_url=_pick(raw, "base_url", "KITTYCODE_BASE_URL"),
            max_tokens=int(_pick(raw, "max_tokens", "KITTYCODE_MAX_TOKENS", default=4096)),
            temperature=float(_pick(raw, "temperature", "KITTYCODE_TEMPERATURE", default=0.0)),
            max_context_tokens=int(
                _pick(
                    raw,
                    "max_context_tokens",
                    "max_context",
                    "KITTYCODE_MAX_CONTEXT",
                    default=128000,
                )
            ),
        )