god-code 0.4.0__tar.gz → 0.4.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {god_code-0.4.0 → god_code-0.4.3}/PKG-INFO +2 -1
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/cli.py +46 -19
- god_code-0.4.3/godot_agent/llm/streaming.py +93 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/runtime/config.py +1 -1
- god_code-0.4.3/godot_agent/runtime/context_manager.py +231 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/runtime/engine.py +34 -8
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/runtime/session.py +21 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/shell.py +31 -6
- god_code-0.4.3/godot_agent/tui/input_handler.py +100 -0
- {god_code-0.4.0 → god_code-0.4.3}/pyproject.toml +2 -1
- {god_code-0.4.0 → god_code-0.4.3}/tests/runtime/test_context_manager.py +6 -3
- god_code-0.4.0/godot_agent/llm/streaming.py +0 -26
- god_code-0.4.0/godot_agent/runtime/context_manager.py +0 -135
- {god_code-0.4.0 → god_code-0.4.3}/.github/workflows/publish.yml +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/.gitignore +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/CHANGELOG.md +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/CLAUDE.md +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/CONTRIBUTING.md +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/LICENSE +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/README.md +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/collision_planner.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/consistency_checker.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/dependency_graph.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/gdscript_linter.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/pattern_advisor.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/project.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/resource_validator.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/scene_parser.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/scene_writer.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/godot/tscn_validator.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/llm/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/llm/client.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/llm/vision.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/prompts/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/prompts/build_discipline.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/prompts/godot_playbook.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/prompts/knowledge_selector.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/prompts/system.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/py.typed +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/runtime/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/runtime/auth.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/runtime/error_loop.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/runtime/oauth.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/base.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/file_ops.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/git.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/godot_cli.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/list_dir.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/registry.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/screenshot.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tools/search.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tui/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/godot_agent/tui/display.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_collision_planner.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_consistency.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_dependency_graph.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_linter.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_pattern_advisor.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_project.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_resource_validator.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_scene_parser.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_scene_writer.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/godot/test_tscn_validator.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/llm/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/llm/test_client.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/llm/test_vision.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/prompts/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/prompts/test_knowledge_selector.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/prompts/test_system_prompt.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/runtime/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/runtime/test_config.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/runtime/test_engine.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/runtime/test_error_loop.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/test_e2e.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/tools/__init__.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/tools/test_file_ops.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/tools/test_git.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/tools/test_godot_cli.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/tools/test_list_dir.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/tools/test_registry.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/tools/test_search.py +0 -0
- {god_code-0.4.0 → god_code-0.4.3}/tests/tools/test_shell.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: god-code
|
|
3
|
-
Version: 0.4.
|
|
3
|
+
Version: 0.4.3
|
|
4
4
|
Summary: AI coding agent specialized for Godot game development
|
|
5
5
|
Project-URL: Homepage, https://github.com/chuisiufai/god-code
|
|
6
6
|
Project-URL: Repository, https://github.com/chuisiufai/god-code
|
|
@@ -13,6 +13,7 @@ Requires-Dist: eval-type-backport>=0.2.0; python_version < '3.10'
|
|
|
13
13
|
Requires-Dist: httpx>=0.27
|
|
14
14
|
Requires-Dist: packaging>=21.0
|
|
15
15
|
Requires-Dist: pillow>=10.3
|
|
16
|
+
Requires-Dist: prompt-toolkit>=3.0
|
|
16
17
|
Requires-Dist: pydantic>=2.7
|
|
17
18
|
Requires-Dist: rich>=13.0
|
|
18
19
|
Provides-Extra: dev
|
|
@@ -63,7 +63,9 @@ def build_registry() -> ToolRegistry:
|
|
|
63
63
|
|
|
64
64
|
def build_engine(config: AgentConfig, project_root: Path) -> ConversationEngine:
|
|
65
65
|
from godot_agent.tools.file_ops import set_project_root
|
|
66
|
+
from godot_agent.tools.shell import set_safety_level
|
|
66
67
|
set_project_root(project_root)
|
|
68
|
+
set_safety_level(config.safety)
|
|
67
69
|
|
|
68
70
|
llm_config = LLMConfig(
|
|
69
71
|
api_key=config.api_key,
|
|
@@ -202,7 +204,7 @@ def _run_setup_wizard() -> None:
|
|
|
202
204
|
click.echo()
|
|
203
205
|
|
|
204
206
|
|
|
205
|
-
_VERSION = "0.4.
|
|
207
|
+
_VERSION = "0.4.3"
|
|
206
208
|
|
|
207
209
|
|
|
208
210
|
def _check_update() -> None:
|
|
@@ -343,6 +345,15 @@ def chat(project: str = ".", config: str | None = None):
|
|
|
343
345
|
engine.on_tool_start = lambda name, args: display.tool_start(name, engine._summarize_args(name, args))
|
|
344
346
|
engine.on_tool_end = lambda name, ok, err: display.tool_result(name, ok, err)
|
|
345
347
|
engine.on_diff = lambda old, new, fn: display.show_diff(old, new, fn)
|
|
348
|
+
engine.auto_commit = cfg.auto_commit
|
|
349
|
+
engine.use_streaming = cfg.streaming
|
|
350
|
+
|
|
351
|
+
# Streaming callback
|
|
352
|
+
if cfg.streaming:
|
|
353
|
+
engine.on_stream_chunk = lambda chunk: display.agent_streaming_chunk(chunk)
|
|
354
|
+
|
|
355
|
+
# Auto-commit callback
|
|
356
|
+
engine.on_commit_suggest = lambda: display.info("Changes made. Run 'git add -A && git commit' to save.")
|
|
346
357
|
|
|
347
358
|
# Auto-scan project on entry
|
|
348
359
|
if has_project:
|
|
@@ -350,8 +361,15 @@ def chat(project: str = ".", config: str | None = None):
|
|
|
350
361
|
if scan_result:
|
|
351
362
|
display.info(f"Project auto-scanned: {scan_result}")
|
|
352
363
|
|
|
364
|
+
# Setup prompt_toolkit for history + autocomplete
|
|
365
|
+
from godot_agent.tui.input_handler import CommandCompleter, create_session as create_input_session
|
|
366
|
+
history_file = str(Path.home() / ".config" / "god-code" / "history")
|
|
367
|
+
Path(history_file).parent.mkdir(parents=True, exist_ok=True)
|
|
368
|
+
input_session = create_input_session(history_file)
|
|
369
|
+
completer = CommandCompleter(project_root)
|
|
370
|
+
|
|
353
371
|
def _rebuild_engine(new_root: Path) -> ConversationEngine:
|
|
354
|
-
nonlocal project_root, has_project, proj_name
|
|
372
|
+
nonlocal project_root, has_project, proj_name, completer
|
|
355
373
|
project_root = new_root.resolve()
|
|
356
374
|
has_project = (project_root / "project.godot").exists()
|
|
357
375
|
if has_project:
|
|
@@ -363,6 +381,12 @@ def chat(project: str = ".", config: str | None = None):
|
|
|
363
381
|
eng.on_tool_start = lambda name, args: display.tool_start(name, eng._summarize_args(name, args))
|
|
364
382
|
eng.on_tool_end = lambda name, ok, err: display.tool_result(name, ok, err)
|
|
365
383
|
eng.on_diff = lambda old, new, fn: display.show_diff(old, new, fn)
|
|
384
|
+
eng.auto_commit = cfg.auto_commit
|
|
385
|
+
eng.use_streaming = cfg.streaming
|
|
386
|
+
if cfg.streaming:
|
|
387
|
+
eng.on_stream_chunk = lambda chunk: display.agent_streaming_chunk(chunk)
|
|
388
|
+
eng.on_commit_suggest = lambda: display.info("Changes made. Run 'git add -A && git commit' to save.")
|
|
389
|
+
completer = CommandCompleter(project_root)
|
|
366
390
|
if has_project:
|
|
367
391
|
eng.scan_project()
|
|
368
392
|
return eng
|
|
@@ -376,8 +400,9 @@ def chat(project: str = ".", config: str | None = None):
|
|
|
376
400
|
while True:
|
|
377
401
|
try:
|
|
378
402
|
if in_multiline:
|
|
379
|
-
|
|
380
|
-
|
|
403
|
+
from godot_agent.tui.input_handler import get_multiline_continuation
|
|
404
|
+
line = get_multiline_continuation(input_session)
|
|
405
|
+
if line is None or line.strip() == '"""':
|
|
381
406
|
in_multiline = False
|
|
382
407
|
user_input = "\n".join(multiline_buffer)
|
|
383
408
|
multiline_buffer = []
|
|
@@ -385,7 +410,10 @@ def chat(project: str = ".", config: str | None = None):
|
|
|
385
410
|
multiline_buffer.append(line)
|
|
386
411
|
continue
|
|
387
412
|
else:
|
|
388
|
-
|
|
413
|
+
from godot_agent.tui.input_handler import get_input
|
|
414
|
+
user_input = get_input(input_session, completer)
|
|
415
|
+
if user_input is None:
|
|
416
|
+
break
|
|
389
417
|
if user_input.strip().startswith('"""'):
|
|
390
418
|
in_multiline = True
|
|
391
419
|
rest = user_input.strip()[3:]
|
|
@@ -406,17 +434,12 @@ def chat(project: str = ".", config: str | None = None):
|
|
|
406
434
|
continue
|
|
407
435
|
|
|
408
436
|
if cmd == "/load":
|
|
409
|
-
from godot_agent.runtime.session import
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
display.info(f"Loading: {files[0].name}")
|
|
416
|
-
# Note: restore is informational — messages are raw dicts
|
|
417
|
-
display.success("Session history loaded for context")
|
|
418
|
-
else:
|
|
419
|
-
display.error("No saved sessions found")
|
|
437
|
+
from godot_agent.runtime.session import load_latest_session
|
|
438
|
+
result = load_latest_session(cfg.session_dir)
|
|
439
|
+
if result:
|
|
440
|
+
old_id, old_messages = result
|
|
441
|
+
engine.messages.extend(old_messages[1:]) # Skip system prompt
|
|
442
|
+
display.success(f"Restored session {old_id} ({len(old_messages)} messages)")
|
|
420
443
|
else:
|
|
421
444
|
display.error("No saved sessions found")
|
|
422
445
|
continue
|
|
@@ -518,14 +541,18 @@ def chat(project: str = ".", config: str | None = None):
|
|
|
518
541
|
|
|
519
542
|
# Regular message → send to LLM
|
|
520
543
|
try:
|
|
521
|
-
|
|
544
|
+
if cfg.streaming and engine.on_stream_chunk:
|
|
545
|
+
display.agent_streaming_start()
|
|
522
546
|
response = await engine.submit(user_input)
|
|
547
|
+
display.agent_streaming_end()
|
|
548
|
+
else:
|
|
549
|
+
with display.thinking():
|
|
550
|
+
response = await engine.submit(user_input)
|
|
551
|
+
display.agent_response(response)
|
|
523
552
|
except KeyboardInterrupt:
|
|
524
553
|
display.info("Cancelled")
|
|
525
554
|
continue
|
|
526
555
|
|
|
527
|
-
display.agent_response(response)
|
|
528
|
-
|
|
529
556
|
# Token usage
|
|
530
557
|
turn = engine.last_turn
|
|
531
558
|
sess = engine.session_usage
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
"""Streaming chat completions with tool call assembly."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from typing import AsyncIterator, Callable
|
|
7
|
+
|
|
8
|
+
from godot_agent.llm.client import ChatResponse, LLMClient, Message, ToolCall, TokenUsage
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
async def stream_chat_with_callback(
|
|
12
|
+
client: LLMClient,
|
|
13
|
+
messages: list[Message],
|
|
14
|
+
tools: list[dict] | None = None,
|
|
15
|
+
on_chunk: Callable[[str], None] | None = None,
|
|
16
|
+
) -> ChatResponse:
|
|
17
|
+
"""Stream a chat completion, calling on_chunk for each text delta.
|
|
18
|
+
|
|
19
|
+
Returns the complete ChatResponse with assembled tool calls and usage.
|
|
20
|
+
"""
|
|
21
|
+
body = client._build_request_body(messages, tools)
|
|
22
|
+
body["stream"] = True
|
|
23
|
+
body["stream_options"] = {"include_usage": True}
|
|
24
|
+
|
|
25
|
+
content_parts: list[str] = []
|
|
26
|
+
tool_calls_acc: dict[int, dict] = {} # index -> {id, name, arguments}
|
|
27
|
+
usage = TokenUsage()
|
|
28
|
+
|
|
29
|
+
async with client._http.stream(
|
|
30
|
+
"POST",
|
|
31
|
+
f"{client.config.base_url}/chat/completions",
|
|
32
|
+
headers=client._build_headers(),
|
|
33
|
+
json=body,
|
|
34
|
+
) as resp:
|
|
35
|
+
resp.raise_for_status()
|
|
36
|
+
async for line in resp.aiter_lines():
|
|
37
|
+
if not line.startswith("data: "):
|
|
38
|
+
continue
|
|
39
|
+
data_str = line[6:]
|
|
40
|
+
if data_str.strip() == "[DONE]":
|
|
41
|
+
break
|
|
42
|
+
|
|
43
|
+
chunk = json.loads(data_str)
|
|
44
|
+
|
|
45
|
+
# Usage info (comes in final chunk)
|
|
46
|
+
if "usage" in chunk and chunk["usage"]:
|
|
47
|
+
u = chunk["usage"]
|
|
48
|
+
usage = TokenUsage(
|
|
49
|
+
prompt_tokens=u.get("prompt_tokens", 0),
|
|
50
|
+
completion_tokens=u.get("completion_tokens", 0),
|
|
51
|
+
total_tokens=u.get("total_tokens", 0),
|
|
52
|
+
)
|
|
53
|
+
|
|
54
|
+
if not chunk.get("choices"):
|
|
55
|
+
continue
|
|
56
|
+
delta = chunk["choices"][0].get("delta", {})
|
|
57
|
+
|
|
58
|
+
# Text content
|
|
59
|
+
if "content" in delta and delta["content"]:
|
|
60
|
+
text = delta["content"]
|
|
61
|
+
content_parts.append(text)
|
|
62
|
+
if on_chunk:
|
|
63
|
+
on_chunk(text)
|
|
64
|
+
|
|
65
|
+
# Tool calls (streamed incrementally)
|
|
66
|
+
if "tool_calls" in delta:
|
|
67
|
+
for tc_delta in delta["tool_calls"]:
|
|
68
|
+
idx = tc_delta["index"]
|
|
69
|
+
if idx not in tool_calls_acc:
|
|
70
|
+
tool_calls_acc[idx] = {
|
|
71
|
+
"id": tc_delta.get("id", ""),
|
|
72
|
+
"name": tc_delta.get("function", {}).get("name", ""),
|
|
73
|
+
"arguments": "",
|
|
74
|
+
}
|
|
75
|
+
if "id" in tc_delta and tc_delta["id"]:
|
|
76
|
+
tool_calls_acc[idx]["id"] = tc_delta["id"]
|
|
77
|
+
func = tc_delta.get("function", {})
|
|
78
|
+
if "name" in func and func["name"]:
|
|
79
|
+
tool_calls_acc[idx]["name"] = func["name"]
|
|
80
|
+
if "arguments" in func:
|
|
81
|
+
tool_calls_acc[idx]["arguments"] += func["arguments"]
|
|
82
|
+
|
|
83
|
+
# Assemble final message
|
|
84
|
+
content = "".join(content_parts) if content_parts else None
|
|
85
|
+
tool_calls = None
|
|
86
|
+
if tool_calls_acc:
|
|
87
|
+
tool_calls = [
|
|
88
|
+
ToolCall(id=tc["id"], name=tc["name"], arguments=tc["arguments"])
|
|
89
|
+
for tc in sorted(tool_calls_acc.values(), key=lambda x: x["id"])
|
|
90
|
+
]
|
|
91
|
+
|
|
92
|
+
msg = Message.assistant(content=content, tool_calls=tool_calls)
|
|
93
|
+
return ChatResponse(message=msg, usage=usage)
|
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
"""Smart context window management for long Godot development sessions.
|
|
2
|
+
|
|
3
|
+
Implements tiered compression that preserves critical context while staying
|
|
4
|
+
within the model's token limit:
|
|
5
|
+
|
|
6
|
+
1. System prompt — NEVER compressed
|
|
7
|
+
2. Working memory — extracted key facts from old conversations
|
|
8
|
+
3. Recent turns — kept intact (last N messages)
|
|
9
|
+
4. Old turns — compressed into summaries
|
|
10
|
+
|
|
11
|
+
The key insight: in a coding agent, the most important context is
|
|
12
|
+
"what files did I read/modify and what decisions did I make", not
|
|
13
|
+
the full conversation history.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
from __future__ import annotations
|
|
17
|
+
|
|
18
|
+
import re
|
|
19
|
+
from dataclasses import dataclass, field
|
|
20
|
+
|
|
21
|
+
from godot_agent.llm.client import Message
|
|
22
|
+
|
|
23
|
+
# Token estimation: ~3.5 chars per token (conservative for mixed en/zh content)
|
|
24
|
+
_CHARS_PER_TOKEN = 3.5
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def estimate_tokens(text: str) -> int:
|
|
28
|
+
"""Rough token estimate for mixed English/CJK text."""
|
|
29
|
+
if not text:
|
|
30
|
+
return 0
|
|
31
|
+
return max(1, int(len(text) / _CHARS_PER_TOKEN))
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def estimate_message_tokens(msg: Message) -> int:
|
|
35
|
+
"""Estimate tokens for a single message including role overhead."""
|
|
36
|
+
content = msg.content if isinstance(msg.content, str) else str(msg.content or "")
|
|
37
|
+
base = estimate_tokens(content) + 4 # role + formatting overhead
|
|
38
|
+
if msg.tool_calls:
|
|
39
|
+
for tc in msg.tool_calls:
|
|
40
|
+
base += estimate_tokens(tc.name) + estimate_tokens(tc.arguments) + 10
|
|
41
|
+
return base
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@dataclass
|
|
45
|
+
class ContextBudget:
|
|
46
|
+
max_tokens: int = 1050000 # gpt-5.4: 1.05M context
|
|
47
|
+
system_prompt_tokens: int = 0
|
|
48
|
+
message_tokens: int = 0
|
|
49
|
+
reserved_for_response: int = 4096
|
|
50
|
+
|
|
51
|
+
@property
|
|
52
|
+
def available(self) -> int:
|
|
53
|
+
return self.max_tokens - self.system_prompt_tokens - self.message_tokens - self.reserved_for_response
|
|
54
|
+
|
|
55
|
+
@property
|
|
56
|
+
def usage_ratio(self) -> float:
|
|
57
|
+
used = self.system_prompt_tokens + self.message_tokens
|
|
58
|
+
return used / self.max_tokens if self.max_tokens > 0 else 0.0
|
|
59
|
+
|
|
60
|
+
@property
|
|
61
|
+
def should_compact(self) -> bool:
|
|
62
|
+
return self.usage_ratio > 0.75
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
@dataclass
|
|
66
|
+
class WorkingMemory:
|
|
67
|
+
"""Extracted key facts from compressed conversation turns."""
|
|
68
|
+
files_read: list[str] = field(default_factory=list)
|
|
69
|
+
files_modified: list[str] = field(default_factory=list)
|
|
70
|
+
decisions: list[str] = field(default_factory=list)
|
|
71
|
+
errors_encountered: list[str] = field(default_factory=list)
|
|
72
|
+
|
|
73
|
+
def to_message(self) -> str:
|
|
74
|
+
parts = ["[CONTEXT] Working memory from previous conversation:"]
|
|
75
|
+
if self.files_modified:
|
|
76
|
+
parts.append(f"Files modified: {', '.join(set(self.files_modified[-20:]))}")
|
|
77
|
+
if self.files_read:
|
|
78
|
+
parts.append(f"Files read: {', '.join(set(self.files_read[-20:]))}")
|
|
79
|
+
if self.decisions:
|
|
80
|
+
parts.append("Key decisions:\n" + "\n".join(f" - {d}" for d in self.decisions[-10:]))
|
|
81
|
+
if self.errors_encountered:
|
|
82
|
+
parts.append("Errors fixed:\n" + "\n".join(f" - {e}" for e in self.errors_encountered[-5:]))
|
|
83
|
+
return "\n".join(parts)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _extract_working_memory(messages: list[Message]) -> WorkingMemory:
|
|
87
|
+
"""Extract key facts from messages for working memory."""
|
|
88
|
+
memory = WorkingMemory()
|
|
89
|
+
|
|
90
|
+
for msg in messages:
|
|
91
|
+
content = msg.content if isinstance(msg.content, str) else str(msg.content or "")
|
|
92
|
+
|
|
93
|
+
# Extract file paths from tool calls
|
|
94
|
+
if msg.tool_calls:
|
|
95
|
+
for tc in msg.tool_calls:
|
|
96
|
+
args_str = tc.arguments
|
|
97
|
+
# Extract "path" from JSON arguments
|
|
98
|
+
path_match = re.search(r'"path"\s*:\s*"([^"]+)"', args_str)
|
|
99
|
+
if path_match:
|
|
100
|
+
path = path_match.group(1)
|
|
101
|
+
short = path.split("/")[-1] if "/" in path else path
|
|
102
|
+
if tc.name in ("write_file", "edit_file"):
|
|
103
|
+
memory.files_modified.append(short)
|
|
104
|
+
elif tc.name == "read_file":
|
|
105
|
+
memory.files_read.append(short)
|
|
106
|
+
|
|
107
|
+
# Extract error mentions from tool results
|
|
108
|
+
if msg.role == "tool" and content:
|
|
109
|
+
if '"error"' in content:
|
|
110
|
+
error_match = re.search(r'"error"\s*:\s*"([^"]{1,100})"', content)
|
|
111
|
+
if error_match:
|
|
112
|
+
memory.errors_encountered.append(error_match.group(1)[:80])
|
|
113
|
+
|
|
114
|
+
# Extract decisions from assistant messages
|
|
115
|
+
if msg.role == "assistant" and content:
|
|
116
|
+
# Look for action statements
|
|
117
|
+
for pattern in [
|
|
118
|
+
r"I (?:will|'ll|have) (?:change|modify|update|fix|add|remove|create)\w* (.{10,60})",
|
|
119
|
+
r"(?:Changed|Modified|Updated|Fixed|Added|Removed|Created) (.{10,60})",
|
|
120
|
+
]:
|
|
121
|
+
for m in re.finditer(pattern, content):
|
|
122
|
+
memory.decisions.append(m.group(0)[:80])
|
|
123
|
+
|
|
124
|
+
return memory
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def smart_compact(
|
|
128
|
+
messages: list[Message],
|
|
129
|
+
keep_recent: int = 10,
|
|
130
|
+
target_ratio: float = 0.60,
|
|
131
|
+
max_tokens: int = 1050000 # gpt-5.4: 1.05M context,
|
|
132
|
+
) -> list[Message]:
|
|
133
|
+
"""Intelligently compact conversation history.
|
|
134
|
+
|
|
135
|
+
Strategy:
|
|
136
|
+
1. ALWAYS keep: system message (index 0)
|
|
137
|
+
2. Extract working memory from old messages
|
|
138
|
+
3. Keep recent N messages intact
|
|
139
|
+
4. Replace everything in between with working memory summary
|
|
140
|
+
|
|
141
|
+
This preserves:
|
|
142
|
+
- Full system prompt (Godot knowledge, build discipline)
|
|
143
|
+
- What files were modified and why (working memory)
|
|
144
|
+
- Recent conversation context (for continuity)
|
|
145
|
+
|
|
146
|
+
While discarding:
|
|
147
|
+
- Old tool results (files can be re-read)
|
|
148
|
+
- Old LLM explanations (decisions captured in memory)
|
|
149
|
+
- Redundant back-and-forth
|
|
150
|
+
"""
|
|
151
|
+
total = sum(estimate_message_tokens(m) for m in messages)
|
|
152
|
+
if total < max_tokens * target_ratio:
|
|
153
|
+
return messages # No compaction needed
|
|
154
|
+
|
|
155
|
+
if len(messages) <= keep_recent + 1:
|
|
156
|
+
return messages # Nothing to compact
|
|
157
|
+
|
|
158
|
+
# Split: system | old | recent
|
|
159
|
+
system = messages[0] # Always a system message
|
|
160
|
+
rest = messages[1:]
|
|
161
|
+
|
|
162
|
+
if len(rest) <= keep_recent:
|
|
163
|
+
return messages
|
|
164
|
+
|
|
165
|
+
old = rest[:-keep_recent]
|
|
166
|
+
recent = rest[-keep_recent:]
|
|
167
|
+
|
|
168
|
+
# Extract working memory from old messages
|
|
169
|
+
memory = _extract_working_memory(old)
|
|
170
|
+
memory_text = memory.to_message()
|
|
171
|
+
|
|
172
|
+
# Build compacted message list
|
|
173
|
+
result = [system]
|
|
174
|
+
|
|
175
|
+
# Add working memory as a user context message
|
|
176
|
+
if memory_text.strip():
|
|
177
|
+
result.append(Message.user(memory_text))
|
|
178
|
+
|
|
179
|
+
# Add brief summary of how many turns were compressed
|
|
180
|
+
turn_count = sum(1 for m in old if m.role == "user")
|
|
181
|
+
result.append(Message.user(
|
|
182
|
+
f"[CONTEXT] {turn_count} previous conversation turns were compressed. "
|
|
183
|
+
f"Working memory above contains the key facts. "
|
|
184
|
+
f"If you need file contents, read them again with the read_file tool."
|
|
185
|
+
))
|
|
186
|
+
|
|
187
|
+
# Add recent messages intact
|
|
188
|
+
result.extend(recent)
|
|
189
|
+
|
|
190
|
+
return result
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
# Legacy aliases for backward compatibility
|
|
194
|
+
def compact_messages(
|
|
195
|
+
messages: list[Message],
|
|
196
|
+
keep_recent: int = 8,
|
|
197
|
+
keep_system: bool = True,
|
|
198
|
+
) -> list[Message]:
|
|
199
|
+
"""Legacy compaction function. Delegates to smart_compact."""
|
|
200
|
+
return smart_compact(messages, keep_recent=keep_recent)
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
def select_relevant_files(
|
|
204
|
+
all_files: list[str],
|
|
205
|
+
user_prompt: str,
|
|
206
|
+
max_files: int = 10,
|
|
207
|
+
) -> list[str]:
|
|
208
|
+
"""Select the most relevant files to include in context."""
|
|
209
|
+
prompt_lower = user_prompt.lower()
|
|
210
|
+
scored: list[tuple[float, str]] = []
|
|
211
|
+
|
|
212
|
+
for f in all_files:
|
|
213
|
+
score = 0.0
|
|
214
|
+
fname = f.lower()
|
|
215
|
+
basename = fname.split("/")[-1].replace(".gd", "").replace(".tscn", "")
|
|
216
|
+
if basename in prompt_lower:
|
|
217
|
+
score += 10.0
|
|
218
|
+
if "project.godot" in fname:
|
|
219
|
+
score += 5.0
|
|
220
|
+
if "autoload" in fname or "manager" in fname:
|
|
221
|
+
score += 3.0
|
|
222
|
+
if "main" in fname or "game" in fname:
|
|
223
|
+
score += 2.0
|
|
224
|
+
if fname.endswith(".gd"):
|
|
225
|
+
score += 1.0
|
|
226
|
+
if fname.endswith(".tscn"):
|
|
227
|
+
score += 0.5
|
|
228
|
+
scored.append((score, f))
|
|
229
|
+
|
|
230
|
+
scored.sort(key=lambda x: x[0], reverse=True)
|
|
231
|
+
return [f for _, f in scored[:max_files]]
|
|
@@ -9,13 +9,14 @@ from pathlib import Path
|
|
|
9
9
|
from typing import Callable
|
|
10
10
|
|
|
11
11
|
from godot_agent.llm.client import ChatResponse, LLMClient, Message, TokenUsage
|
|
12
|
-
from godot_agent.runtime.context_manager import
|
|
12
|
+
from godot_agent.runtime.context_manager import smart_compact, estimate_message_tokens
|
|
13
13
|
from godot_agent.runtime.error_loop import format_validation_for_llm, validate_project
|
|
14
14
|
from godot_agent.tools.registry import ToolRegistry
|
|
15
15
|
|
|
16
16
|
log = logging.getLogger(__name__)
|
|
17
17
|
|
|
18
|
-
|
|
18
|
+
# Compact at 75% of 1.05M context to leave room for current turn
|
|
19
|
+
_COMPACT_THRESHOLD = 787500 # 75% of 1.05M
|
|
19
20
|
_FILE_MUTATING_TOOLS = {"write_file", "edit_file"}
|
|
20
21
|
|
|
21
22
|
|
|
@@ -54,10 +55,15 @@ class ConversationEngine:
|
|
|
54
55
|
self.session_api_calls = 0
|
|
55
56
|
self.last_turn: TurnStats | None = None
|
|
56
57
|
|
|
58
|
+
self.auto_commit = False
|
|
59
|
+
self.use_streaming = False
|
|
60
|
+
|
|
57
61
|
# TUI callbacks
|
|
58
62
|
self.on_tool_start: ToolStartCallback | None = None
|
|
59
63
|
self.on_tool_end: ToolEndCallback | None = None
|
|
60
64
|
self.on_diff: DiffCallback | None = None
|
|
65
|
+
self.on_stream_chunk: Callable[[str], None] | None = None
|
|
66
|
+
self.on_commit_suggest: Callable[[], None] | None = None
|
|
61
67
|
|
|
62
68
|
def scan_project(self) -> str | None:
|
|
63
69
|
"""Auto-scan project for context. Returns summary or None."""
|
|
@@ -90,10 +96,16 @@ class ConversationEngine:
|
|
|
90
96
|
return None
|
|
91
97
|
|
|
92
98
|
async def _maybe_compact(self) -> None:
|
|
93
|
-
total = sum(
|
|
99
|
+
total = sum(estimate_message_tokens(m) for m in self.messages)
|
|
94
100
|
if total > _COMPACT_THRESHOLD:
|
|
95
|
-
|
|
96
|
-
|
|
101
|
+
before = len(self.messages)
|
|
102
|
+
log.info("Smart compacting: ~%d tokens, %d messages", total, before)
|
|
103
|
+
self.messages = smart_compact(
|
|
104
|
+
self.messages, keep_recent=20, target_ratio=0.60, max_tokens=1050000
|
|
105
|
+
)
|
|
106
|
+
after = len(self.messages)
|
|
107
|
+
after_tokens = sum(estimate_message_tokens(m) for m in self.messages)
|
|
108
|
+
log.info("Compacted: %d → %d messages, ~%d tokens", before, after, after_tokens)
|
|
97
109
|
|
|
98
110
|
async def _post_tool_validate(self, tool_names: set[str]) -> str | None:
|
|
99
111
|
if not self.project_path or not self.auto_validate:
|
|
@@ -127,12 +139,21 @@ class ConversationEngine:
|
|
|
127
139
|
return args.get("command", "")[:40]
|
|
128
140
|
return ""
|
|
129
141
|
|
|
130
|
-
async def _run_loop(self, tools: list[dict] | None) -> str:
|
|
142
|
+
async def _run_loop(self, tools: list[dict] | None, use_streaming: bool = False) -> str:
|
|
131
143
|
turn = TurnStats()
|
|
132
144
|
|
|
133
145
|
for _ in range(self.max_tool_rounds + 1):
|
|
134
146
|
await self._maybe_compact()
|
|
135
|
-
|
|
147
|
+
|
|
148
|
+
# Use streaming for the final text response (no tool calls expected after tools done)
|
|
149
|
+
if use_streaming and self.on_stream_chunk:
|
|
150
|
+
from godot_agent.llm.streaming import stream_chat_with_callback
|
|
151
|
+
chat_resp = await stream_chat_with_callback(
|
|
152
|
+
self.client, self.messages, tools,
|
|
153
|
+
on_chunk=self.on_stream_chunk,
|
|
154
|
+
)
|
|
155
|
+
else:
|
|
156
|
+
chat_resp = await self.client.chat(self.messages, tools)
|
|
136
157
|
response = chat_resp.message
|
|
137
158
|
|
|
138
159
|
turn.usage = turn.usage + chat_resp.usage
|
|
@@ -198,13 +219,18 @@ class ConversationEngine:
|
|
|
198
219
|
f"Fix the errors before proceeding."
|
|
199
220
|
))
|
|
200
221
|
|
|
222
|
+
# Auto-commit suggestion after successful file mutations
|
|
223
|
+
if self.auto_commit and (tool_names_used & _FILE_MUTATING_TOOLS) and not validation_report:
|
|
224
|
+
if self.on_commit_suggest:
|
|
225
|
+
self.on_commit_suggest()
|
|
226
|
+
|
|
201
227
|
self.last_turn = turn
|
|
202
228
|
return "Tool call limit reached. Please simplify the request."
|
|
203
229
|
|
|
204
230
|
async def submit(self, user_input: str) -> str:
|
|
205
231
|
self.messages.append(Message.user(user_input))
|
|
206
232
|
tools = self.registry.to_openai_tools() or None
|
|
207
|
-
return await self._run_loop(tools)
|
|
233
|
+
return await self._run_loop(tools, use_streaming=self.use_streaming)
|
|
208
234
|
|
|
209
235
|
async def submit_with_images(self, text: str, images_b64: list[str]) -> str:
|
|
210
236
|
self.messages.append(Message.user_with_images(text, images_b64))
|
|
@@ -35,3 +35,24 @@ def load_session(session_dir: str, session_id: str) -> list[dict] | None:
|
|
|
35
35
|
return None
|
|
36
36
|
data = json.loads(file_path.read_text())
|
|
37
37
|
return data.get("messages", [])
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def load_latest_session(session_dir: str) -> tuple[str, list[Message]] | None:
|
|
41
|
+
"""Load the most recent session. Returns (session_id, messages) or None."""
|
|
42
|
+
dir_path = Path(session_dir)
|
|
43
|
+
if not dir_path.exists():
|
|
44
|
+
return None
|
|
45
|
+
files = sorted(dir_path.glob("*.json"), key=lambda f: f.stat().st_mtime, reverse=True)
|
|
46
|
+
if not files:
|
|
47
|
+
return None
|
|
48
|
+
data = json.loads(files[0].read_text())
|
|
49
|
+
session_id = data.get("session_id", files[0].stem)
|
|
50
|
+
raw_messages = data.get("messages", [])
|
|
51
|
+
messages: list[Message] = []
|
|
52
|
+
for m in raw_messages:
|
|
53
|
+
messages.append(Message(
|
|
54
|
+
role=m.get("role", "user"),
|
|
55
|
+
content=m.get("content"),
|
|
56
|
+
tool_call_id=m.get("tool_call_id"),
|
|
57
|
+
))
|
|
58
|
+
return session_id, messages
|
|
@@ -13,22 +13,47 @@ from godot_agent.tools.file_ops import _project_root
|
|
|
13
13
|
|
|
14
14
|
log = logging.getLogger(__name__)
|
|
15
15
|
|
|
16
|
-
#
|
|
17
|
-
|
|
16
|
+
# Safety level — set by CLI on startup
|
|
17
|
+
_safety_level: str = "normal" # strict, normal, permissive
|
|
18
|
+
|
|
19
|
+
def set_safety_level(level: str) -> None:
|
|
20
|
+
global _safety_level
|
|
21
|
+
_safety_level = level
|
|
22
|
+
|
|
23
|
+
# Patterns blocked at each safety level
|
|
24
|
+
_ALWAYS_BLOCKED = [
|
|
18
25
|
r'\brm\s+-rf\s+/', # rm -rf /
|
|
19
26
|
r'\brm\s+-rf\s+~', # rm -rf ~
|
|
27
|
+
r'\bmkfs\b', # format disk
|
|
28
|
+
r'\bdd\s+if=', # dd disk operations
|
|
29
|
+
]
|
|
30
|
+
|
|
31
|
+
_NORMAL_BLOCKED = _ALWAYS_BLOCKED + [
|
|
20
32
|
r'\bcurl\b.*\|.*\bsh\b', # curl | sh
|
|
21
33
|
r'\bwget\b.*\|.*\bsh\b', # wget | sh
|
|
22
34
|
r'\bchmod\s+777\b', # chmod 777
|
|
23
35
|
r'\bsudo\b', # sudo anything
|
|
24
|
-
|
|
25
|
-
|
|
36
|
+
]
|
|
37
|
+
|
|
38
|
+
_STRICT_BLOCKED = _NORMAL_BLOCKED + [
|
|
39
|
+
r'\bcurl\b', # any curl
|
|
40
|
+
r'\bwget\b', # any wget
|
|
41
|
+
r'\bnpm\s+install\b', # npm install
|
|
42
|
+
r'\bpip\s+install\b', # pip install
|
|
43
|
+
r'\bgit\s+push\b', # git push
|
|
44
|
+
r'\bgit\s+reset\b', # git reset
|
|
26
45
|
]
|
|
27
46
|
|
|
28
47
|
|
|
29
48
|
def _is_blocked(command: str) -> str | None:
|
|
30
|
-
"""Check if a command matches blocked patterns
|
|
31
|
-
|
|
49
|
+
"""Check if a command matches blocked patterns based on safety level."""
|
|
50
|
+
if _safety_level == "permissive":
|
|
51
|
+
patterns = _ALWAYS_BLOCKED
|
|
52
|
+
elif _safety_level == "strict":
|
|
53
|
+
patterns = _STRICT_BLOCKED
|
|
54
|
+
else:
|
|
55
|
+
patterns = _NORMAL_BLOCKED
|
|
56
|
+
for pattern in patterns:
|
|
32
57
|
if re.search(pattern, command):
|
|
33
58
|
return f"Blocked: command matches dangerous pattern '{pattern}'"
|
|
34
59
|
return None
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
"""Rich input with history, auto-complete, and multi-line support."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from prompt_toolkit import PromptSession
|
|
8
|
+
from prompt_toolkit.completion import Completer, Completion
|
|
9
|
+
from prompt_toolkit.history import FileHistory
|
|
10
|
+
from prompt_toolkit.formatted_text import HTML
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class CommandCompleter(Completer):
    """Auto-complete for / commands, /set keys, and file paths.

    Branch order in ``get_completions`` matters: the "/set " and "/cd "
    handlers must run before the generic "/" command handler, because that
    handler returns for every slash-prefixed input. The previous ordering
    made argument completion for "/set" and "/cd" unreachable.
    """

    COMMANDS = [
        ("/cd ", "change project directory"),
        ("/info", "show project details"),
        ("/status", "show model & auth"),
        ("/usage", "show token usage"),
        ("/settings", "show all settings"),
        ("/set ", "change a setting"),
        ("/save", "save session"),
        ("/load", "restore session"),
        ("/help", "show commands"),
        ("/quit", "exit"),
    ]

    SETTINGS = [
        "language", "verbosity", "auto_validate", "auto_commit",
        "token_budget", "safety", "streaming", "extra_prompt",
    ]

    def __init__(self, project_root: Path | None = None):
        # Reserved for future project-relative path completion.
        self.project_root = project_root

    def get_completions(self, document, complete_event):
        text = document.text_before_cursor

        # /set <key> completion — must be checked before the generic "/"
        # branch below, which returns for all slash-prefixed input.
        if text.startswith("/set "):
            arg = text[len("/set "):]
            if " " not in arg:  # still typing the setting name, not its value
                for setting in self.SETTINGS:
                    if setting.startswith(arg):
                        yield Completion(setting, start_position=-len(arg))
            return

        # cd/path completion — also ahead of the generic "/" branch so
        # "/cd <path>" gets filesystem completion.
        if text.startswith("cd ") or text.startswith("/cd "):
            parts = text.split(None, 1)
            # Guard: "cd " with only trailing whitespace splits into a single
            # element; the old `text.split(None, 1)[1]` raised IndexError.
            prefix = parts[1] if len(parts) > 1 else ""
            path = Path(prefix).expanduser()
            parent = path.parent if not path.is_dir() else path
            stem = path.name if not path.is_dir() else ""
            try:
                for item in sorted(parent.iterdir()):
                    if item.name.startswith("."):
                        continue  # hide dotfiles
                    if item.name.lower().startswith(stem.lower()):
                        suffix = "/" if item.is_dir() else ""
                        yield Completion(
                            item.name + suffix,
                            start_position=-len(stem),
                        )
            except (PermissionError, FileNotFoundError):
                pass
            return

        # Command completion
        if text.startswith("/"):
            for cmd, desc in self.COMMANDS:
                if cmd.startswith(text):
                    yield Completion(cmd, start_position=-len(text), display_meta=desc)
            return
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def create_session(history_file: str | None = None) -> PromptSession:
    """Build a PromptSession, backed by persistent file history when a path is given."""
    if history_file:
        return PromptSession(history=FileHistory(history_file))
    return PromptSession(history=None)
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def get_input(session: PromptSession, completer: CommandCompleter | None = None) -> str | None:
    """Prompt for one line of user input; None signals EOF or Ctrl-C."""
    prompt_label = HTML("<green>you></green> ")
    try:
        return session.prompt(prompt_label, completer=completer)
    except (EOFError, KeyboardInterrupt):
        return None
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def get_multiline_continuation(session: PromptSession) -> str | None:
    """Prompt for a continuation line of multi-line input; None on EOF/Ctrl-C."""
    continuation_label = HTML("<dim>...</dim> ")
    try:
        line = session.prompt(continuation_label)
    except (EOFError, KeyboardInterrupt):
        return None
    return line
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "god-code"
|
|
3
|
-
version = "0.4.0"
|
|
3
|
+
version = "0.4.3"
|
|
4
4
|
description = "AI coding agent specialized for Godot game development"
|
|
5
5
|
requires-python = ">=3.9"
|
|
6
6
|
license = {text = "GPL-3.0-or-later"}
|
|
@@ -12,6 +12,7 @@ dependencies = [
|
|
|
12
12
|
"Pillow>=10.3",
|
|
13
13
|
"rich>=13.0",
|
|
14
14
|
"packaging>=21.0",
|
|
15
|
+
"prompt_toolkit>=3.0",
|
|
15
16
|
"eval_type_backport>=0.2.0; python_version < '3.10'",
|
|
16
17
|
]
|
|
17
18
|
|
|
@@ -20,14 +20,17 @@ class TestCompactMessages:
|
|
|
20
20
|
assert len(result) == 3 # unchanged
|
|
21
21
|
|
|
22
22
|
def test_compact_old_messages(self):
    """Old turns are summarized away while the system message and the
    most recent messages survive intact."""
    # Use small max_tokens to force compaction even with small messages
    from godot_agent.runtime.context_manager import smart_compact
    msgs = [Message.system("sys")]
    # Build 20 user/assistant turn pairs so there is plenty of history to drop.
    for i in range(20):
        msgs.append(Message.user(f"question {i}"))
        msgs.append(Message.assistant(content=f"answer {i}"))
    # Force compaction with a tiny max_tokens
    result = smart_compact(msgs, keep_recent=4, target_ratio=0.01, max_tokens=100)
    assert len(result) < len(msgs)
    assert result[0].role == "system"
    assert result[-1].content == "answer 19"
|
|
31
34
|
|
|
32
35
|
|
|
33
36
|
class TestSelectRelevantFiles:
|
|
@@ -1,26 +0,0 @@
|
|
|
1
|
-
from __future__ import annotations
|
|
2
|
-
import json
|
|
3
|
-
from typing import AsyncIterator
|
|
4
|
-
from godot_agent.llm.client import LLMClient, Message
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
async def stream_chat(
    client: LLMClient,
    messages: list[Message],
    tools: list[dict] | None = None,
) -> AsyncIterator[dict]:
    """Stream chat-completion chunks from the LLM as parsed JSON dicts.

    Issues a streaming POST to the OpenAI-compatible ``/chat/completions``
    endpoint and yields each server-sent-event payload as a dict, stopping
    at the ``[DONE]`` sentinel. Raises for non-2xx HTTP responses.
    """
    body = client._build_request_body(messages, tools)
    body["stream"] = True  # ask the server for an SSE stream instead of a single response
    async with client._http.stream(
        "POST",
        f"{client.config.base_url}/chat/completions",
        headers=client._build_headers(),
        json=body,
    ) as resp:
        resp.raise_for_status()
        async for line in resp.aiter_lines():
            # SSE frames carry their payload on lines prefixed "data: ";
            # other lines (blank keep-alives, comments) are ignored.
            if line.startswith("data: "):
                data = line[6:]  # strip the "data: " prefix
                if data.strip() == "[DONE]":
                    break  # server's end-of-stream sentinel
                yield json.loads(data)
|
|
@@ -1,135 +0,0 @@
|
|
|
1
|
-
"""Context window management for multi-file Godot projects.
|
|
2
|
-
|
|
3
|
-
Prevents context overflow by:
|
|
4
|
-
- Summarizing old conversation turns
|
|
5
|
-
- Selecting which files to include based on relevance
|
|
6
|
-
- Tracking token usage estimates
|
|
7
|
-
"""
|
|
8
|
-
|
|
9
|
-
from __future__ import annotations
|
|
10
|
-
|
|
11
|
-
from dataclasses import dataclass, field
|
|
12
|
-
|
|
13
|
-
from godot_agent.llm.client import Message
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
@dataclass
class ContextBudget:
    """Token accounting for a single LLM context window."""

    max_tokens: int = 128000  # total window size
    system_prompt_tokens: int = 0  # tokens consumed by the system prompt
    message_tokens: int = 0  # tokens consumed by conversation history
    reserved_for_response: int = 4096  # headroom kept free for the model's reply

    @property
    def available(self) -> int:
        """Tokens still free after prompt, history, and the response reserve."""
        consumed = self.system_prompt_tokens + self.message_tokens + self.reserved_for_response
        return self.max_tokens - consumed

    @property
    def usage_ratio(self) -> float:
        """Fraction of the window consumed by prompt plus history (0.0 for an empty window)."""
        if self.max_tokens <= 0:
            return 0.0
        return (self.system_prompt_tokens + self.message_tokens) / self.max_tokens

    @property
    def should_compact(self) -> bool:
        """True once more than 70% of the window is in use."""
        return self.usage_ratio > 0.7
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
def estimate_tokens(text: str) -> int:
    """Rough token estimate assuming ~3 characters per token.

    Deliberately conservative (over-counts relative to the common ~4
    chars/token for English) so budget checks trip early rather than
    overflow the context window.
    """
    return len(text) // 3  # Conservative estimate
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
def compact_messages(
    messages: list[Message],
    keep_recent: int = 6,
    keep_system: bool = True,
) -> list[Message]:
    """Compact conversation history by summarizing old turns.

    Keeps:
    - System message (always, when ``keep_system`` is true)
    - Last ``keep_recent`` messages (recent context)
    - Replaces everything in between with one summary user message

    NOTE(review): slicing at ``keep_recent`` can separate a tool-result
    message from the assistant tool_call that produced it — confirm the
    target API tolerates that, or cut at a turn boundary instead.
    """
    if len(messages) <= keep_recent + 1:
        return messages  # Nothing to compact

    result: list[Message] = []

    # Keep system message
    if keep_system and messages and messages[0].role == "system":
        result.append(messages[0])
        rest = messages[1:]
    else:
        rest = messages

    if len(rest) <= keep_recent:
        return result + rest

    # Summarize old messages
    old = rest[:-keep_recent]
    recent = rest[-keep_recent:]

    summary_parts: list[str] = []
    for msg in old:
        if msg.role == "user":
            # Multi-part (image) content has no useful text to excerpt.
            content = msg.content if isinstance(msg.content, str) else "[image+text message]"
            summary_parts.append(f"User asked: {content[:100]}...")
        elif msg.role == "assistant":
            if msg.tool_calls:
                tools = ", ".join(tc.name for tc in msg.tool_calls)
                summary_parts.append(f"Agent used tools: {tools}")
            elif msg.content:
                summary_parts.append(f"Agent replied: {msg.content[:100]}...")
        elif msg.role == "tool":
            # Fixed: was an f-string with no placeholders (runtime string unchanged).
            summary_parts.append("Tool returned result")

    summary = "[Conversation history summary]\n" + "\n".join(summary_parts)
    result.append(Message.user(summary))
    result.extend(recent)

    return result
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
def select_relevant_files(
    all_files: list[str],
    user_prompt: str,
    max_files: int = 10,
) -> list[str]:
    """Select the most relevant files to include in context.

    Scores each path and returns the ``max_files`` highest scorers,
    prioritizing:
    1. Files whose basename is mentioned in the prompt
    2. Core files (project.godot, autoloads/managers, main/game scenes)
    3. GDScript files over scene files
    """
    prompt_lower = user_prompt.lower()

    def _relevance(path: str) -> float:
        lowered = path.lower()
        score = 0.0
        # Direct mention in prompt
        stem = lowered.split("/")[-1].replace(".gd", "").replace(".tscn", "")
        if stem in prompt_lower:
            score += 10.0
        # Core files always relevant
        if "project.godot" in lowered:
            score += 5.0
        if "autoload" in lowered or "manager" in lowered:
            score += 3.0
        if "main" in lowered or "game" in lowered:
            score += 2.0
        # File type relevance
        if lowered.endswith(".gd"):
            score += 1.0
        if lowered.endswith(".tscn"):
            score += 0.5
        return score

    # sorted() is stable, so ties keep their original input order,
    # matching the previous scored-list implementation.
    ranked = sorted(all_files, key=_relevance, reverse=True)
    return ranked[:max_files]
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|