code-puppy 0.0.53__py3-none-any.whl → 0.0.55__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/__init__.py +1 -0
- code_puppy/agent.py +20 -8
- code_puppy/agent_prompts.py +2 -3
- code_puppy/command_line/file_path_completion.py +11 -4
- code_puppy/command_line/meta_command_handler.py +48 -28
- code_puppy/command_line/model_picker_completion.py +27 -13
- code_puppy/command_line/prompt_toolkit_completion.py +95 -51
- code_puppy/command_line/utils.py +8 -6
- code_puppy/config.py +22 -11
- code_puppy/main.py +32 -22
- code_puppy/model_factory.py +7 -7
- code_puppy/session_memory.py +31 -19
- code_puppy/tools/__init__.py +1 -0
- code_puppy/tools/code_map.py +16 -11
- code_puppy/tools/command_runner.py +160 -63
- code_puppy/tools/common.py +1 -1
- code_puppy/tools/file_modifications.py +352 -302
- code_puppy/tools/file_operations.py +237 -192
- code_puppy/tools/web_search.py +24 -8
- code_puppy/version_checker.py +4 -4
- {code_puppy-0.0.53.dist-info → code_puppy-0.0.55.dist-info}/METADATA +1 -1
- code_puppy-0.0.55.dist-info/RECORD +28 -0
- code_puppy-0.0.53.dist-info/RECORD +0 -28
- {code_puppy-0.0.53.data → code_puppy-0.0.55.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.53.dist-info → code_puppy-0.0.55.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.53.dist-info → code_puppy-0.0.55.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.53.dist-info → code_puppy-0.0.55.dist-info}/licenses/LICENSE +0 -0

code_puppy/command_line/prompt_toolkit_completion.py
CHANGED

@@ -1,6 +1,7 @@
 import os
 from code_puppy.command_line.utils import list_directory
-from code_puppy.config import get_puppy_name,
+from code_puppy.config import get_puppy_name, get_config_keys, get_value
+
 # ANSI color codes are no longer necessary because prompt_toolkit handles
 # styling via the `Style` class. We keep them here commented-out in case
 # someone needs raw ANSI later, but they are unused in the current code.
@@ -12,6 +13,7 @@ from code_puppy.config import get_puppy_name, get_owner_name, get_config_keys, g
 import asyncio
 from typing import Optional
 from prompt_toolkit import PromptSession
+from prompt_toolkit.formatted_text import FormattedText
 from prompt_toolkit.completion import merge_completers
 from prompt_toolkit.history import FileHistory
 from prompt_toolkit.styles import Style
@@ -27,107 +29,143 @@ from code_puppy.command_line.file_path_completion import FilePathCompleter
 
 from prompt_toolkit.completion import Completer, Completion
 
+
 class SetCompleter(Completer):
-    def __init__(self, trigger: str =
+    def __init__(self, trigger: str = "~set"):
         self.trigger = trigger
+
     def get_completions(self, document, complete_event):
         text = document.text_before_cursor
         if not text.strip().startswith(self.trigger):
             return
         # If the only thing typed is exactly '~set', suggest space
         if text.strip() == self.trigger:
-            yield Completion(
+            yield Completion(
+                self.trigger + " ",
+                start_position=-len(self.trigger),
+                display=f"{self.trigger} ",
+                display_meta="set config",
+            )
         tokens = text.strip().split()
         # completion for the first arg after ~set
         if len(tokens) == 1:
             # user just typed ~set <-- suggest config keys
-            base =
+            base = ""
         else:
             base = tokens[1]
         # --- SPECIAL HANDLING FOR 'model' KEY ---
-        if base ==
+        if base == "model":
             # Don't return any completions -- let ModelNameCompleter handle it
             return
         for key in get_config_keys():
-            if key ==
+            if key == "model":
                 continue # exclude 'model' from regular ~set completions
             if key.startswith(base):
                 prev_value = get_value(key)
                 # Ensure there's a space after '~set' if it's the only thing typed
-                if text.strip() == self.trigger or text.strip() == self.trigger +
-                    prefix = self.trigger +
-                    insert_text =
+                if text.strip() == self.trigger or text.strip() == self.trigger + "":
+                    prefix = self.trigger + " "  # Always enforce a space
+                    insert_text = (
+                        f"{prefix}{key} = {prev_value}"
+                        if prev_value is not None
+                        else f"{prefix}{key} = "
+                    )
                     sp = -len(text)
                 else:
-                    insert_text =
+                    insert_text = (
+                        f"{key} = {prev_value}"
+                        if prev_value is not None
+                        else f"{key} = "
+                    )
                     sp = -len(base)
                 # Make it obvious the value part is from before
-                yield Completion(
+                yield Completion(
+                    insert_text,
+                    start_position=sp,
+                    display_meta=f"puppy.cfg key (was: {prev_value})"
+                    if prev_value is not None
+                    else "puppy.cfg key",
+                )
+
 
 class CDCompleter(Completer):
-    def __init__(self, trigger: str =
+    def __init__(self, trigger: str = "~cd"):
         self.trigger = trigger
+
     def get_completions(self, document, complete_event):
         text = document.text_before_cursor
         if not text.strip().startswith(self.trigger):
             return
         tokens = text.strip().split()
         if len(tokens) == 1:
-            base =
+            base = ""
         else:
             base = tokens[1]
         try:
             prefix = os.path.expanduser(base)
-            part = os.path.dirname(prefix) if os.path.dirname(prefix) else
+            part = os.path.dirname(prefix) if os.path.dirname(prefix) else "."
             dirs, _ = list_directory(part)
             dirnames = [d for d in dirs if d.startswith(os.path.basename(base))]
             base_dir = os.path.dirname(base)
             for d in dirnames:
                 # Build the completion text so we keep the already-typed directory parts.
-                if base_dir and base_dir !=
+                if base_dir and base_dir != ".":
                     suggestion = os.path.join(base_dir, d)
                 else:
                     suggestion = d
                 # Append trailing slash so the user can continue tabbing into sub-dirs.
                 suggestion = suggestion.rstrip(os.sep) + os.sep
-                yield Completion(
+                yield Completion(
+                    suggestion,
+                    start_position=-len(base),
+                    display=d + os.sep,
+                    display_meta="Directory",
+                )
         except Exception:
             # Silently ignore errors (e.g., permission issues, non-existent dir)
             pass
 
-
-def get_prompt_with_active_model(base: str =
+
+def get_prompt_with_active_model(base: str = ">>> "):
     puppy = get_puppy_name()
-
-    model = get_active_model() or '(default)'
+    model = get_active_model() or "(default)"
     cwd = os.getcwd()
-    home = os.path.expanduser(
+    home = os.path.expanduser("~")
     if cwd.startswith(home):
-        cwd_display =
+        cwd_display = "~" + cwd[len(home) :]
     else:
         cwd_display = cwd
-    return FormattedText(
-
-
-
-
-
-
-
-
-
+    return FormattedText(
+        [
+            ("bold", "🐶 "),
+            ("class:puppy", f"{puppy}"),
+            ("", " "),
+            ("class:model", "[" + str(model) + "] "),
+            ("class:cwd", "(" + str(cwd_display) + ") "),
+            ("class:arrow", str(base)),
+        ]
+    )
+
+
+async def get_input_with_combined_completion(
+    prompt_str=">>> ", history_file: Optional[str] = None
+) -> str:
     history = FileHistory(history_file) if history_file else None
-    completer = merge_completers(
-
-
-
-
-
+    completer = merge_completers(
+        [
+            FilePathCompleter(symbol="@"),
+            ModelNameCompleter(trigger="~m"),
+            CDCompleter(trigger="~cd"),
+            SetCompleter(trigger="~set"),
+        ]
+    )
     # Add custom key bindings for Alt+M to insert a new line without submitting
     bindings = KeyBindings()
-
+
+    @bindings.add(Keys.Escape, "m") # Alt+M
     def _(event):
-        event.app.current_buffer.insert_text(
+        event.app.current_buffer.insert_text("\n")
+
     @bindings.add(Keys.Escape)
     def _(event):
         """Cancel the current prompt when the user presses the ESC key alone."""
@@ -137,35 +175,40 @@ async def get_input_with_combined_completion(prompt_str = '>>> ', history_file:
         completer=completer,
         history=history,
         complete_while_typing=True,
-        key_bindings=bindings
+        key_bindings=bindings,
     )
     # If they pass a string, backward-compat: convert it to formatted_text
     if isinstance(prompt_str, str):
         from prompt_toolkit.formatted_text import FormattedText
+
         prompt_str = FormattedText([(None, prompt_str)])
-    style = Style.from_dict(
-
-
-
-
-
-
-
-
+    style = Style.from_dict(
+        {
+            # Keys must AVOID the 'class:' prefix – that prefix is used only when
+            # tagging tokens in `FormattedText`. See prompt_toolkit docs.
+            "puppy": "bold magenta",
+            "owner": "bold white",
+            "model": "bold cyan",
+            "cwd": "bold green",
+            "arrow": "bold yellow",
+        }
+    )
     text = await session.prompt_async(prompt_str, style=style)
     possibly_stripped = update_model_in_input(text)
    if possibly_stripped is not None:
         return possibly_stripped
     return text
 
+
 if __name__ == "__main__":
     print("Type '@' for path-completion or '~m' to pick a model. Ctrl+D to exit.")
+
     async def main():
         while True:
             try:
                 inp = await get_input_with_combined_completion(
                     get_prompt_with_active_model(),
-                    history_file="~/.path_completion_history.txt"
+                    history_file="~/.path_completion_history.txt",
                 )
                 print(f"You entered: {inp}")
             except KeyboardInterrupt:
@@ -173,4 +216,5 @@ if __name__ == "__main__":
             except EOFError:
                 break
         print("\nGoodbye!")
+
     asyncio.run(main())
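
The hunks above are mostly quote-style and formatting changes to the completers. For readers unfamiliar with the prompt_toolkit pattern they rely on, here is a minimal, self-contained sketch (illustrative names only, not code-puppy's actual classes) of a trigger-based `Completer` merged into a `PromptSession`:

```python
# Minimal sketch of the prompt_toolkit completer pattern used above.
# TriggerCompleter and the word list are illustrative placeholders.
import asyncio

from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion, merge_completers


class TriggerCompleter(Completer):
    """Suggest a fixed set of words after a trigger such as '~set'."""

    def __init__(self, trigger, words):
        self.trigger = trigger
        self.words = words

    def get_completions(self, document, complete_event):
        text = document.text_before_cursor
        if not text.strip().startswith(self.trigger):
            return
        tokens = text.strip().split()
        base = tokens[1] if len(tokens) > 1 else ""
        for word in self.words:
            if word.startswith(base):
                # start_position=-len(base) replaces the partial word already typed.
                yield Completion(word, start_position=-len(base))


async def demo() -> None:
    completer = merge_completers([TriggerCompleter("~set", ["yolo_mode", "model"])])
    session = PromptSession(completer=completer, complete_while_typing=True)
    text = await session.prompt_async(">>> ")
    print(f"You entered: {text}")


if __name__ == "__main__":
    asyncio.run(demo())
```

The key detail, visible in both `SetCompleter` and `CDCompleter` above, is `start_position=-len(base)`: it tells prompt_toolkit how much already-typed text each completion should replace.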
code_puppy/command_line/utils.py
CHANGED

@@ -13,7 +13,7 @@ def list_directory(path: str = None) -> Tuple[List[str], List[str]]:
     try:
         entries = [e for e in os.listdir(path)]
     except Exception as e:
-        raise RuntimeError(f
+        raise RuntimeError(f"Error listing directory: {e}")
     dirs = [e for e in entries if os.path.isdir(os.path.join(path, e))]
     files = [e for e in entries if not os.path.isdir(os.path.join(path, e))]
     return dirs, files
@@ -26,11 +26,13 @@ def make_directory_table(path: str = None) -> Table:
     if path is None:
         path = os.getcwd()
     dirs, files = list_directory(path)
-    table = Table(
-
-
+    table = Table(
+        title=f"\U0001f4c1 [bold blue]Current directory:[/bold blue] [cyan]{path}[/cyan]"
+    )
+    table.add_column("Type", style="dim", width=8)
+    table.add_column("Name", style="bold")
     for d in sorted(dirs):
-        table.add_row(
+        table.add_row("[green]dir[/green]", f"[cyan]{d}[/cyan]")
     for f in sorted(files):
-        table.add_row(
+        table.add_row("[yellow]file[/yellow]", f"{f}")
     return table
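
The `make_directory_table` change above builds an explicit two-column rich `Table`. A hedged usage sketch, with a simplified listing helper standing in for code-puppy's `list_directory`:

```python
# Hypothetical usage of the two-column directory table shown in the diff.
# The rich calls mirror the diff; the listing logic is simplified.
import os

from rich.console import Console
from rich.table import Table


def make_directory_table(path: str = ".") -> Table:
    table = Table(title=f"Current directory: {path}")
    table.add_column("Type", style="dim", width=8)
    table.add_column("Name", style="bold")
    for entry in sorted(os.listdir(path)):
        kind = "dir" if os.path.isdir(os.path.join(path, entry)) else "file"
        table.add_row(kind, entry)
    return table


if __name__ == "__main__":
    Console().print(make_directory_table(os.getcwd()))
```

Printing the returned `Table` through a rich `Console` is what renders the markup tags (`[cyan]...[/cyan]`) seen in the diff.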
code_puppy/config.py
CHANGED

@@ -7,6 +7,7 @@ CONFIG_FILE = os.path.join(CONFIG_DIR, "puppy.cfg")
 DEFAULT_SECTION = "puppy"
 REQUIRED_KEYS = ["puppy_name", "owner_name"]
 
+
 def ensure_config_exists():
     """
     Ensure that the .code_puppy dir and puppy.cfg exist, prompting if needed.
@@ -30,7 +31,9 @@ def ensure_config_exists():
         if key == "puppy_name":
             val = input("What should we name the puppy? ").strip()
         elif key == "owner_name":
-            val = input(
+            val = input(
+                "What's your name (so Code Puppy knows its master)? "
+            ).strip()
         else:
             val = input(f"Enter {key}: ").strip()
         config[DEFAULT_SECTION][key] = val
@@ -38,48 +41,55 @@ def ensure_config_exists():
         config.write(f)
     return config
 
+
 def get_value(key: str):
     config = configparser.ConfigParser()
     config.read(CONFIG_FILE)
     val = config.get(DEFAULT_SECTION, key, fallback=None)
     return val
 
+
 def get_puppy_name():
     return get_value("puppy_name") or "Puppy"
 
+
 def get_owner_name():
     return get_value("owner_name") or "Master"
 
+
 # --- CONFIG SETTER STARTS HERE ---
 def get_config_keys():
-
+    """
     Returns the list of all config keys currently in puppy.cfg,
     plus certain preset expected keys (e.g. "yolo_mode", "model").
-
-    default_keys = [
+    """
+    default_keys = ["yolo_mode", "model"]
     config = configparser.ConfigParser()
     config.read(CONFIG_FILE)
     keys = set(config[DEFAULT_SECTION].keys()) if DEFAULT_SECTION in config else set()
     keys.update(default_keys)
     return sorted(keys)
 
+
 def set_config_value(key: str, value: str):
-
+    """
     Sets a config value in the persistent config file.
-
+    """
     config = configparser.ConfigParser()
     config.read(CONFIG_FILE)
     if DEFAULT_SECTION not in config:
         config[DEFAULT_SECTION] = {}
     config[DEFAULT_SECTION][key] = value
-    with open(CONFIG_FILE,
+    with open(CONFIG_FILE, "w") as f:
         config.write(f)
 
+
 # --- MODEL STICKY EXTENSION STARTS HERE ---
 def get_model_name():
     """Returns the last used model name stored in config, or None if unset."""
     return get_value("model") or "gpt-4.1"
 
+
 def set_model_name(model: str):
     """Sets the model name in the persistent config file."""
     config = configparser.ConfigParser()
@@ -90,6 +100,7 @@ def set_model_name(model: str):
     with open(CONFIG_FILE, "w") as f:
         config.write(f)
 
+
 def get_yolo_mode():
     """
     Checks puppy.cfg for 'yolo_mode' (case-insensitive in value only).
@@ -99,16 +110,16 @@ def get_yolo_mode():
     Allowed values for ON: 1, '1', 'true', 'yes', 'on' (all case-insensitive for value).
     Always prioritizes the config once set!
     """
-    true_vals = {
-    cfg_val = get_value(
+    true_vals = {"1", "true", "yes", "on"}
+    cfg_val = get_value("yolo_mode")
     if cfg_val is not None:
         if str(cfg_val).strip().lower() in true_vals:
             return True
         return False
-    env_val = os.getenv(
+    env_val = os.getenv("YOLO_MODE")
     if env_val is not None:
         # Persist the env value now
-        set_config_value(
+        set_config_value("yolo_mode", env_val)
         if str(env_val).strip().lower() in true_vals:
             return True
     return False
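
config.py keeps all persistent state in a single `configparser` section. A minimal sketch of that read/write round-trip, using placeholder paths and section names rather than code-puppy's real `CONFIG_FILE`:

```python
# Sketch of the configparser read/write pattern that config.py follows.
# CONFIG_FILE and SECTION below are placeholders for illustration.
import configparser
import os

CONFIG_FILE = os.path.expanduser("~/.example_app/app.cfg")
SECTION = "app"


def get_value(key: str):
    config = configparser.ConfigParser()
    config.read(CONFIG_FILE)  # yields an empty config if the file is missing
    return config.get(SECTION, key, fallback=None)


def set_value(key: str, value: str) -> None:
    config = configparser.ConfigParser()
    config.read(CONFIG_FILE)
    if SECTION not in config:
        config[SECTION] = {}
    config[SECTION][key] = value
    os.makedirs(os.path.dirname(CONFIG_FILE), exist_ok=True)
    with open(CONFIG_FILE, "w") as f:
        config.write(f)


if __name__ == "__main__":
    set_value("yolo_mode", "true")
    print(get_value("yolo_mode"))  # -> "true"
```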
code_puppy/main.py
CHANGED

@@ -14,14 +14,14 @@ from rich.text import Text
 from rich.syntax import Syntax
 from code_puppy.command_line.prompt_toolkit_completion import (
     get_input_with_combined_completion,
-    get_prompt_with_active_model
+    get_prompt_with_active_model,
 )
 
 # Initialize rich console for pretty output
 from code_puppy.tools.common import console
 from code_puppy.agent import get_code_generation_agent, session_memory
 
-from code_puppy.tools import *
+# from code_puppy.tools import * # noqa: F403
 
 
 # Define a function to get the secret file path
@@ -36,12 +36,14 @@ async def main():
     # Ensure the config directory and puppy.cfg with name info exist (prompt user if needed)
     ensure_config_exists()
     current_version = __version__
-    latest_version = fetch_latest_version(
-    console.print(f
-    console.print(f
+    latest_version = fetch_latest_version("code-puppy")
+    console.print(f"Current version: {current_version}")
+    console.print(f"Latest version: {latest_version}")
     if latest_version and latest_version != current_version:
-        console.print(
-
+        console.print(
+            f"[bold yellow]A new version of code puppy is available: {latest_version}[/bold yellow]"
+        )
+        console.print("[bold green]Please consider updating![/bold green]")
     global shutdown_flag
     shutdown_flag = False  # ensure this is initialized
 
@@ -69,11 +71,11 @@ async def main():
             console.print(agent_response.output_message)
             # Log to session memory
             session_memory().log_task(
-                f
-                extras={
-
-
-                }
+                f"Command executed: {command}",
+                extras={
+                    "output": agent_response.output_message,
+                    "awaiting_user_input": agent_response.awaiting_user_input,
+                },
             )
             if agent_response.awaiting_user_input:
                 console.print(
@@ -96,19 +98,23 @@ async def main():
 # Add the file handling functionality for interactive mode
 async def interactive_mode(history_file_path: str) -> None:
     from code_puppy.command_line.meta_command_handler import handle_meta_command
+
     """Run the agent in interactive mode."""
     console.print("[bold green]Code Puppy[/bold green] - Interactive Mode")
     console.print("Type 'exit' or 'quit' to exit the interactive mode.")
     console.print("Type 'clear' to reset the conversation history.")
-    console.print(
+    console.print(
+        "Type [bold blue]@[/bold blue] for path completion, or [bold blue]~m[/bold blue] to pick a model."
+    )
 
     # Show meta commands right at startup - DRY!
     from code_puppy.command_line.meta_command_handler import META_COMMANDS_HELP
+
     console.print(META_COMMANDS_HELP)
 
     # Check if prompt_toolkit is installed
     try:
-        import prompt_toolkit
+        import prompt_toolkit # noqa: F401
 
         console.print("[dim]Using prompt_toolkit for enhanced tab completion[/dim]")
     except ImportError:
@@ -152,7 +158,7 @@ async def interactive_mode(history_file_path: str) -> None:
                 # Use the async version of get_input_with_combined_completion
                 task = await get_input_with_combined_completion(
                     get_prompt_with_active_model(),
-                    history_file=history_file_path_prompt
+                    history_file=history_file_path_prompt,
                 )
             except ImportError:
                 # Fall back to basic input if prompt_toolkit is not available
@@ -178,7 +184,7 @@ async def interactive_mode(history_file_path: str) -> None:
                 continue
 
             # Handle ~ meta/config commands before anything else
-            if task.strip().startswith(
+            if task.strip().startswith("~"):
                 if handle_meta_command(task.strip(), console):
                     continue
             if task.strip():
@@ -203,17 +209,21 @@ async def interactive_mode(history_file_path: str) -> None:
                 console.print(agent_response.output_message)
                 # Log to session memory
                 session_memory().log_task(
-                    f
-                    extras={
-
-
-                    }
+                    f"Interactive task: {task}",
+                    extras={
+                        "output": agent_response.output_message,
+                        "awaiting_user_input": agent_response.awaiting_user_input,
+                    },
                 )
 
                 # Update message history but apply filters & limits
                 new_msgs = result.new_messages()
                 # 1. Drop any system/config messages (e.g., "agent loaded with model")
-                filtered = [
+                filtered = [
+                    m
+                    for m in new_msgs
+                    if not (isinstance(m, dict) and m.get("role") == "system")
+                ]
                 # 2. Append to existing history and keep only the most recent 40
                 message_history.extend(filtered)
                 if len(message_history) > 40:
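
The interactive-mode hunk above ends at the history cap (`if len(message_history) > 40:`), so the exact trimming step is not shown. Below is a sketch of one plausible implementation of the filter-then-cap logic; the dict-shaped messages and the keep-the-most-recent behavior are assumptions for illustration only:

```python
# Sketch of the history handling shown in the diff: drop system messages from
# the new batch, then cap total history at 40 entries (cap behavior assumed).
from typing import Any, Dict, List

MAX_HISTORY = 40


def trim_history(message_history: List[Any], new_msgs: List[Any]) -> List[Any]:
    filtered = [
        m
        for m in new_msgs
        if not (isinstance(m, dict) and m.get("role") == "system")
    ]
    message_history.extend(filtered)
    if len(message_history) > MAX_HISTORY:
        # Keep only the most recent MAX_HISTORY entries.
        del message_history[:-MAX_HISTORY]
    return message_history


if __name__ == "__main__":
    history: List[Dict[str, str]] = []
    batch = [
        {"role": "system", "content": "agent loaded with model"},
        {"role": "user", "content": "list files"},
    ]
    print(trim_history(history, batch))  # only the user message survives
```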
code_puppy/model_factory.py
CHANGED

@@ -10,7 +10,7 @@ from pydantic_ai.providers.google_gla import GoogleGLAProvider
 from pydantic_ai.providers.openai import OpenAIProvider
 from pydantic_ai.providers.anthropic import AnthropicProvider
 from anthropic import AsyncAnthropic
-from openai import AsyncAzureOpenAI
+from openai import AsyncAzureOpenAI # For Azure OpenAI client
 import httpx
 from httpx import Response
 import threading
@@ -121,9 +121,7 @@ def make_client(
 def get_custom_config(model_config):
     custom_config = model_config.get("custom_endpoint", {})
     if not custom_config:
-        raise ValueError(
-            "Custom model requires 'custom_endpoint' configuration"
-        )
+        raise ValueError("Custom model requires 'custom_endpoint' configuration")
 
     url = custom_config.get("url")
     if not url:
@@ -192,7 +190,9 @@ class ModelFactory:
         elif model_type == "anthropic":
             api_key = os.environ.get("ANTHROPIC_API_KEY", None)
             if not api_key:
-                raise ValueError(
+                raise ValueError(
+                    "ANTHROPIC_API_KEY environment variable must be set for Anthropic models."
+                )
             anthropic_client = AsyncAnthropic(api_key=api_key)
             provider = AnthropicProvider(anthropic_client=anthropic_client)
             return AnthropicModel(model_name=model_config["name"], provider=provider)
@@ -234,7 +234,7 @@ class ModelFactory:
                 raise ValueError(
                     f"Azure OpenAI API version environment variable '{api_version_config[1:] if api_version_config.startswith('$') else ''}' not found or is empty."
                 )
-
+
             api_key_config = model_config.get("api_key")
             if not api_key_config:
                 raise ValueError(
@@ -255,7 +255,7 @@ class ModelFactory:
                 azure_endpoint=azure_endpoint,
                 api_version=api_version,
                 api_key=api_key,
-                max_retries=azure_max_retries
+                max_retries=azure_max_retries,
             )
             provider = OpenAIProvider(openai_client=azure_client)
             return OpenAIModel(model_name=model_config["name"], provider=provider)