code-puppy 0.0.198__py3-none-any.whl → 0.0.200__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,12 +6,13 @@ import math
  import signal
  import uuid
  from abc import ABC, abstractmethod
- from typing import Any, Dict, List, Optional, Set, Tuple, Union
+ from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union

  import mcp
  import pydantic
  import pydantic_ai.models
  from pydantic_ai import Agent as PydanticAgent
+ from pydantic_ai import BinaryContent, DocumentUrl, ImageUrl
  from pydantic_ai import RunContext, UsageLimitExceeded
  from pydantic_ai.messages import (
      ModelMessage,
@@ -180,6 +181,21 @@ class BaseAgent(ABC):
              return get_global_model_name()
          return pinned

+     def _clean_binaries(self, messages: List[ModelMessage]) -> List[ModelMessage]:
+         cleaned = []
+         for message in messages:
+             parts = []
+             for part in message.parts:
+                 if hasattr(part, "content") and isinstance(part.content, list):
+                     content = []
+                     for item in part.content:
+                         if not isinstance(item, BinaryContent):
+                             content.append(item)
+                     part.content = content
+                 parts.append(part)
+             cleaned.append(message)
+         return cleaned
+
      # Message history processing methods (moved from state_management.py and message_history_processor.py)
      def _stringify_part(self, part: Any) -> str:
          """Create a stable string representation for a message part.
@@ -213,6 +229,12 @@ class BaseAgent(ABC):
              )
          elif isinstance(content, dict):
              attributes.append(f"content={json.dumps(content, sort_keys=True)}")
+         elif isinstance(content, list):
+             for item in content:
+                 if isinstance(item, str):
+                     attributes.append(f"content={item}")
+                 if isinstance(item, BinaryContent):
+                     attributes.append(f"BinaryContent={hash(item.data)}")
          else:
              attributes.append(f"content={repr(content)}")
          result = "|".join(attributes)
@@ -259,6 +281,13 @@ class BaseAgent(ABC):
              result = json.dumps(part.content.model_dump())
          elif isinstance(part.content, dict):
              result = json.dumps(part.content)
+         elif isinstance(part.content, list):
+             result = ""
+             for item in part.content:
+                 if isinstance(item, str):
+                     result += item + "\n"
+                 if isinstance(item, BinaryContent):
+                     result += f"BinaryContent={hash(item.data)}\n"
          else:
              result = str(part.content)

@@ -460,16 +489,21 @@ class BaseAgent(ABC):

      def get_model_context_length(self) -> int:
          """
-         Get the context length for the currently configured model from models.json
-         """
-         model_configs = ModelFactory.load_config()
-         model_name = get_global_model_name()
-
-         # Get context length from model config
-         model_config = model_configs.get(model_name, {})
-         context_length = model_config.get("context_length", 128000)  # Default value
+         Return the context length for this agent's effective model.

-         return int(context_length)
+         Honors per-agent pinned model via `self.get_model_name()`; falls back
+         to global model when no pin is set. Defaults conservatively on failure.
+         """
+         try:
+             model_configs = ModelFactory.load_config()
+             # Use the agent's effective model (respects /pin_model)
+             model_name = self.get_model_name()
+             model_config = model_configs.get(model_name, {})
+             context_length = model_config.get("context_length", 128000)
+             return int(context_length)
+         except Exception:
+             # Be safe; don't blow up status/compaction if model lookup fails
+             return 128000

      def prune_interrupted_tool_calls(
          self, messages: List[ModelMessage]
@@ -601,6 +635,7 @@ class BaseAgent(ABC):
              f"Final token count after processing: {final_token_count}",
              message_group="token_context_status",
          )
+
          self.set_message_history(result_messages)
          for m in summarized_messages:
              self.add_compacted_message_hash(self.hash_message(m))
@@ -869,28 +904,47 @@ class BaseAgent(ABC):
              self.message_history_processor(ctx, _message_history)
          return self.get_message_history()

-     async def run_with_mcp(self, prompt: str, **kwargs) -> Any:
-         """
-         Run the agent with MCP servers and full cancellation support.
-
-         This method ensures we're always using the current agent instance
-         and handles Ctrl+C interruption properly by creating a cancellable task.
+     async def run_with_mcp(
+         self,
+         prompt: str,
+         *,
+         attachments: Optional[Sequence[BinaryContent]] = None,
+         link_attachments: Optional[Sequence[Union[ImageUrl, DocumentUrl]]] = None,
+         **kwargs,
+     ) -> Any:
+         """Run the agent with MCP servers, attachments, and full cancellation support.

          Args:
-             prompt: The user prompt to process
-             usage_limits: Optional usage limits for the agent
-             **kwargs: Additional arguments to pass to agent.run (e.g., message_history)
+             prompt: Primary user prompt text (may be empty when attachments present).
+             attachments: Local binary payloads (e.g., dragged images) to include.
+             link_attachments: Remote assets (image/document URLs) to include.
+             **kwargs: Additional arguments forwarded to `pydantic_ai.Agent.run`.

          Returns:
-             The agent's response
+             The agent's response.

          Raises:
-             asyncio.CancelledError: When execution is cancelled by user
+             asyncio.CancelledError: When execution is cancelled by user.
          """
          group_id = str(uuid.uuid4())
          # Avoid double-loading: reuse existing agent if already built
          pydantic_agent = self._code_generation_agent or self.reload_code_generation_agent()

+         # Build combined prompt payload when attachments are provided.
+         attachment_parts: List[Any] = []
+         if attachments:
+             attachment_parts.extend(list(attachments))
+         if link_attachments:
+             attachment_parts.extend(list(link_attachments))
+
+         if attachment_parts:
+             prompt_payload: Union[str, List[Any]] = []
+             if prompt:
+                 prompt_payload.append(prompt)
+             prompt_payload.extend(attachment_parts)
+         else:
+             prompt_payload = prompt
+
          async def run_agent_task():
              try:
                  self.set_message_history(
@@ -898,7 +952,7 @@ class BaseAgent(ABC):
                  )
                  usage_limits = pydantic_ai.agent._usage.UsageLimits(request_limit=get_message_limit())
                  result_ = await pydantic_agent.run(
-                     prompt,
+                     prompt_payload,
                      message_history=self.get_message_history(),
                      usage_limits=usage_limits,
                      **kwargs,
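Taken together, the new keyword-only parameters let callers mix text and media in a single run. A minimal usage sketch, assuming an already-constructed agent instance and a local test image (file names, URLs, and the surrounding async context are illustrative):

    from pydantic_ai import BinaryContent, ImageUrl

    async def ask_about_images(agent):
        # `agent` is any BaseAgent subclass instance; paths/URLs are placeholders.
        with open("screenshot.png", "rb") as fh:
            png_bytes = fh.read()
        result = await agent.run_with_mcp(
            "What does this screenshot show?",
            attachments=[BinaryContent(data=png_bytes, media_type="image/png")],
            link_attachments=[ImageUrl(url="https://example.com/diagram.png")],
        )
        return result.output

When both attachment lists are empty, the method passes the plain string through unchanged, so existing callers keep their old behaviour.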
@@ -0,0 +1,375 @@
+ """Helpers for parsing file attachments from interactive prompts."""
+
+ from __future__ import annotations
+
+ import mimetypes
+ import os
+ import shlex
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Iterable, List, Sequence
+
+ from pydantic_ai import BinaryContent, DocumentUrl, ImageUrl
+
+ SUPPORTED_INLINE_SCHEMES = {"http", "https"}
+
+ # Allow common extensions people drag in the terminal.
+ DEFAULT_ACCEPTED_IMAGE_EXTENSIONS = {
+     ".png",
+     ".jpg",
+     ".jpeg",
+     ".gif",
+     ".bmp",
+     ".webp",
+     ".tiff",
+ }
+ DEFAULT_ACCEPTED_DOCUMENT_EXTENSIONS = set()
+
+
+ @dataclass
+ class PromptAttachment:
+     """Represents a binary attachment parsed from the input prompt."""
+
+     placeholder: str
+     content: BinaryContent
+
+
+ @dataclass
+ class PromptLinkAttachment:
+     """Represents a URL attachment supported by pydantic-ai."""
+
+     placeholder: str
+     url_part: ImageUrl | DocumentUrl
+
+
+ @dataclass
+ class ProcessedPrompt:
+     """Container for parsed input prompt and attachments."""
+
+     prompt: str
+     attachments: List[PromptAttachment]
+     link_attachments: List[PromptLinkAttachment]
+     warnings: List[str]
+
+
+ class AttachmentParsingError(RuntimeError):
+     """Raised when we fail to load a user-provided attachment."""
+
+
+ def _is_probable_path(token: str) -> bool:
+     """Heuristically determine whether a token is a local filesystem path."""
+
+     if not token:
+         return False
+     if token.startswith("#"):
+         return False
+     # Windows drive letters or Unix absolute/relative paths
+     if token.startswith(("/", "~", "./", "../")):
+         return True
+     if len(token) >= 2 and token[1] == ":":
+         return True
+     # Things like `path/to/file.png`
+     return os.sep in token or "\"" in token
+
+
+ def _unescape_dragged_path(token: str) -> str:
+     """Convert backslash-escaped spaces used by drag-and-drop to literal spaces."""
+     # Shell/terminal escaping typically produces '\ ' sequences
+     return token.replace(r"\ ", " ")
+
+
+ def _normalise_path(token: str) -> Path:
+     """Expand user shortcuts and resolve relative components without touching fs."""
+     # First unescape any drag-and-drop backslash spaces before other expansions
+     unescaped = _unescape_dragged_path(token)
+     expanded = os.path.expanduser(unescaped)
+     try:
+         # This will not resolve against symlinks because we do not call resolve()
+         return Path(expanded).absolute()
+     except Exception as exc:
+         raise AttachmentParsingError(f"Invalid path '{token}': {exc}") from exc
+
+
+ def _determine_media_type(path: Path) -> str:
+     """Best-effort media type detection for images only."""
+
+     mime, _ = mimetypes.guess_type(path.name)
+     if mime:
+         return mime
+     if path.suffix.lower() in DEFAULT_ACCEPTED_IMAGE_EXTENSIONS:
+         return "image/png"
+     return "application/octet-stream"
+
+
+ def _load_binary(path: Path) -> bytes:
+     try:
+         return path.read_bytes()
+     except FileNotFoundError as exc:
+         raise AttachmentParsingError(f"Attachment not found: {path}") from exc
+     except PermissionError as exc:
+         raise AttachmentParsingError(f"Cannot read attachment (permission denied): {path}") from exc
+     except OSError as exc:
+         raise AttachmentParsingError(f"Failed to read attachment {path}: {exc}") from exc
+
+
+ def _tokenise(prompt: str) -> Iterable[str]:
+     """Split the prompt preserving quoted segments using shell-like semantics."""
+
+     if not prompt:
+         return []
+     try:
+         # On Windows, avoid POSIX escaping so backslashes are preserved
+         posix_mode = os.name != "nt"
+         return shlex.split(prompt, posix=posix_mode)
+     except ValueError:
+         # Fallback naive split when shlex fails (e.g. unmatched quotes)
+         return prompt.split()
+
+
+ def _strip_attachment_token(token: str) -> str:
+     """Trim surrounding whitespace/punctuation terminals tack onto paths."""
+
+     return token.strip().strip(",;:()[]{}")
+
+
+ def _candidate_paths(
+     tokens: Sequence[str],
+     start: int,
+     max_span: int = 5,
+ ) -> Iterable[tuple[str, int]]:
+     """Yield space-joined token slices to reconstruct paths with spaces."""
+
+     collected: list[str] = []
+     for offset, raw in enumerate(tokens[start : start + max_span]):
+         collected.append(raw)
+         yield " ".join(collected), start + offset + 1
+
+
+ def _is_supported_extension(path: Path) -> bool:
+     suffix = path.suffix.lower()
+     return suffix in DEFAULT_ACCEPTED_IMAGE_EXTENSIONS | DEFAULT_ACCEPTED_DOCUMENT_EXTENSIONS
+
+
+ def _parse_link(token: str) -> PromptLinkAttachment | None:
+     if "://" not in token:
+         return None
+     scheme = token.split(":", 1)[0].lower()
+     if scheme not in SUPPORTED_INLINE_SCHEMES:
+         return None
+     if token.lower().endswith(".pdf"):
+         return PromptLinkAttachment(
+             placeholder=token,
+             url_part=DocumentUrl(url=token),
+         )
+     return PromptLinkAttachment(
+         placeholder=token,
+         url_part=ImageUrl(url=token),
+     )
+
+
+ @dataclass
+ class _DetectedPath:
+     placeholder: str
+     path: Path | None
+     start_index: int
+     consumed_until: int
+     unsupported: bool = False
+     link: PromptLinkAttachment | None = None
+
+     def has_path(self) -> bool:
+         return self.path is not None and not self.unsupported
+
+
+ def _detect_path_tokens(prompt: str) -> tuple[list[_DetectedPath], list[str]]:
+     # Preserve backslash-spaces from drag-and-drop before shlex tokenization
+     # Replace '\ ' with a marker that shlex won't split, then restore later
+     ESCAPE_MARKER = "\u0000ESCAPED_SPACE\u0000"
+     masked_prompt = prompt.replace(r"\ ", ESCAPE_MARKER)
+     tokens = list(_tokenise(masked_prompt))
+     # Restore escaped spaces in individual tokens
+     tokens = [t.replace(ESCAPE_MARKER, " ") for t in tokens]
+
+     detections: list[_DetectedPath] = []
+     warnings: list[str] = []
+
+     index = 0
+     while index < len(tokens):
+         token = tokens[index]
+
+         link_attachment = _parse_link(token)
+         if link_attachment:
+             detections.append(
+                 _DetectedPath(
+                     placeholder=token,
+                     path=None,
+                     start_index=index,
+                     consumed_until=index + 1,
+                     link=link_attachment,
+                 )
+             )
+             index += 1
+             continue
+
+         stripped_token = _strip_attachment_token(token)
+         if not _is_probable_path(stripped_token):
+             index += 1
+             continue
+
+         start_index = index
+         consumed_until = index + 1
+         candidate_path_token = stripped_token
+         # For placeholder: try to reconstruct escaped representation; if none, use raw token
+         original_tokens_for_slice = list(_tokenise(masked_prompt))[index:consumed_until]
+         candidate_placeholder = "".join(
+             ot.replace(ESCAPE_MARKER, r"\ ") if ESCAPE_MARKER in ot else ot
+             for ot in original_tokens_for_slice
+         )
+         # If placeholder seems identical to raw token, just use the raw token
+         if candidate_placeholder == token.replace(" ", r"\ "):
+             candidate_placeholder = token
+
+         try:
+             path = _normalise_path(candidate_path_token)
+         except AttachmentParsingError as exc:
+             warnings.append(str(exc))
+             index = consumed_until
+             continue
+
+         if not path.exists() or not path.is_file():
+             found_span = False
+             last_path = path
+             for joined, end_index in _candidate_paths(tokens, index):
+                 stripped_joined = _strip_attachment_token(joined)
+                 if not _is_probable_path(stripped_joined):
+                     continue
+                 candidate_path_token = stripped_joined
+                 candidate_placeholder = joined
+                 consumed_until = end_index
+                 try:
+                     last_path = _normalise_path(candidate_path_token)
+                 except AttachmentParsingError as exc:
+                     warnings.append(str(exc))
+                     found_span = False
+                     break
+                 if last_path.exists() and last_path.is_file():
+                     path = last_path
+                     found_span = True
+                     # We'll rebuild escaped placeholder after this block
+                     break
+             if not found_span:
+                 warnings.append(f"Attachment ignored (not a file): {path}")
+                 index += 1
+                 continue
+             # Reconstruct escaped placeholder for multi-token paths
+             original_tokens_for_path = tokens[index:consumed_until]
+             escaped_placeholder = " ".join(original_tokens_for_path).replace(" ", r"\ ")
+             candidate_placeholder = escaped_placeholder
+         if not _is_supported_extension(path):
+             detections.append(
+                 _DetectedPath(
+                     placeholder=candidate_placeholder,
+                     path=path,
+                     start_index=start_index,
+                     consumed_until=consumed_until,
+                     unsupported=True,
+                 )
+             )
+             index = consumed_until
+             continue
+
+         # Reconstruct escaped placeholder for exact replacement later
+         # For unquoted spaces, keep the original literal token from the prompt
+         # so replacement matches precisely
+         escaped_placeholder = candidate_placeholder
+
+         detections.append(
+             _DetectedPath(
+                 placeholder=candidate_placeholder,
+                 path=path,
+                 start_index=start_index,
+                 consumed_until=consumed_until,
+             )
+         )
+         index = consumed_until
+
+     return detections, warnings
+
+
+ def parse_prompt_attachments(prompt: str) -> ProcessedPrompt:
+     """Extract attachments from the prompt returning cleaned text and metadata."""
+
+     attachments: List[PromptAttachment] = []
+
+     detections, detection_warnings = _detect_path_tokens(prompt)
+     warnings: List[str] = list(detection_warnings)
+
+     link_attachments = [d.link for d in detections if d.link is not None]
+
+     for detection in detections:
+         if detection.link is not None and detection.path is None:
+             continue
+         if detection.path is None:
+             continue
+         if detection.unsupported:
+             warnings.append(
+                 f"Unsupported attachment type: {detection.path.suffix or detection.path.name}"
+             )
+             continue
+
+         try:
+             media_type = _determine_media_type(detection.path)
+             data = _load_binary(detection.path)
+         except AttachmentParsingError as exc:
+             warnings.append(str(exc))
+             continue
+         attachments.append(
+             PromptAttachment(
+                 placeholder=detection.placeholder,
+                 content=BinaryContent(data=data, media_type=media_type),
+             )
+         )
+
+     # Rebuild cleaned_prompt by skipping tokens consumed as file paths.
+     # This preserves original punctuation and spacing for non-attachment tokens.
+     ESCAPE_MARKER = "\u0000ESCAPED_SPACE\u0000"
+     masked = prompt.replace(r"\ ", ESCAPE_MARKER)
+     tokens = list(_tokenise(masked))
+
+     # Build exact token spans for file attachments (supported or unsupported)
+     # Skip spans for: supported files (path present and not unsupported) and links.
+     spans = [
+         (d.start_index, d.consumed_until)
+         for d in detections
+         if (d.path is not None and not d.unsupported) or (d.link is not None and d.path is None)
+     ]
+     cleaned_parts: list[str] = []
+     i = 0
+     while i < len(tokens):
+         span = next((s for s in spans if s[0] <= i < s[1]), None)
+         if span is not None:
+             i = span[1]
+             continue
+         cleaned_parts.append(tokens[i].replace(ESCAPE_MARKER, " "))
+         i += 1
+
+     cleaned_prompt = " ".join(cleaned_parts).strip()
+     cleaned_prompt = " ".join(cleaned_prompt.split())
+
+     if cleaned_prompt == "" and attachments:
+         cleaned_prompt = "Describe the attached files in detail."
+
+     return ProcessedPrompt(
+         prompt=cleaned_prompt,
+         attachments=attachments,
+         link_attachments=link_attachments,
+         warnings=warnings,
+     )
+
+
+ __all__ = [
+     "ProcessedPrompt",
+     "PromptAttachment",
+     "PromptLinkAttachment",
+     "AttachmentParsingError",
+     "parse_prompt_attachments",
+ ]
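A rough feel for how this new parser behaves, assuming the dragged file actually exists on disk (paths and URLs below are placeholders):

    from code_puppy.command_line.attachments import parse_prompt_attachments

    processed = parse_prompt_attachments(
        "compare /tmp/screenshot.png with https://example.com/mockup.png please"
    )
    print(processed.prompt)                 # "compare with please" once both tokens are stripped
    print(len(processed.attachments))       # 1 if /tmp/screenshot.png exists, otherwise 0 plus a warning
    print(len(processed.link_attachments))  # 1 -> ImageUrl for the https link
    print(processed.warnings)

Non-existent paths are reported through `warnings` rather than raising, and a prompt that becomes empty after stripping falls back to "Describe the attached files in detail." when at least one binary attachment was loaded.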
@@ -29,13 +29,22 @@ def set_active_model(model_name: str):
      Sets the active model name by updating the config (for persistence).
      """
      set_model_name(model_name)
-     # Reload agent globally
+     # Reload the currently active agent so the new model takes effect immediately
      try:
-         from code_puppy.agent import reload_code_generation_agent
-
-         reload_code_generation_agent()  # This will reload dynamically everywhere
+         from code_puppy.agents import get_current_agent
+
+         current_agent = get_current_agent()
+         # JSON agents may need to refresh their config before reload
+         if hasattr(current_agent, "refresh_config"):
+             try:
+                 current_agent.refresh_config()
+             except Exception:
+                 # Non-fatal, continue to reload
+                 ...
+         current_agent.reload_code_generation_agent()
      except Exception:
-         pass  # If reload fails, agent will still be switched next interpreter run
+         # Swallow errors to avoid breaking the prompt flow; model persists for next run
+         pass


  class ModelNameCompleter(Completer):
@@ -17,6 +17,7 @@ from prompt_toolkit.history import FileHistory
  from prompt_toolkit.filters import is_searching
  from prompt_toolkit.key_binding import KeyBindings
  from prompt_toolkit.keys import Keys
+ from prompt_toolkit.layout.processors import Processor, Transformation
  from prompt_toolkit.styles import Style

  from code_puppy.command_line.file_path_completion import FilePathCompleter
@@ -33,6 +34,11 @@ from code_puppy.config import (
      get_puppy_name,
      get_value,
  )
+ from code_puppy.command_line.attachments import (
+     DEFAULT_ACCEPTED_DOCUMENT_EXTENSIONS,
+     DEFAULT_ACCEPTED_IMAGE_EXTENSIONS,
+     _detect_path_tokens, _tokenise,
+ )


  class SetCompleter(Completer):
@@ -98,6 +104,117 @@ class SetCompleter(Completer):
              )


+ class AttachmentPlaceholderProcessor(Processor):
+     """Display friendly placeholders for recognised attachments."""
+
+     _PLACEHOLDER_STYLE = "class:attachment-placeholder"
+
+     def apply_transformation(self, transformation_input):
+         document = transformation_input.document
+         text = document.text
+         if not text:
+             return Transformation(list(transformation_input.fragments))
+
+         detections, _warnings = _detect_path_tokens(text)
+         replacements: list[tuple[int, int, str]] = []
+         search_cursor = 0
+         ESCAPE_MARKER = "\u0000ESCAPED_SPACE\u0000"
+         masked_text = text.replace(r"\ ", ESCAPE_MARKER)
+         token_view = list(_tokenise(masked_text))
+         for detection in detections:
+             display_text: str | None = None
+             if detection.path and detection.has_path():
+                 suffix = detection.path.suffix.lower()
+                 if suffix in DEFAULT_ACCEPTED_IMAGE_EXTENSIONS:
+                     display_text = f"[{suffix.lstrip('.') or 'image'} image]"
+                 elif suffix in DEFAULT_ACCEPTED_DOCUMENT_EXTENSIONS:
+                     display_text = f"[{suffix.lstrip('.') or 'file'} document]"
+                 else:
+                     display_text = "[file attachment]"
+             elif detection.link is not None:
+                 display_text = "[link]"
+
+             if not display_text:
+                 continue
+
+             # Use token-span for robust lookup (handles escaped spaces)
+             span_tokens = token_view[detection.start_index:detection.consumed_until]
+             raw_span = " ".join(span_tokens).replace(ESCAPE_MARKER, r"\ ")
+             index = text.find(raw_span, search_cursor)
+             span_len = len(raw_span)
+             if index == -1:
+                 # Fallback to placeholder string
+                 placeholder = detection.placeholder
+                 index = text.find(placeholder, search_cursor)
+                 span_len = len(placeholder)
+                 if index == -1:
+                     continue
+             replacements.append((index, index + span_len, display_text))
+             search_cursor = index + span_len
+
+         if not replacements:
+             return Transformation(list(transformation_input.fragments))
+
+         replacements.sort(key=lambda item: item[0])
+
+         new_fragments: list[tuple[str, str]] = []
+         source_to_display_map: list[int] = []
+         display_to_source_map: list[int] = []
+
+         source_index = 0
+         display_index = 0
+
+         def append_plain_segment(segment: str) -> None:
+             nonlocal source_index, display_index
+             if not segment:
+                 return
+             new_fragments.append(("", segment))
+             for _ in segment:
+                 source_to_display_map.append(display_index)
+                 display_to_source_map.append(source_index)
+                 source_index += 1
+                 display_index += 1
+
+         for start, end, replacement_text in replacements:
+             if start > source_index:
+                 append_plain_segment(text[source_index:start])
+
+             placeholder = replacement_text or ""
+             placeholder_start = display_index
+             if placeholder:
+                 new_fragments.append((self._PLACEHOLDER_STYLE, placeholder))
+                 for _ in placeholder:
+                     display_to_source_map.append(start)
+                     display_index += 1
+
+             for _ in text[source_index:end]:
+                 source_to_display_map.append(placeholder_start if placeholder else display_index)
+                 source_index += 1
+
+         if source_index < len(text):
+             append_plain_segment(text[source_index:])
+
+         def source_to_display(pos: int) -> int:
+             if pos < 0:
+                 return 0
+             if pos < len(source_to_display_map):
+                 return source_to_display_map[pos]
+             return display_index
+
+         def display_to_source(pos: int) -> int:
+             if pos < 0:
+                 return 0
+             if pos < len(display_to_source_map):
+                 return display_to_source_map[pos]
+             return len(source_to_display_map)
+
+         return Transformation(
+             new_fragments,
+             source_to_display=source_to_display,
+             display_to_source=display_to_source,
+         )
+
+
  class CDCompleter(Completer):
      def __init__(self, trigger: str = "/cd"):
          self.trigger = trigger
@@ -247,6 +364,7 @@ async def get_input_with_combined_completion(
          history=history,
          complete_while_typing=True,
          key_bindings=bindings,
+         input_processors=[AttachmentPlaceholderProcessor()],
      )
      # If they pass a string, backward-compat: convert it to formatted_text
      if isinstance(prompt_str, str):
@@ -263,6 +381,7 @@ async def get_input_with_combined_completion(
              "model": "bold cyan",
              "cwd": "bold green",
              "arrow": "bold yellow",
+             "attachment-placeholder": "italic cyan",
          }
      )
      text = await session.prompt_async(prompt_str, style=style)
code_puppy/main.py CHANGED
@@ -1,12 +1,10 @@
  import argparse
  import asyncio
- import json
  import os
  import subprocess
  import sys
  import time
  import webbrowser
- from datetime import datetime
  from pathlib import Path

  from rich.console import Console, ConsoleOptions, RenderResult
@@ -20,6 +18,7 @@ from code_puppy.command_line.prompt_toolkit_completion import (
      get_input_with_combined_completion,
      get_prompt_with_active_model,
  )
+ from code_puppy.command_line.attachments import parse_prompt_attachments
  from code_puppy.config import (
      AUTOSAVE_DIR,
      COMMAND_HISTORY_FILE,
@@ -28,7 +27,7 @@ from code_puppy.config import (
      initialize_command_history_file,
      save_command_to_history,
  )
- from code_puppy.session_storage import list_sessions, load_session, restore_autosave_interactively
+ from code_puppy.session_storage import restore_autosave_interactively
  from code_puppy.http_utils import find_available_port
  from code_puppy.tools.common import console

@@ -313,33 +312,24 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> Non
              awaiting_input = False

              # Run with or without spinner based on whether we're awaiting input
-             if awaiting_input:
-                 # No spinner - use agent_manager's run_with_mcp method
+             response = await run_prompt_with_attachments(
+                 agent,
+                 initial_command,
+                 spinner_console=display_console,
+                 use_spinner=not awaiting_input,
+             )
+             if response is not None:
+                 agent_response = response.output

-                 response = await agent.run_with_mcp(
-                     initial_command,
+                 emit_system_message(
+                     f"\n[bold purple]AGENT RESPONSE: [/bold purple]\n{agent_response}"
                  )
-             else:
-                 # Use our custom spinner for better compatibility with user input
-                 from code_puppy.messaging.spinner import ConsoleSpinner
-
-                 with ConsoleSpinner(console=display_console):
-                     # Use agent_manager's run_with_mcp method
-                     response = await agent.run_with_mcp(
-                         initial_command,
-                     )
-
-             agent_response = response.output
-
-             emit_system_message(
-                 f"\n[bold purple]AGENT RESPONSE: [/bold purple]\n{agent_response}"
-             )
-             emit_system_message("\n" + "=" * 50)
-             emit_info("[bold green]🐶 Continuing in Interactive Mode[/bold green]")
-             emit_system_message(
-                 "Your command and response are preserved in the conversation history."
-             )
-             emit_system_message("=" * 50 + "\n")
+                 emit_system_message("\n" + "=" * 50)
+                 emit_info("[bold green]🐶 Continuing in Interactive Mode[/bold green]")
+                 emit_system_message(
+                     "Your command and response are preserved in the conversation history."
+                 )
+                 emit_system_message("=" * 50 + "\n")

          except Exception as e:
              from code_puppy.messaging import emit_error
@@ -425,9 +415,13 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> Non
                  emit_info(f"[dim]Auto-save session rotated to: {new_session_id}[/dim]")
                  continue

-             # Handle / commands before anything else
-             if task.strip().startswith("/"):
-                 command_result = handle_command(task.strip())
+             # Parse attachments first so leading paths aren't misread as commands
+             processed_for_commands = parse_prompt_attachments(task)
+             cleaned_for_commands = (processed_for_commands.prompt or "").strip()
+
+             # Handle / commands based on cleaned prompt (after stripping attachments)
+             if cleaned_for_commands.startswith("/"):
+                 command_result = handle_command(cleaned_for_commands)
                  if command_result is True:
                      continue
                  elif isinstance(command_result, str):
@@ -446,14 +440,12 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> Non

              # No need to get agent directly - use manager's run methods

-             # Use our custom spinner for better compatibility with user input
-             from code_puppy.messaging import emit_warning
-             from code_puppy.messaging.spinner import ConsoleSpinner
-
-             with ConsoleSpinner(console=message_renderer.console):
-                 result = await current_agent.run_with_mcp(
-                     task,
-                 )
+             # Use our custom helper to enable attachment handling with spinner support
+             result = await run_prompt_with_attachments(
+                 current_agent,
+                 task,
+                 spinner_console=message_renderer.console,
+             )
              # Check if the task was cancelled (but don't show message if we just killed processes)
              if result is None:
                  continue
@@ -504,6 +496,57 @@ def prettier_code_blocks():
      Markdown.elements["fence"] = SimpleCodeBlock


+ async def run_prompt_with_attachments(
+     agent,
+     raw_prompt: str,
+     *,
+     spinner_console=None,
+     use_spinner: bool = True,
+ ):
+     """Run the agent after parsing CLI attachments for image/document support."""
+     from code_puppy.messaging import emit_system_message, emit_warning
+
+     processed_prompt = parse_prompt_attachments(raw_prompt)
+
+     for warning in processed_prompt.warnings:
+         emit_warning(warning)
+
+     summary_parts = []
+     if processed_prompt.attachments:
+         summary_parts.append(f"binary files: {len(processed_prompt.attachments)}")
+     if processed_prompt.link_attachments:
+         summary_parts.append(f"urls: {len(processed_prompt.link_attachments)}")
+     if summary_parts:
+         emit_system_message(
+             "[dim]Attachments detected -> " + ", ".join(summary_parts) + "[/dim]"
+         )
+
+     if not processed_prompt.prompt:
+         emit_warning(
+             "Prompt is empty after removing attachments; add instructions and retry."
+         )
+         return None
+
+     attachments = [attachment.content for attachment in processed_prompt.attachments]
+     link_attachments = [link.url_part for link in processed_prompt.link_attachments]
+
+     if use_spinner and spinner_console is not None:
+         from code_puppy.messaging.spinner import ConsoleSpinner
+
+         with ConsoleSpinner(console=spinner_console):
+             return await agent.run_with_mcp(
+                 processed_prompt.prompt,
+                 attachments=attachments,
+                 link_attachments=link_attachments,
+             )
+
+     return await agent.run_with_mcp(
+         processed_prompt.prompt,
+         attachments=attachments,
+         link_attachments=link_attachments,
+     )
+
+
  async def execute_single_prompt(prompt: str, message_renderer) -> None:
      """Execute a single prompt and exit (for -p flag)."""
      from code_puppy.messaging import emit_info, emit_system_message
@@ -511,14 +554,15 @@ async def execute_single_prompt(prompt: str, message_renderer) -> None:
      emit_info(f"[bold blue]Executing prompt:[/bold blue] {prompt}")

      try:
-         # Get agent through runtime manager and use its run_with_mcp method
+         # Get agent through runtime manager and use helper for attachments
          agent = get_current_agent()
-         from code_puppy.messaging.spinner import ConsoleSpinner
-
-         with ConsoleSpinner(console=message_renderer.console):
-             response = await agent.run_with_mcp(
-                 prompt,
-             )
+         response = await run_prompt_with_attachments(
+             agent,
+             prompt,
+             spinner_console=message_renderer.console,
+         )
+         if response is None:
+             return

          agent_response = response.output
          emit_system_message(
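Both the interactive loop and the `-p` path now funnel through `run_prompt_with_attachments`, so callers must treat `None` as "nothing was run" (an empty prompt after attachment stripping, or a cancelled task). A minimal sketch of the calling pattern, to be run inside an async function (the console variable is illustrative):

    result = await run_prompt_with_attachments(
        get_current_agent(),
        "summarize ./notes.png",
        spinner_console=some_rich_console,
    )
    if result is None:
        return  # nothing to do: empty prompt after stripping, or the run was cancelled
    print(result.output)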
@@ -217,6 +217,20 @@ class SettingsScreen(ModalScreen):
          # Save model selection
          if selected_model:
              set_model_name(selected_model)
+             # Reload the active agent so model switch takes effect immediately
+             try:
+                 from code_puppy.agents import get_current_agent
+
+                 current_agent = get_current_agent()
+                 if hasattr(current_agent, "refresh_config"):
+                     try:
+                         current_agent.refresh_config()
+                     except Exception:
+                         ...
+                 current_agent.reload_code_generation_agent()
+             except Exception:
+                 # Non-fatal: settings saved; reload will happen on next run if needed
+                 pass

          set_config_value("yolo_mode", yolo_mode)

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: code-puppy
- Version: 0.0.198
+ Version: 0.0.200
  Summary: Code generation agent
  Project-URL: repository, https://github.com/mpfaffenberger/code_puppy
  Project-URL: HomePage, https://github.com/mpfaffenberger/code_puppy
@@ -3,7 +3,7 @@ code_puppy/__main__.py,sha256=pDVssJOWP8A83iFkxMLY9YteHYat0EyWDQqMkKHpWp4,203
  code_puppy/callbacks.py,sha256=ukSgVFaEO68o6J09qFwDrnmNanrVv3toTLQhS504Meo,6162
  code_puppy/config.py,sha256=xT-nU1U4n7u8pyzJPG18-cJZBKv5OZI2CtHLt9DGRzU,26065
  code_puppy/http_utils.py,sha256=YLd8Y16idbI32JGeBXG8n5rT4o4X_zxk9FgUvK9XFo8,8248
- code_puppy/main.py,sha256=TIFaySHV3um9Q3BDUjCjh6s-WWqcNnTY3EsD2WdW6MQ,22245
+ code_puppy/main.py,sha256=WqlOivWMzm0ijLB9qBHK5Q_adJurzkD_ywGEUVD16RA,23770
  code_puppy/model_factory.py,sha256=ZbIAJWMNKNdTCEMQK8Ig6TDDZlVNyGO9hOLHoLLPMYw,15397
  code_puppy/models.json,sha256=dClUciCo2RlVDs0ZAQCIur8MOavZUEAXHEecn0uPa-4,1629
  code_puppy/reopenable_async_client.py,sha256=4UJRaMp5np8cbef9F0zKQ7TPKOfyf5U-Kv-0zYUWDho,8274
@@ -27,15 +27,16 @@ code_puppy/agents/agent_qa_expert.py,sha256=wCGXzuAVElT5c-QigQVb8JX9Gw0JmViCUQQn
  code_puppy/agents/agent_qa_kitten.py,sha256=5PeFFSwCFlTUvP6h5bGntx0xv5NmRwBiw0HnMqY8nLI,9107
  code_puppy/agents/agent_security_auditor.py,sha256=ADafi2x4gqXw6m-Nch5vjiKjO0Urcbj0x4zxHti3gDw,3712
  code_puppy/agents/agent_typescript_reviewer.py,sha256=EDY1mFkVpuJ1BPXsJFu2wQ2pfAV-90ipc_8w9ymrKPg,4054
- code_puppy/agents/base_agent.py,sha256=ikeV6Sui3HAagJBPTtI9T9pCDCFTCYDNEkFFw7XU21Y,39084
+ code_puppy/agents/base_agent.py,sha256=dsnoFEHXVhXgK-WIOskCU1Ccx6uz5onfiycWAMxBhaw,41421
  code_puppy/agents/json_agent.py,sha256=lhopDJDoiSGHvD8A6t50hi9ZBoNRKgUywfxd0Po_Dzc,4886
  code_puppy/command_line/__init__.py,sha256=y7WeRemfYppk8KVbCGeAIiTuiOszIURCDjOMZv_YRmU,45
+ code_puppy/command_line/attachments.py,sha256=GSK-clGmJ1nmZD1XDqnWW7f9YiYLwjgu3D2iwvG6OMc,12362
  code_puppy/command_line/command_handler.py,sha256=alxMe5v_4jq8Sm6HETsgfF-VoDtgExj9dVzxP77fwmY,31614
  code_puppy/command_line/file_path_completion.py,sha256=gw8NpIxa6GOpczUJRyh7VNZwoXKKn-yvCqit7h2y6Gg,2931
  code_puppy/command_line/load_context_completion.py,sha256=6eZxV6Bs-EFwZjN93V8ZDZUC-6RaWxvtZk-04Wtikyw,2240
- code_puppy/command_line/model_picker_completion.py,sha256=vYNCZS1QWu6fxF__hTwpc7jwH7h_48wUxrnITawc83E,4140
+ code_puppy/command_line/model_picker_completion.py,sha256=uqwpbMYnCcWUZZ10Y4pMBKBfW52wQ-KdML2PO4Xjwr0,4501
  code_puppy/command_line/motd.py,sha256=PEdkp3ZnydVfvd7mNJylm8YyFNUKg9jmY6uwkA1em8c,2152
- code_puppy/command_line/prompt_toolkit_completion.py,sha256=_8SUUCKfjOxgNMkcHTeB08NIjCeqL3utHljROXLTEZE,10656
+ code_puppy/command_line/prompt_toolkit_completion.py,sha256=VZanuLcv1Py4sknTJWlJOQ6IqWqDjlHPoBcoeHEcPCA,15325
  code_puppy/command_line/utils.py,sha256=7eyxDHjPjPB9wGDJQQcXV_zOsGdYsFgI0SGCetVmTqE,1251
  code_puppy/command_line/mcp/__init__.py,sha256=0-OQuwjq_pLiTVJ1_NrirVwdRerghyKs_MTZkwPC7YY,315
  code_puppy/command_line/mcp/add_command.py,sha256=lZ09RpFDIeghX1zhc2YIAqBASs5Ra52x5YAasUKvqJg,6409
@@ -120,11 +121,11 @@ code_puppy/tui/screens/__init__.py,sha256=qxiJKyO3MKCNdPjUuHA2-Pnpda0JN20n7e9sU2
  code_puppy/tui/screens/autosave_picker.py,sha256=9bazha2C5N3Xg_VcmpcTv-CYOBq_mwcECBfrQM9tHxA,5416
  code_puppy/tui/screens/help.py,sha256=eJuPaOOCp7ZSUlecearqsuX6caxWv7NQszUh0tZJjBM,3232
  code_puppy/tui/screens/mcp_install_wizard.py,sha256=vObpQwLbXjQsxmSg-WCasoev1usEi0pollKnL0SHu9U,27693
- code_puppy/tui/screens/settings.py,sha256=EsoL_gbN5FpEXGuDqhtdDznNZy_eGNMMuZnWuARSWi8,10790
+ code_puppy/tui/screens/settings.py,sha256=EoMxiguyeF0srwV1bj4_MG9rrxkNthh6TdTNsxnXLfE,11460
  code_puppy/tui/screens/tools.py,sha256=3pr2Xkpa9Js6Yhf1A3_wQVRzFOui-KDB82LwrsdBtyk,1715
- code_puppy-0.0.198.data/data/code_puppy/models.json,sha256=dClUciCo2RlVDs0ZAQCIur8MOavZUEAXHEecn0uPa-4,1629
- code_puppy-0.0.198.dist-info/METADATA,sha256=p-54EnWxhZ2h9yAcVbjToU39Fqch7Si6kmSVHCtqAWA,20759
- code_puppy-0.0.198.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- code_puppy-0.0.198.dist-info/entry_points.txt,sha256=Tp4eQC99WY3HOKd3sdvb22vZODRq0XkZVNpXOag_KdI,91
- code_puppy-0.0.198.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
- code_puppy-0.0.198.dist-info/RECORD,,
+ code_puppy-0.0.200.data/data/code_puppy/models.json,sha256=dClUciCo2RlVDs0ZAQCIur8MOavZUEAXHEecn0uPa-4,1629
+ code_puppy-0.0.200.dist-info/METADATA,sha256=sOBkce80U23YzafPl1s4WILA0Ij0fkYCFICRcIAObXg,20759
+ code_puppy-0.0.200.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ code_puppy-0.0.200.dist-info/entry_points.txt,sha256=Tp4eQC99WY3HOKd3sdvb22vZODRq0XkZVNpXOag_KdI,91
+ code_puppy-0.0.200.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
+ code_puppy-0.0.200.dist-info/RECORD,,