code-puppy 0.0.77__py3-none-any.whl → 0.0.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
code_puppy/agent.py CHANGED
@@ -8,6 +8,7 @@ from pydantic_ai.mcp import MCPServerSSE
8
8
  from code_puppy.agent_prompts import get_system_prompt
9
9
  from code_puppy.model_factory import ModelFactory
10
10
  from code_puppy.session_memory import SessionMemory
11
+ from code_puppy.state_management import message_history_accumulator
11
12
  from code_puppy.tools import register_all_tools
12
13
  from code_puppy.tools.common import console
13
14
 
@@ -83,11 +84,6 @@ def reload_code_generation_agent():
83
84
  global _code_generation_agent, _LAST_MODEL_NAME
84
85
  from code_puppy.config import get_model_name
85
86
 
86
- model_name = get_model_name()
87
- console.print(f"[bold cyan]Loading Model: {model_name}")
88
- global _code_generation_agent, _LAST_MODEL_NAME
89
- from code_puppy.config import get_model_name
90
-
91
87
  model_name = get_model_name()
92
88
  console.print(f"[bold cyan]Loading Model: {model_name}[/bold cyan]")
93
89
  models_path = (
@@ -105,6 +101,7 @@ def reload_code_generation_agent():
105
101
  instructions=instructions,
106
102
  output_type=str,
107
103
  retries=3,
104
+ history_processors=[message_history_accumulator]
108
105
  )
109
106
  register_all_tools(agent)
110
107
  _code_generation_agent = agent
code_puppy/main.py CHANGED
@@ -9,17 +9,19 @@ from rich.markdown import CodeBlock, Markdown
9
9
  from rich.syntax import Syntax
10
10
  from rich.text import Text
11
11
 
12
- from code_puppy import __version__
12
+ from code_puppy import __version__, state_management
13
13
  from code_puppy.agent import get_code_generation_agent, session_memory
14
14
  from code_puppy.command_line.prompt_toolkit_completion import (
15
15
  get_input_with_combined_completion,
16
16
  get_prompt_with_active_model,
17
17
  )
18
18
  from code_puppy.config import ensure_config_exists
19
+ from code_puppy.state_management import get_message_history, set_message_history
19
20
 
20
21
  # Initialize rich console for pretty output
21
22
  from code_puppy.tools.common import console
22
23
  from code_puppy.version_checker import fetch_latest_version
24
+ from code_puppy.message_history_processor import message_history_processor
23
25
 
24
26
  # from code_puppy.tools import * # noqa: F403
25
27
 
@@ -130,8 +132,6 @@ async def interactive_mode(history_file_path: str) -> None:
130
132
  "[yellow]Falling back to basic input without tab completion[/yellow]"
131
133
  )
132
134
 
133
- message_history = []
134
-
135
135
  # Set up history file in home directory
136
136
  history_file_path_prompt = os.path.expanduser("~/.code_puppy_history.txt")
137
137
  history_dir = os.path.dirname(history_file_path_prompt)
@@ -172,7 +172,7 @@ async def interactive_mode(history_file_path: str) -> None:
172
172
 
173
173
  # Check for clear command (supports both `clear` and `~clear`)
174
174
  if task.strip().lower() in ("clear", "~clear"):
175
- message_history = []
175
+ state_management._message_history = []
176
176
  console.print("[bold yellow]Conversation history cleared![/bold yellow]")
177
177
  console.print(
178
178
  "[dim]The agent will not remember previous interactions.[/dim]\n"
@@ -192,71 +192,56 @@ async def interactive_mode(history_file_path: str) -> None:
192
192
 
193
193
  try:
194
194
  prettier_code_blocks()
195
-
196
- console.log(f"Asking: {task}...", style="cyan")
197
-
198
- # Store agent's full response
199
- agent_response = None
200
-
201
- agent = get_code_generation_agent()
202
- async with agent.run_mcp_servers():
203
- result = await agent.run(task, message_history=message_history)
204
- # Get the structured response
205
- agent_response = result.output
206
- console.print(agent_response)
207
- # Log to session memory
208
-
209
- # Update message history but apply filters & limits
210
- new_msgs = result.new_messages()
211
- # 1. Drop any system/config messages (e.g., "agent loaded with model")
212
- filtered = [
213
- m
214
- for m in new_msgs
215
- if not (isinstance(m, dict) and m.get("role") == "system")
216
- ]
217
- # 2. Append to existing history and keep only the most recent set by config
218
- from code_puppy.config import get_message_history_limit
219
-
220
- message_history.extend(filtered)
221
-
222
- # --- BEGIN GROUP-AWARE TRUNCATION LOGIC ---
223
- limit = get_message_history_limit()
224
- if len(message_history) > limit:
225
-
226
- def group_by_tool_call_id(msgs):
227
- grouped = {}
228
- no_group = []
229
- for m in msgs:
230
- # Find all tool_call_id in message parts
231
- tool_call_ids = set()
232
- for part in getattr(m, "parts", []):
233
- if hasattr(part, "tool_call_id") and part.tool_call_id:
234
- tool_call_ids.add(part.tool_call_id)
235
- if tool_call_ids:
236
- for tcid in tool_call_ids:
237
- grouped.setdefault(tcid, []).append(m)
238
- else:
239
- no_group.append(m)
240
- return grouped, no_group
241
-
242
- grouped, no_group = group_by_tool_call_id(message_history)
243
- # Flatten into groups or singletons
244
- grouped_msgs = list(grouped.values()) + [[m] for m in no_group]
245
- # Flattened history (latest groups/singletons last, trunc to N messages total),
246
- # but always keep complete tool_call_id groups together
247
- truncated = []
248
- count = 0
249
- for group in reversed(grouped_msgs):
250
- if count + len(group) > limit:
251
- break
252
- truncated[:0] = group # insert at front
253
- count += len(group)
254
- message_history = truncated
255
- # --- END GROUP-AWARE TRUNCATION LOGIC ---
195
+ local_cancelled = False
196
+ async def run_agent_task():
197
+ try:
198
+ agent = get_code_generation_agent()
199
+ async with agent.run_mcp_servers():
200
+ return await agent.run(
201
+ task,
202
+ message_history=get_message_history()
203
+ )
204
+ except Exception as e:
205
+ console.log("Task failed", e)
206
+
207
+ agent_task = asyncio.create_task(run_agent_task())
208
+
209
+ import signal
210
+
211
+ original_handler = None
212
+
213
+ def keyboard_interrupt_handler(sig, frame):
214
+ nonlocal local_cancelled
215
+ if not agent_task.done():
216
+ set_message_history(
217
+ message_history_processor(
218
+ get_message_history()
219
+ )
220
+ )
221
+ agent_task.cancel()
222
+ local_cancelled = True
223
+
224
+ try:
225
+ original_handler = signal.getsignal(signal.SIGINT)
226
+ signal.signal(signal.SIGINT, keyboard_interrupt_handler)
227
+ result = await agent_task
228
+ except asyncio.CancelledError:
229
+ pass
230
+ finally:
231
+ if original_handler:
232
+ signal.signal(signal.SIGINT, original_handler)
233
+
234
+ if local_cancelled:
235
+ console.print("Task canceled by user")
236
+ else:
237
+ agent_response = result.output
238
+ console.print(agent_response)
239
+ filtered = message_history_processor(get_message_history())
240
+ set_message_history(filtered)
256
241
 
257
242
  # Show context status
258
243
  console.print(
259
- f"[dim]Context: {len(message_history)} messages in history[/dim]\n"
244
+ f"[dim]Context: {len(get_message_history())} messages in history[/dim]\n"
260
245
  )
261
246
 
262
247
  except Exception:
@@ -0,0 +1,78 @@
1
+ import queue
2
+ from typing import List
3
+
4
+ from pydantic_ai.messages import ModelMessage, ToolCallPart, ToolReturnPart
5
+
6
+ from code_puppy.config import get_message_history_limit
7
+ from code_puppy.tools.common import console
8
+
9
+
10
def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
    """
    Truncate message history to manage token usage while preserving context.

    This implementation:
    - Uses the configurable message_history_limit from puppy.cfg
    - Always preserves the first message (assumed to be the system prompt —
      TODO confirm callers guarantee this ordering)
    - Keeps the most recent messages, in their original order
    - Drops any message belonging to a tool call whose call/return pair was
      split apart by the truncation, since a dangling ToolCallPart or
      ToolReturnPart is rejected by model providers

    Args:
        messages: List of ModelMessage objects from conversation history

    Returns:
        Truncated list of at most message_history_limit ModelMessage objects
    """
    if not messages:
        return messages

    # Get the configurable limit from puppy.cfg
    max_messages = get_message_history_limit()
    # If we have max_messages or fewer, no truncation needed
    if len(messages) <= max_messages:
        return messages

    console.print(
        f"Truncating message history to manage token usage: {max_messages}"
    )

    # Keep the leading message (system prompt) plus the newest tail so the
    # total never exceeds the configured limit. tail_size can be 0 when the
    # limit is 1 — in that case only the system prompt survives.
    tail_size = max(max_messages - 1, 0)
    kept = [messages[0]]
    if tail_size:
        kept.extend(messages[len(messages) - tail_size:])

    # Record which tool_call_ids appear on call parts vs. return parts.
    call_ids = set()
    return_ids = set()
    for message in kept:
        for part in message.parts:
            if getattr(part, "tool_call_id", None):
                if isinstance(part, ToolCallPart):
                    call_ids.add(part.tool_call_id)
                if isinstance(part, ToolReturnPart):
                    return_ids.add(part.tool_call_id)

    # Ids present on only one side of a call/return pair were orphaned by
    # the truncation and must be removed entirely.
    mismatched_tool_call_ids = call_ids ^ return_ids
    if not mismatched_tool_call_ids:
        return kept

    return [
        msg
        for msg in kept
        if not any(
            getattr(part, "tool_call_id", None) in mismatched_tool_call_ids
            for part in msg.parts
        )
    ]
code_puppy/models.json CHANGED
@@ -11,6 +11,10 @@
11
11
  "type": "openai",
12
12
  "name": "gpt-4.1-mini"
13
13
  },
14
+ "gpt-5": {
15
+ "type": "openai",
16
+ "name": "gpt-5"
17
+ },
14
18
  "gpt-4.1-nano": {
15
19
  "type": "openai",
16
20
  "name": "gpt-4.1-nano"
@@ -88,6 +92,14 @@
88
92
  "api_key": "$CEREBRAS_API_KEY"
89
93
  }
90
94
  },
95
+ "Cerebras-gpt-oss-120b": {
96
+ "type": "custom_openai",
97
+ "name": "gpt-oss-120b",
98
+ "custom_endpoint": {
99
+ "url": "https://api.cerebras.ai/v1",
100
+ "api_key": "$CEREBRAS_API_KEY"
101
+ }
102
+ },
91
103
  "Cerebras-Qwen-3-32b": {
92
104
  "type": "custom_openai",
93
105
  "name": "qwen-3-32b",
@@ -0,0 +1,42 @@
1
+ from typing import Any, List
2
+
3
+ from code_puppy.tools.common import console
4
+
5
+ _message_history: List[Any] = []
6
+
7
def get_message_history() -> List[Any]:
    """Return the live module-level message history list (not a copy)."""
    return _message_history
9
+
10
+ def set_message_history(history: List[Any]) -> None:
11
+ global _message_history
12
+ _message_history = history
13
+
14
+ def clear_message_history() -> None:
15
+ global _message_history
16
+ _message_history = []
17
+
18
def append_to_message_history(message: Any) -> None:
    """Append a single message to the shared module-level history."""
    _message_history.append(message)
20
+
21
def extend_message_history(history: List[Any]) -> None:
    """Append every message in *history* to the shared module-level history."""
    _message_history.extend(history)
23
+
24
+
25
def hash_message(message):
    """Compute a per-process identity hash for a model message.

    Each part contributes its most distinctive field: its ISO-formatted
    timestamp when present, else its tool_call_id, else its content. Every
    entity is coerced to str so non-string content (structured payloads) or
    a None tool_call_id cannot break the join — the original implementation
    raised TypeError in those cases.

    Args:
        message: Any object exposing a ``parts`` sequence.

    Returns:
        An int suitable for in-memory de-duplication only. NOTE: str hashing
        is salted per process (PYTHONHASHSEED), so values are not stable
        across runs and must not be persisted.
    """
    hashable_entities = []
    for part in message.parts:
        if hasattr(part, "timestamp"):
            hashable_entities.append(part.timestamp.isoformat())
        elif hasattr(part, "tool_call_id"):
            # tool_call_id may conceivably be None; stringify defensively.
            hashable_entities.append(str(part.tool_call_id))
        else:
            # content may be a non-str structured payload; stringify.
            hashable_entities.append(str(part.content))
    return hash(",".join(hashable_entities))
35
+
36
+
37
def message_history_accumulator(messages: List[Any]):
    """History processor that merges new messages into the module history.

    De-duplicates by hash_message so repeated invocations do not append the
    same message twice. The seen-set is updated as messages are appended,
    so duplicates *within* a single incoming batch are also collapsed
    (the original only deduplicated against pre-existing history).

    Args:
        messages: Messages supplied by the agent run.

    Returns:
        The input ``messages`` unchanged, as pydantic-ai history processors
        must return the list the model should see.
    """
    seen_hashes = {hash_message(m) for m in _message_history}
    for msg in messages:
        msg_hash = hash_message(msg)
        if msg_hash not in seen_hashes:
            _message_history.append(msg)
            seen_hashes.add(msg_hash)
    return messages
@@ -11,6 +11,10 @@
11
11
  "type": "openai",
12
12
  "name": "gpt-4.1-mini"
13
13
  },
14
+ "gpt-5": {
15
+ "type": "openai",
16
+ "name": "gpt-5"
17
+ },
14
18
  "gpt-4.1-nano": {
15
19
  "type": "openai",
16
20
  "name": "gpt-4.1-nano"
@@ -88,6 +92,14 @@
88
92
  "api_key": "$CEREBRAS_API_KEY"
89
93
  }
90
94
  },
95
+ "Cerebras-gpt-oss-120b": {
96
+ "type": "custom_openai",
97
+ "name": "gpt-oss-120b",
98
+ "custom_endpoint": {
99
+ "url": "https://api.cerebras.ai/v1",
100
+ "api_key": "$CEREBRAS_API_KEY"
101
+ }
102
+ },
91
103
  "Cerebras-Qwen-3-32b": {
92
104
  "type": "custom_openai",
93
105
  "name": "qwen-3-32b",
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: code-puppy
3
- Version: 0.0.77
3
+ Version: 0.0.78
4
4
  Summary: Code generation agent
5
5
  Author: Michael Pfaffenberger
6
6
  License: MIT
@@ -1,11 +1,13 @@
1
1
  code_puppy/__init__.py,sha256=-ANvE6Xe5NlWDIRCIfL1x-rgtCZ6zM2Ye9NphFoULSY,82
2
- code_puppy/agent.py,sha256=kkuiYbI2n5VOwzimq3c7ZVj-kzf8VtFE4v7e4vInRag,4007
2
+ code_puppy/agent.py,sha256=e_czIW7bv6W9qna0pOm-RBLLWz6RL3boDiIGR8aGf4g,3935
3
3
  code_puppy/agent_prompts.py,sha256=13YIpTZa3R3lg60-fdkll7t7hgSBtQL0M53wcE1gzyQ,6834
4
4
  code_puppy/config.py,sha256=r5nw5ChOP8xd_K5yo8U5OtO2gy2bFhARiyNtDp1JrwQ,5013
5
- code_puppy/main.py,sha256=VTR-aqjmjqSt_sLBcDXP2fqvs0ZExdKqQL3REkY238E,11325
5
+ code_puppy/main.py,sha256=uKMG0WNrFjEbsiEb_OwL_fNJbqMyTgztGjPKIOoYdSs,10444
6
+ code_puppy/message_history_processor.py,sha256=NaFYEUdHCZlzl5jR-XK4Rh2EHVsupT-SROxe4jXgUaQ,2584
6
7
  code_puppy/model_factory.py,sha256=P2E3KgTHMVaMhHyGHmdascjYmdRxUKBWotlP61i-03A,8291
7
- code_puppy/models.json,sha256=NwdV7vit8j4eyOFPLXJFEKtKuVpqHruTYmX8lG1sOnU,2452
8
+ code_puppy/models.json,sha256=hqSvFzSPcwxMwst6xePlcppm0c_pjyEVSBsWvgbAG98,2714
8
9
  code_puppy/session_memory.py,sha256=4sgAAjbXdLSi8hETpd56tgtrG6hqMUuZWDlJOu6BQjA,2735
10
+ code_puppy/state_management.py,sha256=aymPVogToEeF_vPc2c75rs8WLYNd8s2vMRGNXvTriW0,1248
9
11
  code_puppy/version_checker.py,sha256=aRGulzuY4C4CdFvU1rITduyL-1xTFsn4GiD1uSfOl_Y,396
10
12
  code_puppy/command_line/__init__.py,sha256=y7WeRemfYppk8KVbCGeAIiTuiOszIURCDjOMZv_YRmU,45
11
13
  code_puppy/command_line/file_path_completion.py,sha256=gw8NpIxa6GOpczUJRyh7VNZwoXKKn-yvCqit7h2y6Gg,2931
@@ -20,9 +22,9 @@ code_puppy/tools/common.py,sha256=M53zhiXZAmPdvi1Y_bzCxgvEmifOvRRJvYPARYRZqHw,22
20
22
  code_puppy/tools/file_modifications.py,sha256=qCfkZ7BxTG8U4xydHzS44UtOioj8XvhRKgjoOAnMHTo,13310
21
23
  code_puppy/tools/file_operations.py,sha256=5ESOCS3m4Lpnvrg2XiJAx0m4-0Yar6LZKIdyRCRjENM,11218
22
24
  code_puppy/tools/ts_code_map.py,sha256=o-u8p5vsYwitfDtVEoPS-7MwWn2xHzwtIQLo1_WMhQs,17647
23
- code_puppy-0.0.77.data/data/code_puppy/models.json,sha256=NwdV7vit8j4eyOFPLXJFEKtKuVpqHruTYmX8lG1sOnU,2452
24
- code_puppy-0.0.77.dist-info/METADATA,sha256=d16V_O5nidwrIITCMsOtv0Q0cxky7vGrFujDIE0Aeww,6512
25
- code_puppy-0.0.77.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
26
- code_puppy-0.0.77.dist-info/entry_points.txt,sha256=d8YkBvIUxF-dHNJAj-x4fPEqizbY5d_TwvYpc01U5kw,58
27
- code_puppy-0.0.77.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
28
- code_puppy-0.0.77.dist-info/RECORD,,
25
+ code_puppy-0.0.78.data/data/code_puppy/models.json,sha256=hqSvFzSPcwxMwst6xePlcppm0c_pjyEVSBsWvgbAG98,2714
26
+ code_puppy-0.0.78.dist-info/METADATA,sha256=mBMAVB0GZQCto_2S7fIZX3HxsR5ReddEbzFKp62QYfc,6512
27
+ code_puppy-0.0.78.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
28
+ code_puppy-0.0.78.dist-info/entry_points.txt,sha256=d8YkBvIUxF-dHNJAj-x4fPEqizbY5d_TwvYpc01U5kw,58
29
+ code_puppy-0.0.78.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
30
+ code_puppy-0.0.78.dist-info/RECORD,,