code-puppy 0.0.76-py3-none-any.whl → 0.0.78-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/agent.py +2 -5
- code_puppy/agent_prompts.py +0 -1
- code_puppy/main.py +51 -66
- code_puppy/message_history_processor.py +78 -0
- code_puppy/models.json +28 -0
- code_puppy/state_management.py +42 -0
- code_puppy/tools/file_modifications.py +1 -1
- {code_puppy-0.0.76.data → code_puppy-0.0.78.data}/data/code_puppy/models.json +28 -0
- {code_puppy-0.0.76.dist-info → code_puppy-0.0.78.dist-info}/METADATA +1 -1
- {code_puppy-0.0.76.dist-info → code_puppy-0.0.78.dist-info}/RECORD +13 -11
- {code_puppy-0.0.76.dist-info → code_puppy-0.0.78.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.76.dist-info → code_puppy-0.0.78.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.76.dist-info → code_puppy-0.0.78.dist-info}/licenses/LICENSE +0 -0
code_puppy/agent.py
CHANGED

@@ -8,6 +8,7 @@ from pydantic_ai.mcp import MCPServerSSE
 from code_puppy.agent_prompts import get_system_prompt
 from code_puppy.model_factory import ModelFactory
 from code_puppy.session_memory import SessionMemory
+from code_puppy.state_management import message_history_accumulator
 from code_puppy.tools import register_all_tools
 from code_puppy.tools.common import console

@@ -83,11 +84,6 @@ def reload_code_generation_agent():
     global _code_generation_agent, _LAST_MODEL_NAME
     from code_puppy.config import get_model_name

-    model_name = get_model_name()
-    console.print(f"[bold cyan]Loading Model: {model_name}")
-    global _code_generation_agent, _LAST_MODEL_NAME
-    from code_puppy.config import get_model_name
-
     model_name = get_model_name()
     console.print(f"[bold cyan]Loading Model: {model_name}[/bold cyan]")
     models_path = (
@@ -105,6 +101,7 @@ def reload_code_generation_agent():
         instructions=instructions,
         output_type=str,
         retries=3,
+        history_processors=[message_history_accumulator]
     )
     register_all_tools(agent)
     _code_generation_agent = agent
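For context, `history_processors` is pydantic-ai's hook for transforming the message list before each model call, and the change above simply registers code-puppy's accumulator there. Below is a minimal sketch of the same wiring, assuming a hypothetical pass-through processor and using pydantic-ai's TestModel so it runs without API credentials; it is not code-puppy's actual configuration.

```python
# Minimal sketch of wiring a history processor into a pydantic-ai Agent.
# TestModel and the pass-through processor are stand-ins, not code-puppy code.
from typing import List

from pydantic_ai import Agent
from pydantic_ai.messages import ModelMessage
from pydantic_ai.models.test import TestModel


def history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
    # Inspect or rewrite the history before every model call;
    # code-puppy's accumulator also records messages into module-level state.
    return messages


agent = Agent(
    TestModel(),
    instructions="You are a helpful coding agent.",
    output_type=str,
    retries=3,
    history_processors=[history_processor],
)

print(agent.run_sync("hello").output)
```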
code_puppy/agent_prompts.py
CHANGED

@@ -30,7 +30,6 @@ File Operations:
 - edit_file(path, diff): Use this single tool to create new files, overwrite entire files, perform targeted replacements, or delete snippets depending on the JSON/raw payload provided.
 - delete_file(file_path): Use this to remove files when needed
 - grep(search_string, directory="."): Use this to recursively search for a string across files starting from the specified directory, capping results at 200 matches.
-- code_map(directory="."): Use this to generate a code map for the specified directory.

 Tool Usage Instructions:

code_puppy/main.py
CHANGED

@@ -9,17 +9,19 @@ from rich.markdown import CodeBlock, Markdown
 from rich.syntax import Syntax
 from rich.text import Text

-from code_puppy import __version__
+from code_puppy import __version__, state_management
 from code_puppy.agent import get_code_generation_agent, session_memory
 from code_puppy.command_line.prompt_toolkit_completion import (
     get_input_with_combined_completion,
     get_prompt_with_active_model,
 )
 from code_puppy.config import ensure_config_exists
+from code_puppy.state_management import get_message_history, set_message_history

 # Initialize rich console for pretty output
 from code_puppy.tools.common import console
 from code_puppy.version_checker import fetch_latest_version
+from code_puppy.message_history_processor import message_history_processor

 # from code_puppy.tools import * # noqa: F403

@@ -130,8 +132,6 @@ async def interactive_mode(history_file_path: str) -> None:
                 "[yellow]Falling back to basic input without tab completion[/yellow]"
             )

-    message_history = []
-
     # Set up history file in home directory
     history_file_path_prompt = os.path.expanduser("~/.code_puppy_history.txt")
     history_dir = os.path.dirname(history_file_path_prompt)
@@ -172,7 +172,7 @@ async def interactive_mode(history_file_path: str) -> None:

            # Check for clear command (supports both `clear` and `~clear`)
            if task.strip().lower() in ("clear", "~clear"):
-               [removed line not captured in this diff view]
+               state_management._message_history = []
                console.print("[bold yellow]Conversation history cleared![/bold yellow]")
                console.print(
                    "[dim]The agent will not remember previous interactions.[/dim]\n"
@@ -192,71 +192,56 @@ async def interactive_mode(history_file_path: str) -> None:

            try:
                prettier_code_blocks()
-               [removed lines 195-241 not captured in this diff view; only the fragment "if not" is visible]
-               grouped, no_group = group_by_tool_call_id(message_history)
-               # Flatten into groups or singletons
-               grouped_msgs = list(grouped.values()) + [[m] for m in no_group]
-               # Flattened history (latest groups/singletons last, trunc to N messages total),
-               # but always keep complete tool_call_id groups together
-               truncated = []
-               count = 0
-               for group in reversed(grouped_msgs):
-                   if count + len(group) > limit:
-                       break
-                   truncated[:0] = group  # insert at front
-                   count += len(group)
-               message_history = truncated
-               # --- END GROUP-AWARE TRUNCATION LOGIC ---
+               local_cancelled = False
+
+               async def run_agent_task():
+                   try:
+                       agent = get_code_generation_agent()
+                       async with agent.run_mcp_servers():
+                           return await agent.run(
+                               task,
+                               message_history=get_message_history()
+                           )
+                   except Exception as e:
+                       console.log("Task failed", e)
+
+               agent_task = asyncio.create_task(run_agent_task())
+
+               import signal
+
+               original_handler = None
+
+               def keyboard_interrupt_handler(sig, frame):
+                   nonlocal local_cancelled
+                   if not agent_task.done():
+                       set_message_history(
+                           message_history_processor(
+                               get_message_history()
+                           )
+                       )
+                       agent_task.cancel()
+                       local_cancelled = True
+
+               try:
+                   original_handler = signal.getsignal(signal.SIGINT)
+                   signal.signal(signal.SIGINT, keyboard_interrupt_handler)
+                   result = await agent_task
+               except asyncio.CancelledError:
+                   pass
+               finally:
+                   if original_handler:
+                       signal.signal(signal.SIGINT, original_handler)
+
+               if local_cancelled:
+                   console.print("Task canceled by user")
+               else:
+                   agent_response = result.output
+                   console.print(agent_response)
+                   filtered = message_history_processor(get_message_history())
+                   set_message_history(filtered)

                # Show context status
                console.print(
-                   f"[dim]Context: {len(
+                   f"[dim]Context: {len(get_message_history())} messages in history[/dim]\n"
                )

            except Exception:

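The rewritten interactive loop runs the agent as a background asyncio task and temporarily swaps in a SIGINT handler, so Ctrl+C cancels the in-flight run (after snapshotting the processed history) instead of killing the REPL. Below is a stripped-down, self-contained sketch of that pattern, with asyncio.sleep standing in for agent.run; it omits code-puppy's history bookkeeping.

```python
# Stand-alone sketch of the cancel-on-SIGINT pattern used in interactive_mode:
# run the work as a task, install a SIGINT handler that cancels it, and
# restore the original handler when the task finishes or is cancelled.
import asyncio
import signal


async def main() -> None:
    async def work() -> str:
        await asyncio.sleep(10)  # stand-in for agent.run(task, message_history=...)
        return "done"

    task = asyncio.create_task(work())
    original_handler = signal.getsignal(signal.SIGINT)

    def keyboard_interrupt_handler(sig, frame):
        if not task.done():
            task.cancel()

    signal.signal(signal.SIGINT, keyboard_interrupt_handler)
    try:
        print(await task)
    except asyncio.CancelledError:
        print("Task canceled by user")
    finally:
        signal.signal(signal.SIGINT, original_handler)


if __name__ == "__main__":
    asyncio.run(main())
```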
code_puppy/message_history_processor.py
ADDED

@@ -0,0 +1,78 @@
+import queue
+from typing import List
+
+from pydantic_ai.messages import ModelMessage, ToolCallPart, ToolReturnPart
+
+from code_puppy.config import get_message_history_limit
+from code_puppy.tools.common import console
+
+
+def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
+    """
+    Truncate message history to manage token usage while preserving context.
+
+    This implementation:
+    - Uses the configurable message_history_limit from puppy.cfg (defaults to 40)
+    - Preserves system messages at the beginning
+    - Maintains tool call/response pairs together
+    - Follows PydanticAI best practices for message ordering
+
+    Args:
+        messages: List of ModelMessage objects from conversation history
+
+    Returns:
+        Truncated list of ModelMessage objects
+    """
+    if not messages:
+        return messages
+
+    # Get the configurable limit from puppy.cfg
+    max_messages = get_message_history_limit()
+    # If we have max_messages or fewer, no truncation needed
+    if len(messages) <= max_messages:
+        return messages
+
+    console.print(
+        f"Truncating message history to manage token usage: {max_messages}"
+    )
+    result = []
+    result.append(messages[0])  # this is the system prompt
+    remaining_messages_to_fill = max_messages - 1
+    stack = queue.LifoQueue()
+    count = 0
+    tool_call_parts = set()
+    tool_return_parts = set()
+    for message in reversed(messages):
+        stack.put(message)
+        count += 1
+        if count >= remaining_messages_to_fill:
+            break
+
+    while not stack.empty():
+        item = stack.get()
+        for part in item.parts:
+            if hasattr(part, "tool_call_id") and part.tool_call_id:
+                if isinstance(part, ToolCallPart):
+                    tool_call_parts.add(part.tool_call_id)
+                if isinstance(part, ToolReturnPart):
+                    tool_return_parts.add(part.tool_call_id)
+
+        result.append(item)
+
+    missmatched_tool_call_ids = (tool_call_parts.union(tool_return_parts)) - (
+        tool_call_parts.intersection(tool_return_parts)
+    )
+    # trust...
+    final_result = result
+    if missmatched_tool_call_ids:
+        final_result = []
+        for msg in result:
+            is_missmatched = False
+            for part in msg.parts:
+                if hasattr(part, "tool_call_id"):
+                    if part.tool_call_id in missmatched_tool_call_ids:
+                        is_missmatched = True
+            if is_missmatched:
+                continue
+            final_result.append(msg)
+    return final_result
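The processor's rule can be hard to see through the LifoQueue bookkeeping: keep the first (system) message, keep the most recent window, then drop any message whose tool_call_id lost its partner to the cutoff. The toy run below illustrates that rule on plain dicts, which are hypothetical stand-ins for pydantic-ai's message types, not the real objects.

```python
# Toy illustration of the truncation rule: keep the system message and the
# newest window, then drop messages whose tool_call_id has no matching partner.
def truncate(messages, max_messages=5):
    if len(messages) <= max_messages:
        return messages
    kept = [messages[0]] + messages[-(max_messages - 1):]
    calls = {m["tool_call_id"] for m in kept if m["kind"] == "call"}
    returns = {m["tool_call_id"] for m in kept if m["kind"] == "return"}
    mismatched = (calls | returns) - (calls & returns)
    return [m for m in kept if m["tool_call_id"] not in mismatched]


history = (
    [{"kind": "system", "tool_call_id": None}]
    + [{"kind": "call", "tool_call_id": f"t{i}"} for i in range(3)]
    + [{"kind": "return", "tool_call_id": f"t{i}"} for i in range(3)]
)
# Returns whose matching call fell outside the window are dropped as well.
print(len(truncate(history)))  # 3: system, call t2, return t2
```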
code_puppy/models.json
CHANGED

@@ -11,6 +11,10 @@
     "type": "openai",
     "name": "gpt-4.1-mini"
   },
+  "gpt-5": {
+    "type": "openai",
+    "name": "gpt-5"
+  },
   "gpt-4.1-nano": {
     "type": "openai",
     "name": "gpt-4.1-nano"
@@ -79,5 +83,29 @@
       "url": "https://api.cerebras.ai/v1",
       "api_key": "$CEREBRAS_API_KEY"
     }
+  },
+  "Cerebras-Qwen3-235b-a22b-instruct-2507": {
+    "type": "custom_openai",
+    "name": "qwen-3-235b-a22b-instruct-2507",
+    "custom_endpoint": {
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    }
+  },
+  "Cerebras-gpt-oss-120b": {
+    "type": "custom_openai",
+    "name": "gpt-oss-120b",
+    "custom_endpoint": {
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    }
+  },
+  "Cerebras-Qwen-3-32b": {
+    "type": "custom_openai",
+    "name": "qwen-3-32b",
+    "custom_endpoint": {
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    }
   }
 }
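The new Cerebras entries reuse the existing custom_openai shape, where api_key holds what appears to be an environment-variable reference rather than a literal key. How that reference is resolved lives in code_puppy/model_factory.py, which is not part of this diff; the snippet below only illustrates the assumed "$VAR" convention on one of the new entries.

```python
# Illustration of the assumed "$CEREBRAS_API_KEY" convention in the new
# custom_openai entries: a leading "$" is treated as an environment lookup.
# The real resolution logic is in code_puppy/model_factory.py (not shown here).
import json
import os

entry = json.loads("""
{
  "type": "custom_openai",
  "name": "gpt-oss-120b",
  "custom_endpoint": {
    "url": "https://api.cerebras.ai/v1",
    "api_key": "$CEREBRAS_API_KEY"
  }
}
""")

api_key = entry["custom_endpoint"]["api_key"]
if api_key.startswith("$"):
    api_key = os.environ.get(api_key[1:], "")

print(entry["name"], entry["custom_endpoint"]["url"], "key set:", bool(api_key))
```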
code_puppy/state_management.py
ADDED

@@ -0,0 +1,42 @@
+from typing import Any, List
+
+from code_puppy.tools.common import console
+
+_message_history: List[Any] = []
+
+def get_message_history() -> List[Any]:
+    return _message_history
+
+def set_message_history(history: List[Any]) -> None:
+    global _message_history
+    _message_history = history
+
+def clear_message_history() -> None:
+    global _message_history
+    _message_history = []
+
+def append_to_message_history(message: Any) -> None:
+    _message_history.append(message)
+
+def extend_message_history(history: List[Any]) -> None:
+    _message_history.extend(history)
+
+
+def hash_message(message):
+    hashable_entities = []
+    for part in message.parts:
+        if hasattr(part, "timestamp"):
+            hashable_entities.append(part.timestamp.isoformat())
+        elif hasattr(part, "tool_call_id"):
+            hashable_entities.append(part.tool_call_id)
+        else:
+            hashable_entities.append(part.content)
+    return hash(",".join(hashable_entities))
+
+
+def message_history_accumulator(messages: List[Any]):
+    message_history_hashes = set([hash_message(m) for m in _message_history])
+    for msg in messages:
+        if hash_message(msg) not in message_history_hashes:
+            _message_history.append(msg)
+    return messages
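Since message_history_accumulator runs on every model request, the hash check above is what keeps the shared history from accumulating duplicates. The toy run below shows that dedup rule with hypothetical SimplePart/SimpleMessage stand-ins in place of pydantic-ai's message types.

```python
# Toy run of the accumulator's dedup rule: a message is appended to the shared
# history only if its hash (built from part tool_call_ids or content, mirroring
# hash_message above) has not been seen before. Types here are stand-ins.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class SimplePart:
    content: str
    tool_call_id: Optional[str] = None


@dataclass
class SimpleMessage:
    parts: List[SimplePart] = field(default_factory=list)


_history: List[SimpleMessage] = []


def simple_hash(message: SimpleMessage) -> int:
    keys = [p.tool_call_id if p.tool_call_id else p.content for p in message.parts]
    return hash(",".join(keys))


def accumulate(messages: List[SimpleMessage]) -> List[SimpleMessage]:
    seen = {simple_hash(m) for m in _history}
    for msg in messages:
        if simple_hash(msg) not in seen:
            _history.append(msg)
    return messages


accumulate([SimpleMessage([SimplePart("hello")])])
accumulate([SimpleMessage([SimplePart("hello")])])  # same hash, not re-added
print(len(_history))  # 1
```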
code_puppy/tools/file_modifications.py
CHANGED

@@ -347,7 +347,7 @@ def _delete_file(context: RunContext, file_path: str = "") -> Dict[str, Any]:

 class EditFileOutput(BaseModel):
     success: bool | None
-    [removed line not captured in this diff view]
+    path: str | None
     message: str | None
     changed: bool | None
     diff: str | None
{code_puppy-0.0.76.data → code_puppy-0.0.78.data}/data/code_puppy/models.json
CHANGED

@@ -11,6 +11,10 @@
     "type": "openai",
     "name": "gpt-4.1-mini"
   },
+  "gpt-5": {
+    "type": "openai",
+    "name": "gpt-5"
+  },
   "gpt-4.1-nano": {
     "type": "openai",
     "name": "gpt-4.1-nano"
@@ -79,5 +83,29 @@
       "url": "https://api.cerebras.ai/v1",
       "api_key": "$CEREBRAS_API_KEY"
     }
+  },
+  "Cerebras-Qwen3-235b-a22b-instruct-2507": {
+    "type": "custom_openai",
+    "name": "qwen-3-235b-a22b-instruct-2507",
+    "custom_endpoint": {
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    }
+  },
+  "Cerebras-gpt-oss-120b": {
+    "type": "custom_openai",
+    "name": "gpt-oss-120b",
+    "custom_endpoint": {
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    }
+  },
+  "Cerebras-Qwen-3-32b": {
+    "type": "custom_openai",
+    "name": "qwen-3-32b",
+    "custom_endpoint": {
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    }
   }
 }
{code_puppy-0.0.76.dist-info → code_puppy-0.0.78.dist-info}/RECORD
CHANGED

@@ -1,11 +1,13 @@
 code_puppy/__init__.py,sha256=-ANvE6Xe5NlWDIRCIfL1x-rgtCZ6zM2Ye9NphFoULSY,82
-code_puppy/agent.py,sha256=
-code_puppy/agent_prompts.py,sha256=
+code_puppy/agent.py,sha256=e_czIW7bv6W9qna0pOm-RBLLWz6RL3boDiIGR8aGf4g,3935
+code_puppy/agent_prompts.py,sha256=13YIpTZa3R3lg60-fdkll7t7hgSBtQL0M53wcE1gzyQ,6834
 code_puppy/config.py,sha256=r5nw5ChOP8xd_K5yo8U5OtO2gy2bFhARiyNtDp1JrwQ,5013
-code_puppy/main.py,sha256=
+code_puppy/main.py,sha256=uKMG0WNrFjEbsiEb_OwL_fNJbqMyTgztGjPKIOoYdSs,10444
+code_puppy/message_history_processor.py,sha256=NaFYEUdHCZlzl5jR-XK4Rh2EHVsupT-SROxe4jXgUaQ,2584
 code_puppy/model_factory.py,sha256=P2E3KgTHMVaMhHyGHmdascjYmdRxUKBWotlP61i-03A,8291
-code_puppy/models.json,sha256=
+code_puppy/models.json,sha256=hqSvFzSPcwxMwst6xePlcppm0c_pjyEVSBsWvgbAG98,2714
 code_puppy/session_memory.py,sha256=4sgAAjbXdLSi8hETpd56tgtrG6hqMUuZWDlJOu6BQjA,2735
+code_puppy/state_management.py,sha256=aymPVogToEeF_vPc2c75rs8WLYNd8s2vMRGNXvTriW0,1248
 code_puppy/version_checker.py,sha256=aRGulzuY4C4CdFvU1rITduyL-1xTFsn4GiD1uSfOl_Y,396
 code_puppy/command_line/__init__.py,sha256=y7WeRemfYppk8KVbCGeAIiTuiOszIURCDjOMZv_YRmU,45
 code_puppy/command_line/file_path_completion.py,sha256=gw8NpIxa6GOpczUJRyh7VNZwoXKKn-yvCqit7h2y6Gg,2931
@@ -17,12 +19,12 @@ code_puppy/command_line/utils.py,sha256=7eyxDHjPjPB9wGDJQQcXV_zOsGdYsFgI0SGCetVm
 code_puppy/tools/__init__.py,sha256=ozIGpLM7pKSjH4UeojkTodhfVYZeNzMsLtK_oyw41HA,456
 code_puppy/tools/command_runner.py,sha256=S-kB8S_mxHEK12tax6v54uziyNpl5-n2NzE8PAq-J5k,7302
 code_puppy/tools/common.py,sha256=M53zhiXZAmPdvi1Y_bzCxgvEmifOvRRJvYPARYRZqHw,2253
-code_puppy/tools/file_modifications.py,sha256=
+code_puppy/tools/file_modifications.py,sha256=qCfkZ7BxTG8U4xydHzS44UtOioj8XvhRKgjoOAnMHTo,13310
 code_puppy/tools/file_operations.py,sha256=5ESOCS3m4Lpnvrg2XiJAx0m4-0Yar6LZKIdyRCRjENM,11218
 code_puppy/tools/ts_code_map.py,sha256=o-u8p5vsYwitfDtVEoPS-7MwWn2xHzwtIQLo1_WMhQs,17647
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
+code_puppy-0.0.78.data/data/code_puppy/models.json,sha256=hqSvFzSPcwxMwst6xePlcppm0c_pjyEVSBsWvgbAG98,2714
+code_puppy-0.0.78.dist-info/METADATA,sha256=mBMAVB0GZQCto_2S7fIZX3HxsR5ReddEbzFKp62QYfc,6512
+code_puppy-0.0.78.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+code_puppy-0.0.78.dist-info/entry_points.txt,sha256=d8YkBvIUxF-dHNJAj-x4fPEqizbY5d_TwvYpc01U5kw,58
+code_puppy-0.0.78.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
+code_puppy-0.0.78.dist-info/RECORD,,
{code_puppy-0.0.76.dist-info → code_puppy-0.0.78.dist-info}/WHEEL
File without changes

{code_puppy-0.0.76.dist-info → code_puppy-0.0.78.dist-info}/entry_points.txt
File without changes

{code_puppy-0.0.76.dist-info → code_puppy-0.0.78.dist-info}/licenses/LICENSE
File without changes