code-puppy 0.0.89__py3-none-any.whl → 0.0.91__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/agent.py +1 -0
- code_puppy/main.py +173 -10
- code_puppy/message_history_processor.py +24 -1
- code_puppy/status_display.py +209 -0
- {code_puppy-0.0.89.dist-info → code_puppy-0.0.91.dist-info}/METADATA +1 -1
- {code_puppy-0.0.89.dist-info → code_puppy-0.0.91.dist-info}/RECORD +10 -9
- {code_puppy-0.0.89.data → code_puppy-0.0.91.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.89.dist-info → code_puppy-0.0.91.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.89.dist-info → code_puppy-0.0.91.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.89.dist-info → code_puppy-0.0.91.dist-info}/licenses/LICENSE +0 -0
code_puppy/agent.py
CHANGED
code_puppy/main.py
CHANGED
@@ -17,15 +17,16 @@ from code_puppy.command_line.prompt_toolkit_completion import (
 )
 from code_puppy.config import ensure_config_exists
 from code_puppy.state_management import get_message_history, set_message_history
+from code_puppy.status_display import StatusDisplay
 
 # Initialize rich console for pretty output
 from code_puppy.tools.common import console
 from code_puppy.version_checker import fetch_latest_version
-from code_puppy.message_history_processor import message_history_processor
+from code_puppy.message_history_processor import message_history_processor
 
 
 # from code_puppy.tools import * # noqa: F403
-
+import logfire
 
 # Define a function to get the secret file path
 def get_secret_file_path():
@@ -37,7 +38,10 @@ def get_secret_file_path():
 
 async def main():
     # Ensure the config directory and puppy.cfg with name info exist (prompt user if needed)
+    logfire.configure(token="pylf_v1_us_8G5nLznQtHMRsL4hsNG5v3fPWKjyXbysrMgrQ1bV1wRP")
+    logfire.instrument_pydantic_ai()
     ensure_config_exists()
+
     current_version = __version__
     latest_version = fetch_latest_version("code-puppy")
     console.print(f"Current version: {current_version}")
@@ -194,17 +198,167 @@ async def interactive_mode(history_file_path: str) -> None:
         try:
             prettier_code_blocks()
             local_cancelled = False
-
+
+            # Initialize status display for tokens per second and loading messages
+            status_display = StatusDisplay(console)
+
+            # Print a message indicating we're about to start processing
+            console.print("\nStarting task processing...")
+
+            async def track_tokens_from_messages():
+                """
+                Track real token counts from message history.
+
+                This async function runs in the background and periodically checks
+                the message history for new tokens. When new tokens are detected,
+                it updates the StatusDisplay with the incremental count to calculate
+                an accurate tokens-per-second rate.
+
+                It also looks for SSE stream time_info data to get precise token rate
+                calculations using the formula: completion_tokens * 1 / completion_time
+
+                The function continues running until status_display.is_active becomes False.
+                """
+                from code_puppy.message_history_processor import estimate_tokens_for_message
+                import json
+                import re
+
+                last_token_total = 0
+                last_sse_data = None
+
+                while status_display.is_active:
+                    # Get real token count from message history
+                    messages = get_message_history()
+                    if messages:
+                        # Calculate total tokens across all messages
+                        current_token_total = sum(estimate_tokens_for_message(msg) for msg in messages)
+
+                        # If tokens increased, update the display with the incremental count
+                        if current_token_total > last_token_total:
+                            status_display.update_token_count(current_token_total - last_token_total)
+                            last_token_total = current_token_total
+
+                        # Try to find SSE stream data in assistant messages
+                        for msg in messages:
+                            # Handle different message types (dict or ModelMessage objects)
+                            if hasattr(msg, 'role') and msg.role == 'assistant':
+                                # ModelMessage object with role attribute
+                                content = msg.content if hasattr(msg, 'content') else ''
+                            elif isinstance(msg, dict) and msg.get('role') == 'assistant':
+                                # Dictionary with 'role' key
+                                content = msg.get('content', '')
+                            # Support for ModelRequest/ModelResponse objects
+                            elif hasattr(msg, 'message') and hasattr(msg.message, 'role') and msg.message.role == 'assistant':
+                                # Access content through the message attribute
+                                content = msg.message.content if hasattr(msg.message, 'content') else ''
+                            else:
+                                # Skip if not an assistant message or unrecognized format
+                                continue
+
+                            # Convert content to string if it's not already
+                            if not isinstance(content, str):
+                                try:
+                                    content = str(content)
+                                except:
+                                    continue
+
+                            # Look for SSE usage data pattern in the message content
+                            sse_matches = re.findall(r'\{\s*"usage".*?"time_info".*?\}', content, re.DOTALL)
+                            for match in sse_matches:
+                                try:
+                                    # Parse the JSON data
+                                    sse_data = json.loads(match)
+                                    if sse_data != last_sse_data:  # Only process new data
+                                        # Check if we have time_info and completion_tokens
+                                        if 'time_info' in sse_data and 'completion_time' in sse_data['time_info'] and \
+                                           'usage' in sse_data and 'completion_tokens' in sse_data['usage']:
+                                            completion_time = float(sse_data['time_info']['completion_time'])
+                                            completion_tokens = int(sse_data['usage']['completion_tokens'])
+
+                                            # Update rate using the accurate SSE data
+                                            if completion_time > 0 and completion_tokens > 0:
+                                                status_display.update_rate_from_sse(completion_tokens, completion_time)
+                                        last_sse_data = sse_data
+                                except (json.JSONDecodeError, KeyError, ValueError):
+                                    # Ignore parsing errors and continue
+                                    pass
+
+                    # Small sleep interval for responsive updates without excessive CPU usage
+                    await asyncio.sleep(0.1)
+
+            async def wrap_agent_run(original_run, *args, **kwargs):
+                """
+                Wraps the agent's run method to enable token tracking.
+
+                This wrapper preserves the original functionality while allowing
+                us to track tokens as they are generated by the model. No additional
+                logic is needed here since the token tracking happens in a separate task.
+
+                Args:
+                    original_run: The original agent.run method
+                    *args, **kwargs: Arguments to pass to the original run method
+
+                Returns:
+                    The result from the original run method
+                """
+                result = await original_run(*args, **kwargs)
+                return result
+
             async def run_agent_task():
+                """
+                Main task runner for the agent with token tracking.
+
+                This function:
+                1. Sets up the agent with token tracking
+                2. Starts the status display showing token rate
+                3. Runs the agent with the user's task
+                4. Ensures proper cleanup of all resources
+
+                Returns the agent's result or raises any exceptions that occurred.
+                """
+                # Token tracking task reference for cleanup
+                token_tracking_task = None
+
                 try:
+                    # Initialize the agent
                     agent = get_code_generation_agent()
+
+                    # Start status display
+                    status_display.start()
+
+                    # Start token tracking
+                    token_tracking_task = asyncio.create_task(track_tokens_from_messages())
+
+                    # Create a wrapper for the agent's run method
+                    original_run = agent.run
+
+                    async def wrapped_run(*args, **kwargs):
+                        return await wrap_agent_run(original_run, *args, **kwargs)
+
+                    agent.run = wrapped_run
+
+                    # Run the agent with MCP servers
                     async with agent.run_mcp_servers():
-
-                            task,
+                        result = await agent.run(
+                            task,
+                            message_history=get_message_history()
                         )
+                    return result
                 except Exception as e:
                     console.log("Task failed", e)
-
+                    raise
+                finally:
+                    # Clean up resources
+                    if status_display.is_active:
+                        status_display.stop()
+                    if token_tracking_task and not token_tracking_task.done():
+                        token_tracking_task.cancel()
+                    if not agent_task.done():
+                        set_message_history(
+                            message_history_processor(
+                                get_message_history()
+                            )
+                        )
             agent_task = asyncio.create_task(run_agent_task())
 
             import signal
@@ -251,11 +405,20 @@ async def interactive_mode(history_file_path: str) -> None:
 
         if local_cancelled:
             console.print("Task canceled by user")
+            # Ensure status display is stopped if canceled
+            if status_display.is_active:
+                status_display.stop()
         else:
-
-
-
-
+            if result is not None and hasattr(result, 'output'):
+                agent_response = result.output
+                console.print(agent_response)
+                filtered = message_history_processor(get_message_history())
+                set_message_history(filtered)
+            else:
+                console.print("[yellow]No result received from the agent[/yellow]")
+                # Still process history if possible
+                filtered = message_history_processor(get_message_history())
+                set_message_history(filtered)
 
         # Show context status
         console.print(
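The background tracker added above derives tokens-per-second from SSE `usage`/`time_info` blocks found in assistant messages: rate = completion_tokens / completion_time, blended with the previous rate before display. The sketch below is an illustration only — the sample payload and the helper name are invented for the example and are not part of the package:

    import json

    # Hypothetical SSE chunk of the shape the tracker's regex looks for
    sample_chunk = json.dumps({
        "usage": {"completion_tokens": 512},
        "time_info": {"completion_time": 1.6},
    })

    def rate_from_sse(chunk: str, previous_rate: float = 0.0) -> float:
        """tokens / completion_time, smoothed 30% old / 70% new, as update_rate_from_sse does."""
        data = json.loads(chunk)
        tokens = int(data["usage"]["completion_tokens"])
        seconds = float(data["time_info"]["completion_time"])
        if seconds <= 0 or tokens <= 0:
            return previous_rate
        raw = tokens / seconds
        return raw if previous_rate <= 0 else previous_rate * 0.3 + raw * 0.7

    print(f"{rate_from_sse(sample_chunk):.1f} t/s")  # 320.0 t/s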
code_puppy/message_history_processor.py
CHANGED
@@ -17,6 +17,13 @@ from code_puppy.tools.common import console
 from code_puppy.model_factory import ModelFactory
 from code_puppy.config import get_model_name
 
+# Import the status display to get token rate info
+try:
+    from code_puppy.status_display import StatusDisplay
+    STATUS_DISPLAY_AVAILABLE = True
+except ImportError:
+    STATUS_DISPLAY_AVAILABLE = False
+
 # Import summarization agent
 try:
     from code_puppy.summarization_agent import (
@@ -246,9 +253,25 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage
     model_max = get_model_context_length()
 
     proportion_used = total_current_tokens / model_max
+
+    # Include token per second rate if available
+    token_rate_info = ""
+    if STATUS_DISPLAY_AVAILABLE:
+        current_rate = StatusDisplay.get_current_rate()
+        if current_rate > 0:
+            # Format with improved precision when using SSE data
+            if current_rate > 1000:
+                token_rate_info = f", {current_rate:.0f} t/s"
+            else:
+                token_rate_info = f", {current_rate:.1f} t/s"
+
+    # Print blue status bar - ALWAYS at top
     console.print(f"""
-[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f}
+[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f}{token_rate_info}
 """)
+
+    # Print extra line to ensure separation
+    console.print("\n")
 
     if proportion_used > 0.85:
         summary = summarize_messages(messages)
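The status-bar suffix added above is built from the shared rate: empty until a rate exists, whole tokens above 1000 t/s, one decimal place otherwise. A standalone sketch of that formatting rule (the function name is hypothetical):

    def format_rate_suffix(current_rate: float) -> str:
        # Same branching as the hunk above: empty string until a rate exists,
        # integer formatting past 1000 t/s, one decimal place below that.
        if current_rate <= 0:
            return ""
        if current_rate > 1000:
            return f", {current_rate:.0f} t/s"
        return f", {current_rate:.1f} t/s"

    print(repr(format_rate_suffix(0.0)))     # ''
    print(repr(format_rate_suffix(42.37)))   # ', 42.4 t/s'
    print(repr(format_rate_suffix(1234.8)))  # ', 1235 t/s'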
code_puppy/status_display.py
ADDED
@@ -0,0 +1,209 @@
+import asyncio
+import random
+import time
+from datetime import datetime
+from typing import List, Optional
+
+from rich.console import Console, RenderableType
+from rich.live import Live
+from rich.panel import Panel
+from rich.spinner import Spinner
+from rich.text import Text
+
+# Global variable to track current token per second rate
+CURRENT_TOKEN_RATE = 0.0
+
+
+class StatusDisplay:
+    """
+    Displays real-time status information during model execution,
+    including token per second rate and rotating loading messages.
+    """
+
+    def __init__(self, console: Console):
+        self.console = console
+        self.token_count = 0
+        self.start_time = None
+        self.last_update_time = None
+        self.last_token_count = 0
+        self.current_rate = 0
+        self.is_active = False
+        self.task = None
+        self.live = None
+        self.loading_messages = [
+            "Fetching...",
+            "Sniffing around...",
+            "Wagging tail...",
+            "Pawsing for a moment...",
+            "Chasing tail...",
+            "Digging up results...",
+            "Barking at the data...",
+            "Rolling over...",
+            "Panting with excitement...",
+            "Chewing on it...",
+            "Prancing along...",
+            "Howling at the code...",
+            "Snuggling up to the task...",
+            "Bounding through data...",
+            "Puppy pondering..."
+        ]
+        self.current_message_index = 0
+        self.spinner = Spinner("dots", text="")
+
+    def _calculate_rate(self) -> float:
+        """Calculate the current token rate"""
+        current_time = time.time()
+        if self.last_update_time:
+            time_diff = current_time - self.last_update_time
+            token_diff = self.token_count - self.last_token_count
+            if time_diff > 0:
+                rate = token_diff / time_diff
+                # Smooth the rate calculation with the current rate
+                if self.current_rate > 0:
+                    self.current_rate = (self.current_rate * 0.7) + (rate * 0.3)
+                else:
+                    self.current_rate = rate
+
+                # Only ensure rate is not negative
+                self.current_rate = max(0, self.current_rate)
+
+                # Update the global rate for other components to access
+                global CURRENT_TOKEN_RATE
+                CURRENT_TOKEN_RATE = self.current_rate
+
+        self.last_update_time = current_time
+        self.last_token_count = self.token_count
+        return self.current_rate
+
+    def update_rate_from_sse(self, completion_tokens: int, completion_time: float) -> None:
+        """Update the token rate directly using SSE time_info data
+
+        Args:
+            completion_tokens: Number of tokens in the completion (from SSE stream)
+            completion_time: Time taken for completion in seconds (from SSE stream)
+        """
+        if completion_time > 0:
+            # Using the direct t/s formula: tokens / time
+            rate = completion_tokens / completion_time
+
+            # Use a lighter smoothing for this more accurate data
+            if self.current_rate > 0:
+                self.current_rate = (self.current_rate * 0.3) + (rate * 0.7)  # Weight SSE data more heavily
+            else:
+                self.current_rate = rate
+
+            # Update the global rate
+            global CURRENT_TOKEN_RATE
+            CURRENT_TOKEN_RATE = self.current_rate
+
+    @staticmethod
+    def get_current_rate() -> float:
+        """Get the current token rate for use in other components"""
+        global CURRENT_TOKEN_RATE
+        return CURRENT_TOKEN_RATE
+
+    def update_token_count(self, tokens: int) -> None:
+        """Update the token count and recalculate the rate"""
+        if self.start_time is None:
+            self.start_time = time.time()
+            self.last_update_time = self.start_time
+
+        # Allow for incremental updates (common for streaming) or absolute updates
+        if tokens > self.token_count or tokens < 0:
+            # Incremental update or reset
+            self.token_count = tokens if tokens >= 0 else 0
+        else:
+            # If tokens <= current count but > 0, treat as incremental
+            # This handles simulated token streaming
+            self.token_count += tokens
+
+        self._calculate_rate()
+
+    def _get_status_panel(self) -> Panel:
+        """Generate a status panel with current rate and animated message"""
+        rate_text = f"{self.current_rate:.1f} t/s" if self.current_rate > 0 else "Warming up..."
+
+        # Update spinner
+        self.spinner.update()
+
+        # Rotate through loading messages every few updates
+        if int(time.time() * 2) % 4 == 0:
+            self.current_message_index = (self.current_message_index + 1) % len(self.loading_messages)
+
+        # Create a highly visible status message
+        status_text = Text.assemble(
+            Text(f"⏳ {rate_text} ", style="bold cyan"),
+            self.spinner,
+            Text(f" {self.loading_messages[self.current_message_index]} ⏳", style="bold yellow")
+        )
+
+        # Use expanded panel with more visible formatting
+        return Panel(
+            status_text,
+            title="[bold blue]Code Puppy Status[/bold blue]",
+            border_style="bright_blue",
+            expand=False,
+            padding=(1, 2)
+        )
+
+    def _get_status_text(self) -> Text:
+        """Generate a status text with current rate and animated message"""
+        rate_text = f"{self.current_rate:.1f} t/s" if self.current_rate > 0 else "Warming up..."
+
+        # Update spinner
+        self.spinner.update()
+
+        # Rotate through loading messages
+        self.current_message_index = (self.current_message_index + 1) % len(self.loading_messages)
+        message = self.loading_messages[self.current_message_index]
+
+        # Create a highly visible status text
+        return Text.assemble(
+            Text(f"⏳ {rate_text} 🐾", style="bold cyan"),
+            Text(f" {message}", style="yellow")
+        )
+
+    async def _update_display(self) -> None:
+        """Update the display continuously while active using Rich Live display"""
+        # Add a newline to ensure we're below the blue bar
+        self.console.print("\n")
+
+        # Create a Live display that will update in-place
+        with Live(
+            self._get_status_text(),
+            console=self.console,
+            refresh_per_second=2,  # Update twice per second
+            transient=False  # Keep the final state visible
+        ) as live:
+            # Keep updating the live display while active
+            while self.is_active:
+                live.update(self._get_status_text())
+                await asyncio.sleep(0.5)
+
+    def start(self) -> None:
+        """Start the status display"""
+        if not self.is_active:
+            self.is_active = True
+            self.start_time = time.time()
+            self.last_update_time = self.start_time
+            self.token_count = 0
+            self.last_token_count = 0
+            self.current_rate = 0
+            self.task = asyncio.create_task(self._update_display())
+
+    def stop(self) -> None:
+        """Stop the status display"""
+        if self.is_active:
+            self.is_active = False
+            if self.task:
+                self.task.cancel()
+                self.task = None
+
+            # Print final stats
+            elapsed = time.time() - self.start_time if self.start_time else 0
+            avg_rate = self.token_count / elapsed if elapsed > 0 else 0
+            self.console.print(f"[dim]Completed: {self.token_count} tokens in {elapsed:.1f}s ({avg_rate:.1f} t/s avg)[/dim]")
+
+            # Reset
+            self.start_time = None
+            self.token_count = 0
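StatusDisplay is driven entirely through start(), update_token_count() and stop() from inside a running asyncio event loop. A minimal usage sketch — the simulated token feed is invented for the example and is not how code_puppy/main.py feeds it:

    import asyncio

    from rich.console import Console

    from code_puppy.status_display import StatusDisplay

    async def demo() -> None:
        display = StatusDisplay(Console())
        display.start()  # spawns the background Live spinner/rate task
        try:
            for _ in range(20):
                display.update_token_count(32)  # pretend ~32 new tokens arrived
                await asyncio.sleep(0.25)
        finally:
            display.stop()  # cancels the task and prints the "Completed: ..." summary

    asyncio.run(demo())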
{code_puppy-0.0.89.dist-info → code_puppy-0.0.91.dist-info}/RECORD
CHANGED
@@ -1,12 +1,13 @@
 code_puppy/__init__.py,sha256=CWH46ZAmJRmHAbOiAhG07OrWYEcEt4yvDTkZU341Wag,169
-code_puppy/agent.py,sha256=
+code_puppy/agent.py,sha256=Hwlblm9Om4qZPOur28auxBkdJRAA9Kz-36YPQg7i3SI,3321
 code_puppy/agent_prompts.py,sha256=t3-lqDKrDxCKxFa_va4Suze9BT-JOu1dh9iGiAVNFO4,6828
 code_puppy/config.py,sha256=r5nw5ChOP8xd_K5yo8U5OtO2gy2bFhARiyNtDp1JrwQ,5013
-code_puppy/main.py,sha256=
-code_puppy/message_history_processor.py,sha256=
+code_puppy/main.py,sha256=_9PWsk1GLVvfH9w8KbcoMXvUCwhzxu6WRqguTIYVccE,21183
+code_puppy/message_history_processor.py,sha256=cL9DgSqpwWk0dSamc1I7NR1n1CNefvKlIMSCSA4YkM8,10034
 code_puppy/model_factory.py,sha256=HXuFHNkVjkCcorAd3ScFmSvBILO932UTq6OmNAqisT8,10898
 code_puppy/models.json,sha256=jr0-LW87aJS79GosVwoZdHeeq5eflPzgdPoMbcqpVA8,2728
 code_puppy/state_management.py,sha256=JkTkmq6f9rl_RHPDoBqJvbAzgaMsIkJf-k38ragItIo,1692
+code_puppy/status_display.py,sha256=tzllkO0ssmRZSn20mFXnEt1BZXH4oP9eHSSN-7Jyrok,7946
 code_puppy/summarization_agent.py,sha256=jHUQe6iYJsMT0ywEwO7CrhUIKEamO5imhAsDwvNuvow,2684
 code_puppy/token_utils.py,sha256=g7Jj6NAy_a2ab7BXpwyhktruR-QlUV670H_mCPZV1N4,2110
 code_puppy/version_checker.py,sha256=aRGulzuY4C4CdFvU1rITduyL-1xTFsn4GiD1uSfOl_Y,396
@@ -23,9 +24,9 @@ code_puppy/tools/common.py,sha256=UkhnfLG1bmd4f9nZCcmno088AtKtAnEES1tydxUN-Fk,32
 code_puppy/tools/file_modifications.py,sha256=BzQrGEacS2NZr2ru9N30x_Qd70JDudBKOAPO1XjBohg,13861
 code_puppy/tools/file_operations.py,sha256=5oJSJCKPLSgJ9YcD1CV9D-SJ0hnzVzmuAs-eiriLPLo,13615
 code_puppy/tools/token_check.py,sha256=F3eygdI8fgb6dfCrSkGw_OLI7cb_Kpa5ILft4BQ7hvY,525
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
+code_puppy-0.0.91.data/data/code_puppy/models.json,sha256=jr0-LW87aJS79GosVwoZdHeeq5eflPzgdPoMbcqpVA8,2728
+code_puppy-0.0.91.dist-info/METADATA,sha256=PaIPBOhLU92HD2QkF1lZVCgFBi8KpTV46pzzAIf620M,6351
+code_puppy-0.0.91.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+code_puppy-0.0.91.dist-info/entry_points.txt,sha256=d8YkBvIUxF-dHNJAj-x4fPEqizbY5d_TwvYpc01U5kw,58
+code_puppy-0.0.91.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
+code_puppy-0.0.91.dist-info/RECORD,,
File without changes
File without changes
File without changes
File without changes