astraagent 2.25.6 → 2.26.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/astra/chat.py CHANGED
@@ -1,6 +1,7 @@
 """Chat Mode - Enhanced with keyboard shortcuts, file access, and settings."""
 
 import asyncio
+import sys
 import json
 import os
 from pathlib import Path
@@ -15,11 +16,50 @@ from astra.tasks import TaskExecutor, format_task_result
 from astra.updater import UpdateManager, format_update_info
 
 
+class ThinkingIndicator:
+    """Animated thinking indicator."""
+    def __init__(self, message: str = "Thinking"):
+        self.message = message
+        self.running = False
+        self.task = None
+
+    async def _animate(self):
+        chars = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
+        idx = 0
+        while self.running:
+            sys.stdout.write(f"\r🤖 {chars[idx]} {self.message}...")
+            sys.stdout.flush()
+            idx = (idx + 1) % len(chars)
+            await asyncio.sleep(0.1)
+
+    async def __aenter__(self):
+        self.running = True
+        self.task = asyncio.create_task(self._animate())
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        self.running = False
+        if self.task:
+            self.task.cancel()
+            try:
+                await self.task
+            except asyncio.CancelledError:
+                pass
+        # Clear line completely
+        sys.stdout.write("\r" + " " * 40 + "\r")
+        sys.stdout.flush()
+
+
 class ChatHistory:
     """Manages chat history and logging."""
 
-    def __init__(self, logs_dir: str = "./logs/chats"):
-        self.logs_dir = Path(logs_dir)
+    def __init__(self, logs_dir: str = None):
+        if logs_dir:
+            self.logs_dir = Path(logs_dir)
+        else:
+            # Use user's home directory for logs
+            self.logs_dir = Path.home() / ".astra" / "logs" / "chats"
+
         self.logs_dir.mkdir(parents=True, exist_ok=True)
         self.messages: List[dict] = []
         self.provider_name: str = ""
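
The new ThinkingIndicator is an async context manager: entering it starts a background task that redraws one terminal line with a braille spinner, and exiting cancels that task and clears the line. Below is a minimal usage sketch of that pattern; the slow_call coroutine is a hypothetical stand-in for provider.generate, and the astra.chat import assumes the 2.26.0 module layout shown above.

import asyncio

from astra.chat import ThinkingIndicator  # class added in this release


async def slow_call() -> str:
    # Hypothetical stand-in for provider.generate(...)
    await asyncio.sleep(1.5)
    return "done"


async def main() -> None:
    # The spinner animates on one line while the awaited call runs;
    # __aexit__ cancels the animation task and wipes the line.
    async with ThinkingIndicator("Thinking"):
        result = await slow_call()
    print(f"🤖 {result}")


asyncio.run(main())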
@@ -192,8 +232,7 @@ TIPS:
   • Search works across entire device
   • Tasks handle file operations safely
   • Updates check npm and PyPI simultaneously
-    """)
-    print("="*70 + "\n")
+
   • Use Ctrl+C to interrupt response
   • Use Tab for auto-completion (if available)
   • Multi-line input: Use Shift+Enter
@@ -396,7 +435,11 @@ async def chat_with_provider(provider: LLMProvider, history: ChatHistory, provid
     print(f"Provider: {history.provider_name} | Model: {history.model_name}")
     print(f"Type '/help' for commands or just start typing\n")
 
-    conversation = []
+    from astra.prompts import SYSTEM_PROMPT
+
+    conversation = [
+        {"role": "system", "content": SYSTEM_PROMPT.format(workspace="./", tools="Internal Tools")}
+    ]
     should_close = False
 
     while True:
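
With this change the conversation list is no longer empty at the start of a session; it always begins with one system message built from astra.prompts.SYSTEM_PROMPT. A small sketch of the resulting shape follows; the prompt text here is a hypothetical stand-in, and only the {workspace} and {tools} placeholders are implied by the diff.

# Hypothetical stand-in for astra.prompts.SYSTEM_PROMPT, for illustration only.
SYSTEM_PROMPT = "You are Astra. Workspace: {workspace}. Tools: {tools}."

conversation = [
    {"role": "system", "content": SYSTEM_PROMPT.format(workspace="./", tools="Internal Tools")}
]
conversation.append({"role": "user", "content": "hello"})

# As in the response loop below, each dict is later wrapped in a Message
# before being passed to provider.generate(messages, temperature=0.7).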
@@ -699,22 +742,40 @@ async def chat_with_provider(provider: LLMProvider, history: ChatHistory, provid
         history.add_message("user", user_input)
 
         # Get AI response
-        try:
-            print("🤖 ", end="", flush=True)
-            messages = [Message(role=msg["role"], content=msg["content"]) for msg in conversation]
-            response = await provider.generate(messages, temperature=0.7)
-
-            ai_response = response.content
-            print(ai_response, end="\n\n", flush=True)
-
-            # Add to history
-            conversation.append({"role": "assistant", "content": ai_response})
-            history.add_message("assistant", ai_response)
-
-        except RuntimeError as e:
-            print(f"\n❌ Error: {str(e)}\n")
-        except Exception as e:
-            print(f"\n❌ Unexpected error: {str(e)}\n")
+        messages = [Message(role=msg["role"], content=msg["content"]) for msg in conversation]
+        max_retries = 3
+        retry_delay = 2
+
+        for attempt in range(max_retries):
+            try:
+                # Use Thinking Animation
+                async with ThinkingIndicator("Thinking"):
+                    response = await provider.generate(messages, temperature=0.7)
+
+                # Print response (Animation clears line automatically)
+                ai_response = response.content
+                print(f"🤖 {ai_response}\n")
+
+                # Add to history
+                conversation.append({"role": "assistant", "content": ai_response})
+                history.add_message("assistant", ai_response)
+                break  # Success
+
+            except RuntimeError as e:
+                err_msg = str(e)
+                if "429" in err_msg or "rate limit" in err_msg.lower():
+                    if attempt < max_retries - 1:
+                        print(f"\r⚠️ Rate limit hit. Retrying in {retry_delay}s... (Attempt {attempt+1}/{max_retries})")
+                        await asyncio.sleep(retry_delay)
+                        retry_delay *= 2
+                        continue
+
+                print(f"\n❌ Error: {err_msg}\n")
+                break
+
+            except Exception as e:
+                print(f"\n❌ Unexpected error: {str(e)}\n")
+                break
 
     return should_close
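
For the retry loop above, the worst case for a rate-limited provider is three attempts with waits of 2 s and then 4 s between them (no sleep after the final attempt), so roughly six seconds of backoff before the error is printed. A sketch of that schedule using the same constants:

# Backoff schedule implied by max_retries = 3 and retry_delay = 2 (doubling each retry).
max_retries, retry_delay = 3, 2
delays = []
for attempt in range(max_retries - 1):  # the last attempt is not followed by a sleep
    delays.append(retry_delay)
    retry_delay *= 2
print(delays)        # [2, 4]
print(sum(delays))   # 6 -> total seconds spent waiting before giving up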