abstractassistant 0.3.4__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff compares publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- abstractassistant/app.py +69 -6
- abstractassistant/cli.py +104 -85
- abstractassistant/core/agent_host.py +583 -0
- abstractassistant/core/llm_manager.py +338 -431
- abstractassistant/core/session_index.py +293 -0
- abstractassistant/core/session_store.py +79 -0
- abstractassistant/core/tool_policy.py +58 -0
- abstractassistant/core/transcript_summary.py +434 -0
- abstractassistant/ui/history_dialog.py +504 -29
- abstractassistant/ui/provider_manager.py +2 -2
- abstractassistant/ui/qt_bubble.py +2289 -489
- abstractassistant-0.4.0.dist-info/METADATA +168 -0
- abstractassistant-0.4.0.dist-info/RECORD +32 -0
- {abstractassistant-0.3.4.dist-info → abstractassistant-0.4.0.dist-info}/WHEEL +1 -1
- {abstractassistant-0.3.4.dist-info → abstractassistant-0.4.0.dist-info}/entry_points.txt +1 -0
- abstractassistant-0.3.4.dist-info/METADATA +0 -297
- abstractassistant-0.3.4.dist-info/RECORD +0 -27
- {abstractassistant-0.3.4.dist-info → abstractassistant-0.4.0.dist-info}/licenses/LICENSE +0 -0
- {abstractassistant-0.3.4.dist-info → abstractassistant-0.4.0.dist-info}/top_level.txt +0 -0
abstractassistant/core/llm_manager.py

@@ -1,475 +1,382 @@

In 0.3.4 this module wrapped `abstractcore.BasicSession` directly: the common tools were imported behind a `TOOLS_AVAILABLE` guard, provider/model switches went through `_update_session_llm()` (preserving chat history), context limits came from `_update_token_limits_from_abstractcore()`, and the class exposed `get_providers()`, `get_models()`, and `get_status_info()` helpers. In 0.4.0 the module is rewritten as a back-compat façade over the new agentic backend; the new version reads:

"""Agentic manager for AbstractAssistant (legacy name retained for UI compatibility).

This module used to wrap `abstractcore.BasicSession`. It now hosts an agentic backend
powered by:
- AbstractAgent (ReAct/CodeAct/MemAct patterns)
- AbstractRuntime (durable runs + waits)
- AbstractCore (providers + tool schemas/normalization)
"""

from __future__ import annotations

from dataclasses import dataclass
import threading
from pathlib import Path
from typing import Any, Dict, List, Optional

from .agent_host import AgentHost, AgentHostConfig
from .session_index import SessionIndex
from .session_store import SessionStore


@dataclass
class TokenUsage:
    """Best-effort token usage information for UI display."""

    current_session: int = 0
    max_context: int = 0
    input_tokens: int = 0
    output_tokens: int = 0


class _SessionMessage:
    """Simple message object with `.role`/`.content` attributes (UI expects this shape)."""

    def __init__(self, role: str, content: str):
        self.role = str(role or "")
        self.content = str(content or "")


class _SessionView:
    """Minimal session view exposed to the Qt UI."""

    def __init__(self, messages: List[_SessionMessage]):
        self.messages = list(messages)

    def get_token_estimate(self) -> int:
        # Heuristic: ~4 chars per token for English-ish text.
        total_chars = sum(len(m.content or "") for m in self.messages)
        return max(0, int(total_chars // 4))


class LLMManager:
    """Back-compat façade: drive an agentic backend and expose a session-like view."""

    def __init__(self, config=None, debug: bool = False, *, data_dir: Optional[Path] = None):
        if config is None:
            from ..config import Config

            config = Config.default()

        self.config = config
        self.debug = bool(debug)

        self.data_dir = (Path(data_dir).expanduser() if data_dir is not None else (Path.home() / ".abstractassistant"))
        self._session_index = SessionIndex(self.data_dir)
        self._title_seeds: Dict[str, str] = {}
        self._title_lock = threading.Lock()

        self.current_provider: str = str(getattr(config.llm, "default_provider", "") or "ollama")
        self.current_model: str = str(getattr(config.llm, "default_model", "") or "qwen3:4b-instruct")

        self._tts_mode: bool = False
        self._host = self._build_host_for_active_session()

        # UI-facing compatibility fields.
        self.token_usage = TokenUsage()
        self.current_session: Optional[_SessionView] = None
        self.llm = self._best_effort_llm_for_ui()
        self._refresh_session_view()

    @property
    def agent_host(self) -> AgentHost:
        return self._host

    @property
    def active_session_id(self) -> str:
        return self._session_index.active_session_id

    def list_sessions(self) -> List[Dict[str, str]]:
        out: List[Dict[str, str]] = []
        for rec in self._session_index.records():
            title = rec.title
            if str(title).strip().lower() in {"", "new session"}:
                fallback = self._fallback_title_for_session(rec.session_id)
                if fallback:
                    title = fallback
            out.append(
                {
                    "session_id": rec.session_id,
                    "title": str(title),
                    "created_at": rec.created_at,
                    "updated_at": rec.updated_at,
                }
            )
        return out

    def create_new_session(self) -> str:
        rec = self._session_index.create_session()
        self._host = self._build_host_for_session(rec.session_id)
        self.llm = self._best_effort_llm_for_ui()
        self._refresh_session_view()
        return rec.session_id

    def switch_session(self, session_id: str) -> None:
        sid = str(session_id or "").strip()
        if not sid:
            raise ValueError("session_id must be non-empty")
        if sid == self.active_session_id:
            return
        self._session_index.set_active(sid)
        self._host = self._build_host_for_session(sid)
        self.llm = self._best_effort_llm_for_ui()
        self._refresh_session_view()

    def refresh(self) -> None:
        """Refresh the UI-facing session view from the durable snapshot."""
        try:
            self._session_index.touch(self.active_session_id)
        except Exception:
            pass
        self._refresh_session_view()

    def update_active_session_title_async(self, *, provider: str, model: str, on_done: Optional[Any] = None) -> None:
        """Best-effort: generate and persist a 1-line title for the active session.

        Uses the active provider/model. Runs in a background thread.
        """
        try:
            messages = getattr(self._host.snapshot, "messages", None)
        except Exception:
            messages = None
        if not isinstance(messages, list) or not messages:
            return

        first_q, last_q = self._extract_first_last_questions(messages)
        if not first_q or not last_q:
            return

        seed = f"{first_q}\n---\n{last_q}"
        sid = self.active_session_id
        with self._title_lock:
            if self._title_seeds.get(sid) == seed:
                return
            self._title_seeds[sid] = seed

        def _run() -> None:
            title = self._generate_session_title(provider=provider, model=model, first=first_q, last=last_q)
            if not title:
                return
            try:
                self._session_index.update_title(sid, title)
            except Exception:
                return
            if callable(on_done):
                try:
                    on_done(sid, title)
                except Exception:
                    return

        threading.Thread(target=_run, daemon=True).start()

    @staticmethod
    def _extract_first_last_questions(messages: List[Dict[str, Any]]) -> tuple[Optional[str], Optional[str]]:
        prompts: List[str] = []
        for m in messages:
            if not isinstance(m, dict):
                continue
            if str(m.get("role") or "") != "user":
                continue
            content = str(m.get("content") or "").strip()
            if not content:
                continue
            # Ignore runtime ask_user responses (not "questions").
            if content.startswith("[User response]:"):
                continue
            prompts.append(content)
        if not prompts:
            return None, None
        return prompts[0], prompts[-1]

    def _fallback_title_for_session(self, session_id: str) -> Optional[str]:
        """Local-only fallback title derived from transcript (no network)."""
        sid = str(session_id or "").strip()
        if not sid:
            return None
        try:
            data_dir = self._session_index.data_dir_for(sid)
            snap = SessionStore(Path(data_dir) / "session.json").load()
        except Exception:
            return None
        if snap is None or not isinstance(getattr(snap, "messages", None), list):
            return None
        first, last = self._extract_first_last_questions(list(snap.messages))
        if not first and not last:
            return None

        def _clean(s: Optional[str]) -> str:
            txt = str(s or "").replace("\n", " ").replace("\r", " ").strip()
            return " ".join(txt.split())

        def _trunc(txt: str, n: int) -> str:
            t = _clean(txt)
            if len(t) <= n:
                return t
            return (t[: max(0, n - 1)].rstrip() + "…").strip()

        first_txt = _clean(first)
        last_txt = _clean(last)
        if not first_txt:
            return _trunc(last_txt, 80) if last_txt else None
        if not last_txt or first_txt == last_txt:
            return _trunc(first_txt, 80)
        return f"{_trunc(first_txt, 34)} → {_trunc(last_txt, 34)}"

    @staticmethod
    def _generate_session_title(*, provider: str, model: str, first: str, last: str) -> Optional[str]:
        """Return a single-line title or None (best-effort)."""
        try:
            from abstractcore import create_llm
        except Exception:
            return None

        try:
            llm = create_llm(str(provider), model=str(model))
            prompt = (
                "Generate a single-line title for this chat session.\n"
                "- Max 60 characters.\n"
                "- No quotes.\n"
                "- Be specific.\n\n"
                f"First question: {first}\n"
                f"Most recent question: {last}\n"
            )
            resp = llm.generate(prompt, max_output_tokens=64, temperature=0.2)
            text = getattr(resp, "content", None)
            if text is None:
                text = str(resp)
            title = str(text or "").strip().splitlines()[0].strip()
            title = title.strip(" \"'“”")
            if len(title) > 80:
                title = title[:80].rstrip()
            return title or None
        except Exception:
            return None

    def _build_host_for_active_session(self) -> AgentHost:
        return self._build_host_for_session(self.active_session_id)

    def _build_host_for_session(self, session_id: str) -> AgentHost:
        data_dir = self._session_index.data_dir_for(session_id)
        return AgentHost(
            AgentHostConfig(
                provider=self.current_provider,
                model=self.current_model,
                agent_kind="react",
                data_dir=data_dir,
            )
        )

    def _best_effort_llm_for_ui(self) -> Optional[Any]:
        """Return the underlying AbstractCore provider instance when available (best-effort)."""
        try:
            rt = getattr(self._host, "_runtime", None)
            client = getattr(rt, "_abstractcore_llm_client", None)
            getter = getattr(client, "get_provider_instance", None)
            if callable(getter):
                return getter(provider=self.current_provider, model=self.current_model)
        except Exception:
            return None
        return None

    def _refresh_session_view(self) -> None:
        snap = self._host.snapshot
        msgs: List[_SessionMessage] = []
        for m in snap.messages:
            if not isinstance(m, dict):
                continue
            role = str(m.get("role") or "")
            content = str(m.get("content") or "")
            if role == "system":
                continue
            msgs.append(_SessionMessage(role=role, content=content))
        self.current_session = _SessionView(msgs)
        if self.current_session:
            self.token_usage.current_session = self.current_session.get_token_estimate()

        # Best-effort max context from AbstractCore detection (when `llm` is present).
        max_tokens = None
        try:
            max_tokens = getattr(self.llm, "max_tokens", None)
        except Exception:
            max_tokens = None
        if isinstance(max_tokens, int) and max_tokens > 0:
            self.token_usage.max_context = max_tokens

    def reset_active_session(self, tts_mode: bool = False) -> None:
        self._tts_mode = bool(tts_mode)
        self._host.clear_messages()
        self._refresh_session_view()

    def clear_session(self):
        self.reset_active_session(tts_mode=False)

    def update_session_mode(self, tts_mode: bool = False):
        self._tts_mode = bool(tts_mode)

    def save_session(self, filepath: str) -> bool:
        try:
            self._host.export_messages(Path(filepath))
            return True
        except Exception:
            return False

    def load_session(self, filepath: str) -> bool:
        try:
            self._host.import_messages(Path(filepath))
            self._refresh_session_view()
            return True
        except Exception:
            return False

    def set_provider(self, provider: str, model: Optional[str] = None):
        self.current_provider = str(provider or "").strip() or self.current_provider
        if model is not None:
            self.current_model = str(model or "").strip() or self.current_model
        self.llm = self._best_effort_llm_for_ui()

    def set_model(self, model: str):
        self.current_model = str(model or "").strip() or self.current_model
        self.llm = self._best_effort_llm_for_ui()

    def generate_response(
        self,
        message: str,
        provider: Optional[str] = None,
        model: Optional[str] = None,
        media: Optional[List[str]] = None,
    ) -> str:
        """Run one agentic turn and return the final answer text.

        Note: tool approval is currently auto-managed:
        - safe/known read-only tools are auto-approved
        - dangerous/unknown tools are denied unless explicitly enabled in the UI layer
        """
        provider_eff = str(provider or "").strip() or self.current_provider
        model_eff = str(model or "").strip() or self.current_model

        system_extra = None
        if self._tts_mode:
            system_extra = (
                "You are in voice mode.\n"
                "- Keep responses concise and conversational.\n"
                "- Avoid markdown and heavy formatting.\n"
            )

        final = ""
        for ev in self._host.run_turn(
            user_text=str(message),
            attachments=list(media) if media else None,
            provider=provider_eff,
            model=model_eff,
            system_prompt_extra=system_extra,
        ):
            if isinstance(ev, dict) and ev.get("type") == "assistant":
                final = str(ev.get("content") or "")

        # Refresh session view for history/token display.
        self._refresh_session_view()
        return final

    def get_token_usage(self) -> TokenUsage:
        self._refresh_session_view()
        return self.token_usage