cite-agent 1.0.4-py3-none-any.whl → 1.2.3-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Potentially problematic release: this version of cite-agent might be problematic.
- cite_agent/__init__.py +1 -1
- cite_agent/account_client.py +19 -46
- cite_agent/agent_backend_only.py +30 -4
- cite_agent/cli.py +397 -64
- cite_agent/cli_conversational.py +294 -0
- cite_agent/cli_workflow.py +276 -0
- cite_agent/enhanced_ai_agent.py +3222 -117
- cite_agent/session_manager.py +215 -0
- cite_agent/setup_config.py +5 -21
- cite_agent/streaming_ui.py +252 -0
- cite_agent/updater.py +50 -17
- cite_agent/workflow.py +427 -0
- cite_agent/workflow_integration.py +275 -0
- cite_agent-1.2.3.dist-info/METADATA +442 -0
- cite_agent-1.2.3.dist-info/RECORD +54 -0
- {cite_agent-1.0.4.dist-info → cite_agent-1.2.3.dist-info}/top_level.txt +1 -0
- src/__init__.py +1 -0
- src/services/__init__.py +132 -0
- src/services/auth_service/__init__.py +3 -0
- src/services/auth_service/auth_manager.py +33 -0
- src/services/graph/__init__.py +1 -0
- src/services/graph/knowledge_graph.py +194 -0
- src/services/llm_service/__init__.py +5 -0
- src/services/llm_service/llm_manager.py +495 -0
- src/services/paper_service/__init__.py +5 -0
- src/services/paper_service/openalex.py +231 -0
- src/services/performance_service/__init__.py +1 -0
- src/services/performance_service/rust_performance.py +395 -0
- src/services/research_service/__init__.py +23 -0
- src/services/research_service/chatbot.py +2056 -0
- src/services/research_service/citation_manager.py +436 -0
- src/services/research_service/context_manager.py +1441 -0
- src/services/research_service/conversation_manager.py +597 -0
- src/services/research_service/critical_paper_detector.py +577 -0
- src/services/research_service/enhanced_research.py +121 -0
- src/services/research_service/enhanced_synthesizer.py +375 -0
- src/services/research_service/query_generator.py +777 -0
- src/services/research_service/synthesizer.py +1273 -0
- src/services/search_service/__init__.py +5 -0
- src/services/search_service/indexer.py +186 -0
- src/services/search_service/search_engine.py +342 -0
- src/services/simple_enhanced_main.py +287 -0
- cite_agent/__distribution__.py +0 -7
- cite_agent-1.0.4.dist-info/METADATA +0 -234
- cite_agent-1.0.4.dist-info/RECORD +0 -23
- {cite_agent-1.0.4.dist-info → cite_agent-1.2.3.dist-info}/WHEEL +0 -0
- {cite_agent-1.0.4.dist-info → cite_agent-1.2.3.dist-info}/entry_points.txt +0 -0
- {cite_agent-1.0.4.dist-info → cite_agent-1.2.3.dist-info}/licenses/LICENSE +0 -0
cite_agent/enhanced_ai_agent.py
CHANGED
|
@@ -1,172 +1,3277 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
1
2
|
"""
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
Local API keys are not supported.
|
|
3
|
+
Enhanced Nocturnal AI Agent - Production-Ready Research Assistant
|
|
4
|
+
Integrates with Archive API and FinSight API for comprehensive research capabilities
|
|
5
5
|
"""
|
|
6
6
|
|
|
7
|
+
import asyncio
|
|
8
|
+
import hashlib
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
7
11
|
import os
|
|
8
|
-
import
|
|
9
|
-
|
|
10
|
-
|
|
12
|
+
import re
|
|
13
|
+
import shlex
|
|
14
|
+
import subprocess
|
|
15
|
+
import time
|
|
16
|
+
from importlib import resources
|
|
17
|
+
|
|
18
|
+
import aiohttp
|
|
11
19
|
from datetime import datetime, timezone
|
|
20
|
+
from typing import Dict, Any, List, Optional, Tuple
|
|
21
|
+
from urllib.parse import urlparse
|
|
22
|
+
from dataclasses import dataclass, field
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
|
|
25
|
+
from .telemetry import TelemetryManager
|
|
26
|
+
from .setup_config import DEFAULT_QUERY_LIMIT
|
|
27
|
+
|
|
28
|
+
# Suppress noise
|
|
29
|
+
logging.basicConfig(level=logging.ERROR)
|
|
30
|
+
logger = logging.getLogger(__name__)
|
|
31
|
+
|
|
32
|
+
# Removed: No direct Groq import in production
|
|
33
|
+
# All LLM calls go through backend API for monetization
|
|
34
|
+
# Backend has the API keys, not the client
|
|
12
35
|
|
|
13
36
|
@dataclass
|
|
14
37
|
class ChatRequest:
|
|
15
38
|
question: str
|
|
16
39
|
user_id: str = "default"
|
|
17
40
|
conversation_id: str = "default"
|
|
18
|
-
context: Dict[str, Any] =
|
|
41
|
+
context: Dict[str, Any] = field(default_factory=dict)
|
|
42
|
+
|
|
19
43
|
|
|
20
44
|
@dataclass
|
|
21
45
|
class ChatResponse:
|
|
22
46
|
response: str
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
model: str = "
|
|
26
|
-
timestamp: str =
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
self.citations = []
|
|
33
|
-
if self.tools_used is None:
|
|
34
|
-
self.tools_used = []
|
|
47
|
+
tools_used: List[str] = field(default_factory=list)
|
|
48
|
+
reasoning_steps: List[str] = field(default_factory=list)
|
|
49
|
+
model: str = "enhanced-nocturnal-agent"
|
|
50
|
+
timestamp: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
|
|
51
|
+
tokens_used: int = 0
|
|
52
|
+
confidence_score: float = 0.0
|
|
53
|
+
execution_results: Dict[str, Any] = field(default_factory=dict)
|
|
54
|
+
api_results: Dict[str, Any] = field(default_factory=dict)
|
|
55
|
+
error_message: Optional[str] = None
|
|
35
56
|
|
|
36
57
|
class EnhancedNocturnalAgent:
|
|
37
58
|
"""
|
|
38
|
-
|
|
39
|
-
|
|
59
|
+
Enhanced AI Agent with full API integration:
|
|
60
|
+
- Archive API for academic research
|
|
61
|
+
- FinSight API for financial data
|
|
62
|
+
- Shell access for system operations
|
|
63
|
+
- Memory system for context retention
|
|
40
64
|
"""
|
|
41
|
-
|
|
65
|
+
|
|
42
66
|
def __init__(self):
|
|
43
|
-
self.
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
self.
|
|
48
|
-
self.
|
|
67
|
+
self.client = None
|
|
68
|
+
self.conversation_history = []
|
|
69
|
+
self.shell_session = None
|
|
70
|
+
self.memory = {}
|
|
71
|
+
self.daily_token_usage = 0
|
|
72
|
+
self.daily_limit = 100000
|
|
73
|
+
self.daily_query_limit = self._resolve_daily_query_limit()
|
|
74
|
+
self.per_user_query_limit = self.daily_query_limit
|
|
75
|
+
self.daily_query_count = 0
|
|
76
|
+
self.total_cost = 0.0
|
|
77
|
+
self.cost_per_1k_tokens = 0.0001 # Groq pricing estimate
|
|
78
|
+
self._auto_update_enabled = True
|
|
79
|
+
|
|
80
|
+
# Workflow integration
|
|
81
|
+
from .workflow import WorkflowManager
|
|
82
|
+
self.workflow = WorkflowManager()
|
|
83
|
+
self.last_paper_result = None # Track last paper mentioned for "save that"
|
|
84
|
+
try:
|
|
85
|
+
self.per_user_token_limit = int(os.getenv("GROQ_PER_USER_TOKENS", 50000))
|
|
86
|
+
except (TypeError, ValueError):
|
|
87
|
+
self.per_user_token_limit = 50000 # 50 queries at ~1000 tokens each
|
|
88
|
+
self.user_token_usage: Dict[str, int] = {}
|
|
89
|
+
self.user_query_counts: Dict[str, int] = {}
|
|
90
|
+
self._usage_day = datetime.now(timezone.utc).strftime("%Y-%m-%d")
|
|
91
|
+
self._initialized = False
|
|
92
|
+
self._env_loaded = False
|
|
93
|
+
self._init_lock: Optional[asyncio.Lock] = None
|
|
94
|
+
self._default_headers: Dict[str, str] = {}
|
|
49
95
|
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
self.
|
|
96
|
+
# API clients
|
|
97
|
+
self.archive_client = None
|
|
98
|
+
self.finsight_client = None
|
|
99
|
+
self.session = None
|
|
100
|
+
self.company_name_to_ticker = {}
|
|
54
101
|
|
|
55
|
-
#
|
|
56
|
-
|
|
102
|
+
# Groq key rotation state
|
|
103
|
+
self.api_keys: List[str] = []
|
|
104
|
+
self.current_key_index: int = 0
|
|
105
|
+
self.current_api_key: Optional[str] = None
|
|
106
|
+
self.exhausted_keys: Dict[str, float] = {}
|
|
107
|
+
try:
|
|
108
|
+
self.key_recheck_seconds = float(
|
|
109
|
+
os.getenv("GROQ_KEY_RECHECK_SECONDS", 3600)
|
|
110
|
+
)
|
|
111
|
+
except Exception:
|
|
112
|
+
self.key_recheck_seconds = 3600.0
|
|
113
|
+
|
|
114
|
+
self._service_roots: List[str] = []
|
|
115
|
+
self._backend_health_cache: Dict[str, Dict[str, Any]] = {}
|
|
116
|
+
|
|
117
|
+
# Initialize authentication
|
|
118
|
+
self.auth_token = None
|
|
119
|
+
self.user_id = None
|
|
120
|
+
self._load_authentication()
|
|
121
|
+
try:
|
|
122
|
+
self._health_ttl = float(os.getenv("NOCTURNAL_HEALTH_TTL", 30))
|
|
123
|
+
except Exception:
|
|
124
|
+
self._health_ttl = 30.0
|
|
125
|
+
self._recent_sources: List[Dict[str, Any]] = []
|
|
126
|
+
|
|
127
|
+
def _load_authentication(self):
|
|
128
|
+
"""Load authentication from session file"""
|
|
129
|
+
use_local_keys = os.getenv("USE_LOCAL_KEYS", "true").lower() == "true"
|
|
130
|
+
|
|
131
|
+
if not use_local_keys:
|
|
132
|
+
# Backend mode - load auth token from session
|
|
57
133
|
from pathlib import Path
|
|
58
|
-
|
|
59
|
-
if
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
134
|
+
session_file = Path.home() / ".nocturnal_archive" / "session.json"
|
|
135
|
+
if session_file.exists():
|
|
136
|
+
try:
|
|
137
|
+
import json
|
|
138
|
+
with open(session_file, 'r') as f:
|
|
139
|
+
session_data = json.load(f)
|
|
140
|
+
self.auth_token = session_data.get('access_token')
|
|
141
|
+
self.user_id = session_data.get('user_id')
|
|
142
|
+
except Exception:
|
|
143
|
+
self.auth_token = None
|
|
144
|
+
self.user_id = None
|
|
145
|
+
else:
|
|
146
|
+
self.auth_token = None
|
|
147
|
+
self.user_id = None
|
|
148
|
+
else:
|
|
149
|
+
# Local keys mode
|
|
150
|
+
self.auth_token = None
|
|
151
|
+
self.user_id = None
|
|
152
|
+
self._session_topics: Dict[str, Dict[str, Any]] = {}
|
|
65
153
|
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
154
|
+
# Initialize API clients
|
|
155
|
+
self._init_api_clients()
|
|
156
|
+
self._load_ticker_map()
|
|
157
|
+
|
|
158
|
+
def get_usage_stats(self) -> Dict[str, Any]:
|
|
159
|
+
"""Get current usage statistics and cost information"""
|
|
160
|
+
limit = self.daily_limit if self.daily_limit > 0 else 1
|
|
161
|
+
remaining = max(self.daily_limit - self.daily_token_usage, 0)
|
|
162
|
+
usage_percentage = (self.daily_token_usage / limit) * 100 if limit else 0.0
|
|
163
|
+
return {
|
|
164
|
+
"daily_tokens_used": self.daily_token_usage,
|
|
165
|
+
"daily_token_limit": self.daily_limit,
|
|
166
|
+
"remaining_tokens": remaining,
|
|
167
|
+
"usage_percentage": usage_percentage,
|
|
168
|
+
"total_cost": self.total_cost,
|
|
169
|
+
"cost_per_1k_tokens": self.cost_per_1k_tokens,
|
|
170
|
+
"estimated_monthly_cost": self.total_cost * 30, # Rough estimate
|
|
171
|
+
"per_user_token_limit": self.per_user_token_limit,
|
|
172
|
+
"daily_queries_used": self.daily_query_count,
|
|
173
|
+
"daily_query_limit": self.daily_query_limit,
|
|
174
|
+
"per_user_query_limit": self.per_user_query_limit,
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
async def close(self):
|
|
178
|
+
"""Cleanly close resources (HTTP session and shell)."""
|
|
179
|
+
lock = self._get_init_lock()
|
|
180
|
+
async with lock:
|
|
181
|
+
await self._close_resources()
|
|
182
|
+
|
|
183
|
+
async def _close_resources(self):
|
|
184
|
+
try:
|
|
185
|
+
if self.session and not self.session.closed:
|
|
186
|
+
await self.session.close()
|
|
187
|
+
except Exception:
|
|
188
|
+
pass
|
|
189
|
+
finally:
|
|
190
|
+
self.session = None
|
|
191
|
+
|
|
192
|
+
try:
|
|
193
|
+
if self.shell_session:
|
|
194
|
+
self.shell_session.terminate()
|
|
195
|
+
except Exception:
|
|
196
|
+
pass
|
|
197
|
+
finally:
|
|
198
|
+
self.shell_session = None
|
|
199
|
+
|
|
200
|
+
self.client = None
|
|
201
|
+
self.current_api_key = None
|
|
202
|
+
self.current_key_index = 0
|
|
203
|
+
self._initialized = False
|
|
204
|
+
self.exhausted_keys.clear()
|
|
205
|
+
|
|
206
|
+
def _init_api_clients(self):
|
|
207
|
+
"""Initialize API clients for Archive and FinSight"""
|
|
208
|
+
try:
|
|
209
|
+
def _normalize_base(value: Optional[str], fallback: str) -> str:
|
|
210
|
+
candidate = (value or fallback).strip()
|
|
211
|
+
return candidate[:-1] if candidate.endswith('/') else candidate
|
|
212
|
+
|
|
213
|
+
archive_env = (
|
|
214
|
+
os.getenv("ARCHIVE_API_URL")
|
|
215
|
+
or os.getenv("NOCTURNAL_ARCHIVE_API_URL")
|
|
216
|
+
)
|
|
217
|
+
finsight_env = (
|
|
218
|
+
os.getenv("FINSIGHT_API_URL")
|
|
219
|
+
or os.getenv("NOCTURNAL_FINSIGHT_API_URL")
|
|
71
220
|
)
|
|
72
|
-
print(f"✅ Connected to backend: {self.backend_url}")
|
|
73
221
|
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
Send chat request to backend API.
|
|
222
|
+
# Archive API client
|
|
223
|
+
self.archive_base_url = _normalize_base(archive_env, "https://cite-agent-api-720dfadd602c.herokuapp.com/api")
|
|
77
224
|
|
|
78
|
-
|
|
79
|
-
|
|
225
|
+
# FinSight API client
|
|
226
|
+
self.finsight_base_url = _normalize_base(finsight_env, "https://cite-agent-api-720dfadd602c.herokuapp.com/v1/finance")
|
|
80
227
|
|
|
81
|
-
|
|
82
|
-
|
|
228
|
+
# Workspace Files API client
|
|
229
|
+
files_env = os.getenv("FILES_API_URL")
|
|
230
|
+
self.files_base_url = _normalize_base(files_env, "http://127.0.0.1:8000/v1/files")
|
|
83
231
|
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
"
|
|
232
|
+
# Shared API key handling for protected routes
|
|
233
|
+
self.api_key = (
|
|
234
|
+
os.getenv("NOCTURNAL_KEY")
|
|
235
|
+
or os.getenv("NOCTURNAL_API_KEY")
|
|
236
|
+
or os.getenv("X_API_KEY")
|
|
237
|
+
or "demo-key-123"
|
|
90
238
|
)
|
|
239
|
+
self._default_headers.clear()
|
|
240
|
+
if self.api_key:
|
|
241
|
+
self._default_headers["X-API-Key"] = self.api_key
|
|
242
|
+
|
|
243
|
+
self._update_service_roots()
|
|
244
|
+
|
|
245
|
+
# Only show init messages in debug mode
|
|
246
|
+
debug_mode = os.getenv("NOCTURNAL_DEBUG", "").lower() == "1"
|
|
247
|
+
if debug_mode:
|
|
248
|
+
if self.api_key == "demo-key-123":
|
|
249
|
+
print("⚠️ Using demo API key")
|
|
250
|
+
print(f"✅ API clients initialized (Archive={self.archive_base_url}, FinSight={self.finsight_base_url})")
|
|
251
|
+
|
|
252
|
+
except Exception as e:
|
|
253
|
+
print(f"⚠️ API client initialization warning: {e}")
|
|
254
|
+
|
|
255
|
+
def _update_service_roots(self) -> None:
|
|
256
|
+
roots = set()
|
|
257
|
+
for base in (getattr(self, "archive_base_url", None), getattr(self, "finsight_base_url", None), getattr(self, "files_base_url", None)):
|
|
258
|
+
if not base:
|
|
259
|
+
continue
|
|
260
|
+
parsed = urlparse(base)
|
|
261
|
+
if parsed.scheme and parsed.netloc:
|
|
262
|
+
roots.add(f"{parsed.scheme}://{parsed.netloc}")
|
|
263
|
+
|
|
264
|
+
if not roots:
|
|
265
|
+
roots.add("http://127.0.0.1:8000")
|
|
266
|
+
|
|
267
|
+
self._service_roots = sorted(roots)
|
|
268
|
+
# Drop caches for roots that no longer exist
|
|
269
|
+
for cached in list(self._backend_health_cache.keys()):
|
|
270
|
+
if cached not in self._service_roots:
|
|
271
|
+
self._backend_health_cache.pop(cached, None)
|
|
272
|
+
|
|
273
|
+
async def _probe_health_endpoint(self, root: str) -> Tuple[bool, str]:
|
|
274
|
+
if not self.session:
|
|
275
|
+
return False, "HTTP session not initialized"
|
|
276
|
+
|
|
277
|
+
if not hasattr(self.session, "get"):
|
|
278
|
+
# Assume healthy when using lightweight mocks that lack GET semantics
|
|
279
|
+
return True, ""
|
|
280
|
+
|
|
281
|
+
candidates = ["/readyz", "/health", "/api/health", "/livez"]
|
|
282
|
+
last_detail = ""
|
|
283
|
+
|
|
284
|
+
for endpoint in candidates:
|
|
285
|
+
try:
|
|
286
|
+
async with self.session.get(f"{root}{endpoint}", timeout=5) as response:
|
|
287
|
+
if response.status == 200:
|
|
288
|
+
return True, ""
|
|
289
|
+
body = await response.text()
|
|
290
|
+
if response.status == 404:
|
|
291
|
+
# Endpoint absent—record detail but keep probing
|
|
292
|
+
last_detail = (
|
|
293
|
+
f"{endpoint} missing (404)."
|
|
294
|
+
if not body else f"{endpoint} missing (404): {body.strip()}"
|
|
295
|
+
)
|
|
296
|
+
continue
|
|
297
|
+
last_detail = (
|
|
298
|
+
f"{endpoint} returned {response.status}"
|
|
299
|
+
if not body else f"{endpoint} returned {response.status}: {body.strip()}"
|
|
300
|
+
)
|
|
301
|
+
except Exception as exc:
|
|
302
|
+
last_detail = f"{endpoint} failed: {exc}"
|
|
303
|
+
|
|
304
|
+
# Fall back to a lightweight root probe so services without explicit
|
|
305
|
+
# health endpoints don't register as offline.
|
|
306
|
+
try:
|
|
307
|
+
async with self.session.get(root, timeout=5) as response:
|
|
308
|
+
if response.status < 500:
|
|
309
|
+
fallback_detail = f"fallback probe returned {response.status}"
|
|
310
|
+
if response.status == 200:
|
|
311
|
+
detail = (f"{last_detail}; {fallback_detail}" if last_detail else "")
|
|
312
|
+
else:
|
|
313
|
+
detail = (
|
|
314
|
+
f"{last_detail}; {fallback_detail}"
|
|
315
|
+
if last_detail else f"Health endpoint unavailable; {fallback_detail}"
|
|
316
|
+
)
|
|
317
|
+
return True, detail
|
|
318
|
+
except Exception as exc: # pragma: no cover - network failure already captured above
|
|
319
|
+
last_detail = last_detail or f"Fallback probe failed: {exc}"
|
|
320
|
+
|
|
321
|
+
return False, last_detail or f"Health check failed for {root}"
|
|
322
|
+
|
|
323
|
+
async def _check_backend_health(self, force: bool = False) -> Dict[str, Any]:
|
|
324
|
+
now = time.monotonic()
|
|
325
|
+
overall_ok = True
|
|
326
|
+
details: List[str] = []
|
|
327
|
+
|
|
328
|
+
if not self._service_roots:
|
|
329
|
+
self._update_service_roots()
|
|
330
|
+
|
|
331
|
+
for root in self._service_roots:
|
|
332
|
+
cache = self._backend_health_cache.get(root)
|
|
333
|
+
if cache and not force and now - cache.get("timestamp", 0.0) < self._health_ttl:
|
|
334
|
+
if not cache.get("ok", False) and cache.get("detail"):
|
|
335
|
+
details.append(cache["detail"])
|
|
336
|
+
overall_ok = False
|
|
337
|
+
overall_ok = overall_ok and cache.get("ok", False)
|
|
338
|
+
continue
|
|
339
|
+
|
|
340
|
+
ok, detail = await self._probe_health_endpoint(root)
|
|
341
|
+
self._backend_health_cache[root] = {"ok": ok, "detail": detail, "timestamp": now}
|
|
342
|
+
if not ok and detail:
|
|
343
|
+
details.append(detail)
|
|
344
|
+
overall_ok = overall_ok and ok
|
|
345
|
+
|
|
346
|
+
return {"ok": overall_ok, "detail": "; ".join(details) if details else ""}
|
|
347
|
+
|
|
348
|
+
async def _ensure_backend_ready(self) -> Tuple[bool, str]:
|
|
349
|
+
status = await self._check_backend_health()
|
|
350
|
+
return status["ok"], status.get("detail", "")
|
|
351
|
+
|
|
352
|
+
def _record_data_source(self, service: str, endpoint: str, success: bool, detail: str = "") -> None:
|
|
353
|
+
entry = {
|
|
354
|
+
"service": service,
|
|
355
|
+
"endpoint": endpoint,
|
|
356
|
+
"success": success,
|
|
357
|
+
"detail": detail,
|
|
358
|
+
}
|
|
359
|
+
self._recent_sources.append(entry)
|
|
360
|
+
if len(self._recent_sources) > 10:
|
|
361
|
+
self._recent_sources = self._recent_sources[-10:]
|
|
362
|
+
|
|
363
|
+
def _format_data_sources_footer(self) -> str:
|
|
364
|
+
if not self._recent_sources:
|
|
365
|
+
return ""
|
|
366
|
+
|
|
367
|
+
snippets: List[str] = []
|
|
368
|
+
for item in self._recent_sources[:4]:
|
|
369
|
+
status = "ok" if item.get("success") else f"error ({item.get('detail')})" if item.get("detail") else "error"
|
|
370
|
+
snippets.append(f"{item.get('service')} {item.get('endpoint')} – {status}")
|
|
371
|
+
if len(self._recent_sources) > 4:
|
|
372
|
+
snippets.append("…")
|
|
373
|
+
return "Data sources: " + "; ".join(snippets)
|
|
374
|
+
|
|
375
|
+
def _reset_data_sources(self) -> None:
|
|
376
|
+
self._recent_sources = []
|
|
377
|
+
|
|
378
|
+
def _load_ticker_map(self):
|
|
379
|
+
"""Load a simple company name -> ticker map for FinSight lookups."""
|
|
380
|
+
# Start with common aliases
|
|
381
|
+
mapping = {
|
|
382
|
+
"apple": "AAPL",
|
|
383
|
+
"microsoft": "MSFT",
|
|
384
|
+
"alphabet": "GOOGL",
|
|
385
|
+
"google": "GOOGL",
|
|
386
|
+
"amazon": "AMZN",
|
|
387
|
+
"nvidia": "NVDA",
|
|
388
|
+
"palantir": "PLTR",
|
|
389
|
+
"shopify": "SHOP",
|
|
390
|
+
"target": "TGT",
|
|
391
|
+
"amd": "AMD",
|
|
392
|
+
"tesla": "TSLA",
|
|
393
|
+
"meta": "META",
|
|
394
|
+
"netflix": "NFLX",
|
|
395
|
+
"goldman sachs": "GS",
|
|
396
|
+
"goldman": "GS",
|
|
397
|
+
"exxonmobil": "XOM",
|
|
398
|
+
"exxon": "XOM",
|
|
399
|
+
"jpmorgan": "JPM",
|
|
400
|
+
"square": "SQ"
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
def _augment_from_records(records: List[Dict[str, Any]]) -> None:
|
|
404
|
+
for item in records:
|
|
405
|
+
name = str(item.get("name", "")).lower()
|
|
406
|
+
symbol = item.get("symbol")
|
|
407
|
+
if name and symbol:
|
|
408
|
+
mapping.setdefault(name, symbol)
|
|
409
|
+
short = (
|
|
410
|
+
name.replace("inc.", "")
|
|
411
|
+
.replace("inc", "")
|
|
412
|
+
.replace("corporation", "")
|
|
413
|
+
.replace("corp.", "")
|
|
414
|
+
.strip()
|
|
415
|
+
)
|
|
416
|
+
if short and short != name:
|
|
417
|
+
mapping.setdefault(short, symbol)
|
|
418
|
+
|
|
419
|
+
try:
|
|
420
|
+
supplemental: List[Dict[str, Any]] = []
|
|
421
|
+
|
|
422
|
+
try:
|
|
423
|
+
package_resource = resources.files("nocturnal_archive.data").joinpath("company_tickers.json")
|
|
424
|
+
if package_resource.is_file():
|
|
425
|
+
supplemental = json.loads(package_resource.read_text(encoding="utf-8"))
|
|
426
|
+
except (FileNotFoundError, ModuleNotFoundError, AttributeError):
|
|
427
|
+
supplemental = []
|
|
428
|
+
|
|
429
|
+
if not supplemental:
|
|
430
|
+
candidate_paths = [
|
|
431
|
+
Path(__file__).resolve().parent / "data" / "company_tickers.json",
|
|
432
|
+
Path("./data/company_tickers.json"),
|
|
433
|
+
]
|
|
434
|
+
for data_path in candidate_paths:
|
|
435
|
+
if data_path.exists():
|
|
436
|
+
supplemental = json.loads(data_path.read_text(encoding="utf-8"))
|
|
437
|
+
break
|
|
438
|
+
|
|
439
|
+
if supplemental:
|
|
440
|
+
_augment_from_records(supplemental)
|
|
441
|
+
|
|
442
|
+
override_candidates: List[Path] = []
|
|
443
|
+
override_env = os.getenv("NOCTURNAL_TICKER_MAP")
|
|
444
|
+
if override_env:
|
|
445
|
+
override_candidates.append(Path(override_env).expanduser())
|
|
446
|
+
|
|
447
|
+
default_override = Path.home() / ".nocturnal_archive" / "tickers.json"
|
|
448
|
+
override_candidates.append(default_override)
|
|
449
|
+
|
|
450
|
+
for override_path in override_candidates:
|
|
451
|
+
if not override_path or not override_path.exists():
|
|
452
|
+
continue
|
|
453
|
+
try:
|
|
454
|
+
override_records = json.loads(override_path.read_text(encoding="utf-8"))
|
|
455
|
+
if isinstance(override_records, list):
|
|
456
|
+
_augment_from_records(override_records)
|
|
457
|
+
except Exception as override_exc:
|
|
458
|
+
logger.warning(f"Failed to load ticker override from {override_path}: {override_exc}")
|
|
459
|
+
except Exception:
|
|
460
|
+
pass
|
|
461
|
+
|
|
462
|
+
self.company_name_to_ticker = mapping
|
|
463
|
+
|
|
464
|
+
def _ensure_environment_loaded(self):
|
|
465
|
+
if self._env_loaded:
|
|
466
|
+
return
|
|
467
|
+
|
|
468
|
+
try:
|
|
469
|
+
from .setup_config import NocturnalConfig
|
|
470
|
+
|
|
471
|
+
config = NocturnalConfig()
|
|
472
|
+
config.setup_environment()
|
|
473
|
+
except ImportError:
|
|
474
|
+
pass
|
|
475
|
+
except Exception as exc:
|
|
476
|
+
print(f"⚠️ Environment setup warning: {exc}")
|
|
91
477
|
|
|
92
478
|
try:
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
479
|
+
from dotenv import load_dotenv
|
|
480
|
+
|
|
481
|
+
load_dotenv('.env.local')
|
|
482
|
+
except ImportError:
|
|
483
|
+
print("⚠️ python-dotenv not installed, using system environment variables")
|
|
484
|
+
except Exception as exc:
|
|
485
|
+
print(f"⚠️ Could not load .env.local: {exc}")
|
|
486
|
+
finally:
|
|
487
|
+
self._env_loaded = True
|
|
488
|
+
|
|
489
|
+
def _get_init_lock(self) -> asyncio.Lock:
|
|
490
|
+
if self._init_lock is None:
|
|
491
|
+
self._init_lock = asyncio.Lock()
|
|
492
|
+
return self._init_lock
|
|
493
|
+
|
|
494
|
+
async def _get_workspace_listing(self, limit: int = 20) -> Dict[str, Any]:
|
|
495
|
+
params = {"path": ".", "limit": limit, "include_hidden": "false"}
|
|
496
|
+
result = await self._call_files_api("GET", "/", params=params)
|
|
497
|
+
if "error" not in result:
|
|
498
|
+
return result
|
|
499
|
+
|
|
500
|
+
fallback = self._fallback_workspace_listing(limit)
|
|
501
|
+
fallback["error"] = result["error"]
|
|
502
|
+
return fallback
|
|
503
|
+
|
|
504
|
+
def _fallback_workspace_listing(self, limit: int = 20) -> Dict[str, Any]:
|
|
505
|
+
base = Path.cwd().resolve()
|
|
506
|
+
items: List[Dict[str, str]] = []
|
|
507
|
+
try:
|
|
508
|
+
for entry in sorted(base.iterdir(), key=lambda e: e.name.lower()):
|
|
509
|
+
if entry.name.startswith('.'):
|
|
510
|
+
continue
|
|
511
|
+
item = {
|
|
512
|
+
"name": entry.name,
|
|
513
|
+
"type": "directory" if entry.is_dir() else "file"
|
|
514
|
+
}
|
|
515
|
+
items.append(item)
|
|
516
|
+
if len(items) >= limit:
|
|
517
|
+
break
|
|
518
|
+
except Exception as exc:
|
|
519
|
+
return {
|
|
520
|
+
"base": str(base),
|
|
521
|
+
"items": [],
|
|
522
|
+
"error": f"Unable to list workspace: {exc}"
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
return {
|
|
526
|
+
"base": str(base),
|
|
527
|
+
"items": items,
|
|
528
|
+
"note": "Showing up to first {limit} non-hidden entries.".format(limit=limit)
|
|
529
|
+
}
|
|
530
|
+
|
|
531
|
+
def _format_workspace_listing_response(self, listing: Dict[str, Any]) -> str:
|
|
532
|
+
base = listing.get("base", Path.cwd().resolve())
|
|
533
|
+
items = listing.get("items")
|
|
534
|
+
if not items:
|
|
535
|
+
items = listing.get("entries", []) or []
|
|
536
|
+
note = listing.get("note")
|
|
537
|
+
error = listing.get("error")
|
|
538
|
+
truncated_flag = listing.get("truncated")
|
|
539
|
+
|
|
540
|
+
if not items:
|
|
541
|
+
summary_lines = ["(no visible files in the current directory)"]
|
|
542
|
+
else:
|
|
543
|
+
max_entries = min(len(items), 12)
|
|
544
|
+
summary_lines = [
|
|
545
|
+
f"- {item.get('name')} ({item.get('type', 'unknown')})"
|
|
546
|
+
for item in items[:max_entries]
|
|
547
|
+
]
|
|
548
|
+
if len(items) > max_entries:
|
|
549
|
+
remaining = len(items) - max_entries
|
|
550
|
+
summary_lines.append(f"… and {remaining} more")
|
|
551
|
+
|
|
552
|
+
message_parts = [
|
|
553
|
+
f"Workspace root: {base}",
|
|
554
|
+
"Here are the first entries I can see:",
|
|
555
|
+
"\n".join(summary_lines)
|
|
556
|
+
]
|
|
557
|
+
|
|
558
|
+
if note:
|
|
559
|
+
message_parts.append(note)
|
|
560
|
+
if error:
|
|
561
|
+
message_parts.append(f"Workspace API warning: {error}")
|
|
562
|
+
if truncated_flag:
|
|
563
|
+
message_parts.append("(Listing truncated by workspace service)")
|
|
564
|
+
|
|
565
|
+
footer = self._format_data_sources_footer()
|
|
566
|
+
if footer:
|
|
567
|
+
message_parts.append(f"_{footer}_")
|
|
568
|
+
|
|
569
|
+
return "\n\n".join(part for part in message_parts if part)
|
|
570
|
+
|
|
571
|
+
def _respond_with_workspace_listing(self, request: ChatRequest, listing: Dict[str, Any]) -> ChatResponse:
|
|
572
|
+
message = self._format_workspace_listing_response(listing)
|
|
573
|
+
|
|
574
|
+
self.conversation_history.append({"role": "user", "content": request.question})
|
|
575
|
+
self.conversation_history.append({"role": "assistant", "content": message})
|
|
576
|
+
self._update_memory(request.user_id, request.conversation_id, f"Q: {request.question[:100]}... A: {message[:100]}...")
|
|
577
|
+
|
|
578
|
+
items = listing.get("items") or listing.get("entries") or []
|
|
579
|
+
success = "error" not in listing
|
|
580
|
+
self._emit_telemetry(
|
|
581
|
+
"workspace_listing",
|
|
582
|
+
request,
|
|
583
|
+
success=success,
|
|
584
|
+
extra={
|
|
585
|
+
"item_count": len(items),
|
|
586
|
+
"truncated": bool(listing.get("truncated")),
|
|
587
|
+
},
|
|
588
|
+
)
|
|
589
|
+
|
|
590
|
+
return ChatResponse(
|
|
591
|
+
response=message,
|
|
592
|
+
tools_used=["files_listing"],
|
|
593
|
+
reasoning_steps=["Direct workspace listing response"],
|
|
594
|
+
tokens_used=0,
|
|
595
|
+
confidence_score=0.7,
|
|
596
|
+
api_results={"workspace_listing": listing}
|
|
597
|
+
)
|
|
598
|
+
|
|
599
|
+
def _respond_with_shell_command(self, request: ChatRequest, command: str) -> ChatResponse:
|
|
600
|
+
command_stub = command.split()[0] if command else ""
|
|
601
|
+
if not self._is_safe_shell_command(command):
|
|
602
|
+
message = (
|
|
603
|
+
"I couldn't run that command because it violates the safety policy. "
|
|
604
|
+
"Please try a simpler shell command (no pipes, redirection, or file writes)."
|
|
605
|
+
)
|
|
606
|
+
tools = ["shell_blocked"]
|
|
607
|
+
execution_results = {"command": command, "output": "Command blocked by safety policy", "success": False}
|
|
608
|
+
telemetry_event = "shell_blocked"
|
|
609
|
+
success = False
|
|
610
|
+
output_len = 0
|
|
611
|
+
else:
|
|
612
|
+
output = self.execute_command(command)
|
|
613
|
+
truncated_output = output if len(output) <= 2000 else output[:2000] + "\n… (truncated)"
|
|
614
|
+
message = (
|
|
615
|
+
f"Running the command: `{command}`\n\n"
|
|
616
|
+
"Output:\n```\n"
|
|
617
|
+
f"{truncated_output}\n"
|
|
618
|
+
"```"
|
|
619
|
+
)
|
|
620
|
+
tools = ["shell_execution"]
|
|
621
|
+
success = not output.startswith("ERROR:")
|
|
622
|
+
execution_results = {"command": command, "output": truncated_output, "success": success}
|
|
623
|
+
telemetry_event = "shell_execution"
|
|
624
|
+
output_len = len(truncated_output)
|
|
625
|
+
|
|
626
|
+
footer = self._format_data_sources_footer()
|
|
627
|
+
if footer:
|
|
628
|
+
message = f"{message}\n\n_{footer}_"
|
|
629
|
+
|
|
630
|
+
self.conversation_history.append({"role": "user", "content": request.question})
|
|
631
|
+
self.conversation_history.append({"role": "assistant", "content": message})
|
|
632
|
+
self._update_memory(
|
|
633
|
+
request.user_id,
|
|
634
|
+
request.conversation_id,
|
|
635
|
+
f"Q: {request.question[:100]}... A: {message[:100]}..."
|
|
636
|
+
)
|
|
637
|
+
|
|
638
|
+
self._emit_telemetry(
|
|
639
|
+
telemetry_event,
|
|
640
|
+
request,
|
|
641
|
+
success=success,
|
|
642
|
+
extra={
|
|
643
|
+
"command": command_stub,
|
|
644
|
+
"output_len": output_len,
|
|
645
|
+
},
|
|
646
|
+
)
|
|
647
|
+
|
|
648
|
+
return ChatResponse(
|
|
649
|
+
response=message,
|
|
650
|
+
tools_used=tools,
|
|
651
|
+
reasoning_steps=["Direct shell execution"],
|
|
652
|
+
tokens_used=0,
|
|
653
|
+
confidence_score=0.75 if tools == ["shell_execution"] else 0.4,
|
|
654
|
+
execution_results=execution_results
|
|
655
|
+
)
|
|
656
|
+
def _format_currency_value(self, value: float) -> str:
|
|
657
|
+
try:
|
|
658
|
+
abs_val = abs(value)
|
|
659
|
+
if abs_val >= 1e12:
|
|
660
|
+
return f"${value / 1e12:.2f} trillion"
|
|
661
|
+
if abs_val >= 1e9:
|
|
662
|
+
return f"${value / 1e9:.2f} billion"
|
|
663
|
+
if abs_val >= 1e6:
|
|
664
|
+
return f"${value / 1e6:.2f} million"
|
|
665
|
+
return f"${value:,.2f}"
|
|
666
|
+
except Exception:
|
|
667
|
+
return str(value)
|
|
668
|
+
|
|
669
|
+
def _respond_with_financial_metrics(self, request: ChatRequest, payload: Dict[str, Any]) -> ChatResponse:
|
|
670
|
+
ticker, metrics = next(iter(payload.items()))
|
|
671
|
+
headline = [f"{ticker} key metrics:"]
|
|
672
|
+
citations: List[str] = []
|
|
673
|
+
|
|
674
|
+
for metric_name, metric_data in metrics.items():
|
|
675
|
+
if not isinstance(metric_data, dict):
|
|
676
|
+
continue
|
|
677
|
+
value = metric_data.get("value")
|
|
678
|
+
if value is None:
|
|
679
|
+
inner_inputs = metric_data.get("inputs", {})
|
|
680
|
+
entry = inner_inputs.get(metric_name) or next(iter(inner_inputs.values()), {})
|
|
681
|
+
value = entry.get("value")
|
|
682
|
+
formatted_value = self._format_currency_value(value) if value is not None else "(value unavailable)"
|
|
683
|
+
period = metric_data.get("period")
|
|
684
|
+
if not period or (isinstance(period, str) and period.lower().startswith("latest")):
|
|
685
|
+
inner_inputs = metric_data.get("inputs", {})
|
|
686
|
+
entry = inner_inputs.get(metric_name) or next(iter(inner_inputs.values()), {})
|
|
687
|
+
period = entry.get("period")
|
|
688
|
+
sources = metric_data.get("citations") or []
|
|
689
|
+
if sources:
|
|
690
|
+
source_url = sources[0].get("source_url")
|
|
691
|
+
if source_url:
|
|
692
|
+
citations.append(source_url)
|
|
693
|
+
label = metric_name.replace("Gross", "Gross ").replace("Income", " Income").replace("Net", "Net ")
|
|
694
|
+
label = label.replace("operating", "operating ").replace("Ratio", " Ratio").title()
|
|
695
|
+
if period:
|
|
696
|
+
headline.append(f"• {label}: {formatted_value} (as of {period})")
|
|
697
|
+
else:
|
|
698
|
+
headline.append(f"• {label}: {formatted_value}")
|
|
699
|
+
|
|
700
|
+
unique_citations = []
|
|
701
|
+
for c in citations:
|
|
702
|
+
if c not in unique_citations:
|
|
703
|
+
unique_citations.append(c)
|
|
704
|
+
|
|
705
|
+
message_parts = ["\n".join(headline)]
|
|
706
|
+
if unique_citations:
|
|
707
|
+
message_parts.append("Sources:\n" + "\n".join(unique_citations))
|
|
708
|
+
|
|
709
|
+
footer = self._format_data_sources_footer()
|
|
710
|
+
if footer:
|
|
711
|
+
message_parts.append(f"_{footer}_")
|
|
712
|
+
|
|
713
|
+
message = "\n\n".join(message_parts)
|
|
714
|
+
|
|
715
|
+
self.conversation_history.append({"role": "user", "content": request.question})
|
|
716
|
+
self.conversation_history.append({"role": "assistant", "content": message})
|
|
717
|
+
self._update_memory(
|
|
718
|
+
request.user_id,
|
|
719
|
+
request.conversation_id,
|
|
720
|
+
f"Q: {request.question[:100]}... A: {message[:100]}..."
|
|
721
|
+
)
|
|
722
|
+
|
|
723
|
+
self._emit_telemetry(
|
|
724
|
+
"financial_metrics",
|
|
725
|
+
request,
|
|
726
|
+
success=True,
|
|
727
|
+
extra={
|
|
728
|
+
"ticker": ticker,
|
|
729
|
+
"metric_count": len(metrics),
|
|
730
|
+
},
|
|
731
|
+
)
|
|
732
|
+
|
|
733
|
+
return ChatResponse(
|
|
734
|
+
response=message,
|
|
735
|
+
tools_used=["finsight_api"],
|
|
736
|
+
reasoning_steps=["Direct financial metrics response"],
|
|
737
|
+
tokens_used=0,
|
|
738
|
+
confidence_score=0.8,
|
|
739
|
+
api_results={"financial": payload}
|
|
740
|
+
)
|
|
741
|
+
|
|
742
|
+
def _local_file_preview(self, path_str: str) -> Optional[Dict[str, Any]]:
|
|
743
|
+
try:
|
|
744
|
+
p = Path(path_str)
|
|
745
|
+
if not p.exists():
|
|
746
|
+
return None
|
|
747
|
+
if p.is_dir():
|
|
748
|
+
entries = sorted([e.name for e in p.iterdir()][:10])
|
|
749
|
+
return {
|
|
750
|
+
"path": str(p),
|
|
751
|
+
"type": "directory",
|
|
752
|
+
"preview": "\n".join(entries),
|
|
753
|
+
"encoding": "utf-8",
|
|
754
|
+
"truncated": False,
|
|
755
|
+
"size": None,
|
|
756
|
+
}
|
|
757
|
+
|
|
758
|
+
stat_result = p.stat()
|
|
759
|
+
if p.suffix.lower() in {".pdf", ".png", ".jpg", ".jpeg", ".parquet", ".zip", ".gif"}:
|
|
760
|
+
return {
|
|
761
|
+
"path": str(p),
|
|
762
|
+
"type": "binary",
|
|
763
|
+
"preview": "(binary file preview skipped)",
|
|
764
|
+
"encoding": "binary",
|
|
765
|
+
"truncated": False,
|
|
766
|
+
"size": stat_result.st_size,
|
|
767
|
+
}
|
|
768
|
+
|
|
769
|
+
content = p.read_text(errors="ignore")
|
|
770
|
+
truncated = len(content) > 65536
|
|
771
|
+
snippet = content[:65536]
|
|
772
|
+
preview = "\n".join(snippet.splitlines()[:60])
|
|
773
|
+
return {
|
|
774
|
+
"path": str(p),
|
|
775
|
+
"type": "text",
|
|
776
|
+
"preview": preview,
|
|
777
|
+
"encoding": "utf-8",
|
|
778
|
+
"truncated": truncated,
|
|
779
|
+
"size": stat_result.st_size,
|
|
780
|
+
}
|
|
781
|
+
except Exception as exc:
|
|
782
|
+
return {
|
|
783
|
+
"path": path_str,
|
|
784
|
+
"type": "error",
|
|
785
|
+
"preview": f"error: {exc}",
|
|
786
|
+
"encoding": "utf-8",
|
|
787
|
+
"truncated": False,
|
|
788
|
+
"size": None,
|
|
789
|
+
}
|
|
790
|
+
|
|
791
|
+
async def _preview_file(self, path_str: str) -> Optional[Dict[str, Any]]:
|
|
792
|
+
params = {"path": path_str}
|
|
793
|
+
result = await self._call_files_api("GET", "/preview", params=params)
|
|
794
|
+
if "error" not in result:
|
|
795
|
+
encoding = result.get("encoding", "utf-8")
|
|
796
|
+
return {
|
|
797
|
+
"path": result.get("path", path_str),
|
|
798
|
+
"type": "text" if encoding == "utf-8" else "binary",
|
|
799
|
+
"preview": result.get("content", ""),
|
|
800
|
+
"encoding": encoding,
|
|
801
|
+
"truncated": bool(result.get("truncated", False)),
|
|
802
|
+
"size": result.get("size"),
|
|
803
|
+
}
|
|
804
|
+
|
|
805
|
+
message = result.get("error", "")
|
|
806
|
+
if message and "does not exist" in message.lower():
|
|
807
|
+
return None
|
|
808
|
+
|
|
809
|
+
fallback = self._local_file_preview(path_str)
|
|
810
|
+
if fallback:
|
|
811
|
+
fallback.setdefault("error", message)
|
|
812
|
+
return fallback
|
|
813
|
+
return {
|
|
814
|
+
"path": path_str,
|
|
815
|
+
"type": "error",
|
|
816
|
+
"preview": "",
|
|
817
|
+
"encoding": "utf-8",
|
|
818
|
+
"truncated": False,
|
|
819
|
+
"size": None,
|
|
820
|
+
"error": message,
|
|
821
|
+
}
|
|
822
|
+
|
|
823
|
+
async def __aenter__(self):
|
|
824
|
+
await self.initialize()
|
|
825
|
+
return self
|
|
826
|
+
|
|
827
|
+
async def __aexit__(self, exc_type, exc, tb):
|
|
828
|
+
await self.close()
|
|
829
|
+
return False
|
|
830
|
+
|
|
831
|
+
def _is_simple_greeting(self, text: str) -> bool:
|
|
832
|
+
greetings = {"hi", "hello", "hey", "hola", "howdy", "greetings"}
|
|
833
|
+
normalized = text.lower().strip()
|
|
834
|
+
return any(normalized.startswith(greet) for greet in greetings)
|
|
835
|
+
|
|
836
|
+
def _is_casual_acknowledgment(self, text: str) -> bool:
|
|
837
|
+
acknowledgments = {
|
|
838
|
+
"thanks",
|
|
839
|
+
"thank you",
|
|
840
|
+
"thx",
|
|
841
|
+
"ty",
|
|
842
|
+
"appreciate it",
|
|
843
|
+
"got it",
|
|
844
|
+
"cool",
|
|
845
|
+
"great",
|
|
846
|
+
"awesome"
|
|
847
|
+
}
|
|
848
|
+
normalized = text.lower().strip()
|
|
849
|
+
return any(normalized.startswith(ack) for ack in acknowledgments)
|
|
850
|
+
|
|
851
|
+
def _format_api_results_for_prompt(self, api_results: Dict[str, Any]) -> str:
|
|
852
|
+
if not api_results:
|
|
853
|
+
logger.info("🔍 DEBUG: _format_api_results_for_prompt called with EMPTY api_results")
|
|
854
|
+
return "No API results yet."
|
|
855
|
+
try:
|
|
856
|
+
serialized = json.dumps(api_results, indent=2)
|
|
857
|
+
except Exception:
|
|
858
|
+
serialized = str(api_results)
|
|
859
|
+
max_len = 8000 # Keep under 12K token limit (backend + context)
|
|
860
|
+
if len(serialized) > max_len:
|
|
861
|
+
serialized = serialized[:max_len] + "\n... (truncated for length)"
|
|
862
|
+
|
|
863
|
+
# DEBUG: Log formatted results length and preview
|
|
864
|
+
logger.info(f"🔍 DEBUG: _format_api_results_for_prompt returning {len(serialized)} chars")
|
|
865
|
+
if "research" in api_results:
|
|
866
|
+
papers_count = len(api_results.get("research", {}).get("results", []))
|
|
867
|
+
logger.info(f"🔍 DEBUG: api_results contains 'research' with {papers_count} papers")
|
|
868
|
+
|
|
869
|
+
return serialized
|
|
870
|
+
|
|
871
|
+
def _build_system_prompt(
|
|
872
|
+
self,
|
|
873
|
+
request_analysis: Dict[str, Any],
|
|
874
|
+
memory_context: str,
|
|
875
|
+
api_results: Dict[str, Any]
|
|
876
|
+
) -> str:
|
|
877
|
+
sections: List[str] = []
|
|
878
|
+
|
|
879
|
+
# TRUTH-SEEKING CORE IDENTITY
|
|
880
|
+
# Adapt intro based on analysis mode
|
|
881
|
+
analysis_mode = request_analysis.get("analysis_mode", "quantitative")
|
|
882
|
+
|
|
883
|
+
if analysis_mode == "qualitative":
|
|
884
|
+
intro = (
|
|
885
|
+
"You are Nocturnal, a truth-seeking research AI specialized in QUALITATIVE ANALYSIS. "
|
|
886
|
+
"PRIMARY DIRECTIVE: Accuracy > Agreeableness. Quote verbatim, never paraphrase. "
|
|
887
|
+
"You analyze text, identify themes, extract quotes with context, and synthesize patterns. "
|
|
888
|
+
"You have direct access to academic sources and can perform thematic coding."
|
|
889
|
+
)
|
|
890
|
+
elif analysis_mode == "mixed":
|
|
891
|
+
intro = (
|
|
892
|
+
"You are Nocturnal, a truth-seeking research AI handling MIXED METHODS analysis. "
|
|
893
|
+
"PRIMARY DIRECTIVE: Accuracy > Agreeableness. "
|
|
894
|
+
"You work with both quantitative data (numbers, stats) and qualitative data (themes, quotes). "
|
|
895
|
+
"For numbers: calculate and cite. For text: quote verbatim and identify patterns. "
|
|
896
|
+
"You have access to production data sources and can write/execute code (Python, R, SQL)."
|
|
897
|
+
)
|
|
898
|
+
else: # quantitative
|
|
899
|
+
intro = (
|
|
900
|
+
"You are Cite Agent, a truth-seeking research and finance AI. "
|
|
901
|
+
"PRIMARY DIRECTIVE: Accuracy > Agreeableness. Ask clarifying questions when context is missing. "
|
|
902
|
+
"You are a fact-checker and analyst, NOT a people-pleaser. "
|
|
903
|
+
"You have direct access to production-grade data sources and can write/execute code (Python, R, SQL)."
|
|
904
|
+
)
|
|
905
|
+
|
|
906
|
+
sections.append(intro)
|
|
907
|
+
|
|
908
|
+
apis = request_analysis.get("apis", [])
|
|
909
|
+
capability_lines: List[str] = []
|
|
910
|
+
if "archive" in apis:
|
|
911
|
+
capability_lines.append("• Archive Research API for academic search and synthesis")
|
|
912
|
+
if "finsight" in apis:
|
|
913
|
+
capability_lines.append("• FinSight Finance API for SEC-quality metrics and citations")
|
|
914
|
+
if "shell" in apis:
|
|
915
|
+
capability_lines.append("• Persistent shell session for system inspection and code execution")
|
|
916
|
+
if not capability_lines:
|
|
917
|
+
capability_lines.append("• Core reasoning, code generation (Python/R/SQL), memory recall")
|
|
918
|
+
|
|
919
|
+
# Add workflow capabilities
|
|
920
|
+
capability_lines.append("")
|
|
921
|
+
capability_lines.append("📚 WORKFLOW INTEGRATION (Always available):")
|
|
922
|
+
capability_lines.append("• You can SAVE papers to user's local library")
|
|
923
|
+
capability_lines.append("• You can LIST papers from library")
|
|
924
|
+
capability_lines.append("• You can EXPORT citations to BibTeX or APA")
|
|
925
|
+
capability_lines.append("• You can SEARCH user's paper collection")
|
|
926
|
+
capability_lines.append("• You can COPY text to user's clipboard")
|
|
927
|
+
capability_lines.append("• User's query history is automatically tracked")
|
|
928
|
+
|
|
929
|
+
sections.append("Capabilities in play:\n" + "\n".join(capability_lines))
|
|
930
|
+
|
|
931
|
+
# ENHANCED TRUTH-SEEKING RULES (adapt based on mode)
|
|
932
|
+
base_rules = [
|
|
933
|
+
"🚨 BE PATIENT: Don't rush to tools. Have a conversation to understand intent FIRST.",
|
|
934
|
+
"🚨 CLARIFY BEFORE SEARCH: If you see '2008, 2015, 2019' → ask 'Are you looking for crisis patterns? Economic events? Papers published in those years?' DON'T just search '2008'.",
|
|
935
|
+
"🚨 KNOW YOUR TOOLS' LIMITS: SEC has revenue, not market share. Archive has papers, not market data. If tool can't answer, say 'I don't have that data' or use web search.",
|
|
936
|
+
"🚨 TOOL != ANSWER: Don't use tools just because you have them. Revenue ≠ Market Share. Published year ≠ Subject matter.",
|
|
937
|
+
"",
|
|
938
|
+
"💬 CONVERSATIONAL FLOW:",
|
|
939
|
+
"1. User asks vague question → YOU ask clarifying questions",
|
|
940
|
+
"2. User provides context → YOU confirm understanding",
|
|
941
|
+
"3. YOU make tool calls → Present results",
|
|
942
|
+
"NEVER skip step 1 or 2. Be deliberate, not eager.",
|
|
943
|
+
"",
|
|
944
|
+
"🚨 ANTI-APPEASEMENT: If user states something incorrect, CORRECT THEM immediately. Do not agree to be polite.",
|
|
945
|
+
"🚨 UNCERTAINTY: If you're uncertain, SAY SO explicitly. 'I don't know' is better than a wrong answer.",
|
|
946
|
+
"🚨 CONTRADICTIONS: If data contradicts user's assumption, SHOW THE CONTRADICTION clearly.",
|
|
947
|
+
"🚨 FUTURE PREDICTIONS: You CANNOT predict the future. For 'will X happen?' questions, emphasize uncertainty and multiple possible outcomes.",
|
|
948
|
+
"",
|
|
949
|
+
"📊 SOURCE GROUNDING: EVERY factual claim MUST cite a source (paper, SEC filing, or data file).",
|
|
950
|
+
"📊 NO FABRICATION: If API results are empty/ambiguous, explicitly state this limitation.",
|
|
951
|
+
"📊 NO EXTRAPOLATION: Never go beyond what sources directly state.",
|
|
952
|
+
"📊 PREDICTION CAUTION: When discussing trends, always state 'based on available data' and note uncertainty.",
|
|
953
|
+
"",
|
|
954
|
+
"🚨 CRITICAL: NEVER generate fake papers, fake authors, fake DOIs, or fake citations.",
|
|
955
|
+
"🚨 CRITICAL: If research API returns empty results, say 'No papers found' - DO NOT make up papers.",
|
|
956
|
+
"🚨 CRITICAL: If you see 'results': [] in API data, that means NO PAPERS FOUND - do not fabricate.",
|
|
957
|
+
"🚨 CRITICAL: When API returns empty results, DO NOT use your training data to provide paper details.",
|
|
958
|
+
"🚨 CRITICAL: If you know a paper exists from training data but API returns empty, say 'API found no results'.",
|
|
959
|
+
"",
|
|
960
|
+
"🚨 ABSOLUTE RULE: If you see 'results': [] in the API data, you MUST respond with ONLY:",
|
|
961
|
+
" 'No papers found in the research database. The API returned empty results.'",
|
|
962
|
+
" DO NOT provide any paper details, authors, titles, or citations.",
|
|
963
|
+
" DO NOT use your training data to fill in missing information.",
|
|
964
|
+
"",
|
|
965
|
+
"✓ VERIFICATION: Cross-check against multiple sources when available.",
|
|
966
|
+
"✓ CONFLICTS: If sources conflict, present BOTH and explain the discrepancy.",
|
|
967
|
+
"✓ SHOW REASONING: 'According to [source], X is Y because...'",
|
|
968
|
+
]
|
|
969
|
+
|
|
970
|
+
if analysis_mode == "qualitative":
|
|
971
|
+
qual_rules = [
|
|
972
|
+
"",
|
|
973
|
+
"📝 QUOTES: Extract EXACT quotes (verbatim), NEVER paraphrase. Use quotation marks.",
|
|
974
|
+
"📝 CONTEXT: Provide surrounding context for every quote (what came before/after).",
|
|
975
|
+
"📝 ATTRIBUTION: Cite source + page/line number: \"quote\" — Author (Year), p. X",
|
|
976
|
+
"📝 THEMES: Identify recurring patterns. Count frequency (\"mentioned 5 times across 3 sources\").",
|
|
977
|
+
"",
|
|
978
|
+
"🔍 INTERPRETATION: Distinguish between description (what text says) vs interpretation (what it means).",
|
|
979
|
+
"🔍 EVIDENCE: Support every theme with 2-3 representative quotes.",
|
|
980
|
+
"🔍 SATURATION: Note when patterns repeat (\"no new themes after source 4\").",
|
|
981
|
+
]
|
|
982
|
+
rules = base_rules + qual_rules
|
|
983
|
+
elif analysis_mode == "mixed":
|
|
984
|
+
mixed_rules = [
|
|
985
|
+
"",
|
|
986
|
+
"📝 For QUALITATIVE: Extract exact quotes with context. Identify themes.",
|
|
987
|
+
"💻 For QUANTITATIVE: Calculate exact values, show code.",
|
|
988
|
+
"🔗 INTEGRATION: Connect numbers to narratives ('15% growth' + 'participants felt optimistic')."
|
|
989
|
+
]
|
|
990
|
+
rules = base_rules + mixed_rules + [
|
|
991
|
+
"",
|
|
992
|
+
"💻 CODE: For data analysis, write and execute Python/R/SQL code. Show your work.",
|
|
993
|
+
"💻 CALCULATIONS: Don't estimate - calculate exact values and show the code.",
|
|
994
|
+
]
|
|
995
|
+
else: # quantitative
|
|
996
|
+
quant_rules = [
|
|
997
|
+
"",
|
|
998
|
+
"💻 CODE: For data analysis, write and execute Python/R/SQL code. Show your work.",
|
|
999
|
+
"💻 CALCULATIONS: Don't estimate - calculate exact values and show the code.",
|
|
1000
|
+
]
|
|
1001
|
+
rules = base_rules + quant_rules
|
|
1002
|
+
|
|
1003
|
+
rules.append("")
|
|
1004
|
+
rules.append("Keep responses concise but complete. Quote exact text from sources when possible.")
|
|
1005
|
+
|
|
1006
|
+
# Add workflow behavior rules
|
|
1007
|
+
workflow_rules = [
|
|
1008
|
+
"",
|
|
1009
|
+
"📚 WORKFLOW BEHAVIOR:",
|
|
1010
|
+
"• After finding papers, OFFER to save them: 'Would you like me to save this to your library?'",
|
|
1011
|
+
"• After showing a citation, ASK: 'Want me to copy that to your clipboard?'",
|
|
1012
|
+
"• If user says 'save that' or 'add to library', ACKNOWLEDGE and confirm the save",
|
|
1013
|
+
"• If user mentions 'my library', LIST their saved papers",
|
|
1014
|
+
"• If user asks for 'bibtex' or 'apa', PROVIDE the formatted citation",
|
|
1015
|
+
"• Be PROACTIVE: suggest exports, show library stats, offer clipboard copies",
|
|
1016
|
+
"• Example: 'I found 3 papers. I can save them to your library or export to BibTeX if you'd like.'",
|
|
1017
|
+
]
|
|
1018
|
+
rules.extend(workflow_rules)
|
|
1019
|
+
|
|
1020
|
+
sections.append("CRITICAL RULES:\n" + "\n".join(rules))
|
|
1021
|
+
|
|
1022
|
+
# CORRECTION EXAMPLES (adapt based on mode)
|
|
1023
|
+
if analysis_mode == "qualitative":
|
|
1024
|
+
examples = (
|
|
1025
|
+
"EXAMPLE RESPONSES:\n"
|
|
1026
|
+
"User: 'So participants felt happy about the change?'\n"
|
|
1027
|
+
"You: '⚠️ Mixed. 3 participants expressed satisfaction: \"I welcomed the new policy\" (P2, line 45), "
|
|
1028
|
+
"but 2 expressed concern: \"It felt rushed\" (P4, line 67). Theme: Ambivalence about pace.'\n\n"
|
|
1029
|
+
"User: 'What's the main theme?'\n"
|
|
1030
|
+
"You: 'THEME 1: Trust in leadership (8 mentions across 4 interviews)\n"
|
|
1031
|
+
"\"I trust my manager to make the right call\" — Interview 2, Line 34\n"
|
|
1032
|
+
"\"Leadership has been transparent\" — Interview 5, Line 89\n"
|
|
1033
|
+
"[Context: Both quotes from questions about organizational changes]'"
|
|
1034
|
+
)
|
|
1035
|
+
else:
|
|
1036
|
+
examples = (
|
|
1037
|
+
"EXAMPLE 1: Be Patient, Don't Rush\n"
|
|
1038
|
+
"User: 'Find papers on 2008, 2015, 2019'\n"
|
|
1039
|
+
"❌ BAD: [Searches for year:2008 immediately] 'Found 50 papers from 2008...'\n"
|
|
1040
|
+
"✅ GOOD: 'Are you looking for papers ABOUT events in those years (financial crises, policy changes), "
|
|
1041
|
+
"or papers PUBLISHED in those years? Also, what topic? (Economics? Healthcare? Climate?)'\n\n"
|
|
1042
|
+
|
|
1043
|
+
"EXAMPLE 2: Know Your Tools' Limits\n"
|
|
1044
|
+
"User: 'What's Palantir's market share?'\n"
|
|
1045
|
+
"❌ BAD: 'Palantir's latest revenue is $1B...' (Revenue ≠ Market Share! SEC doesn't have market share!)\n"
|
|
1046
|
+
"✅ GOOD: 'Market share requires: (1) Palantir's revenue, (2) total market size. SEC has #1, not #2. "
|
|
1047
|
+
"Which market? (Data analytics = ~$50B, Gov contracts = ~$200B). I can web search for total market size if you specify.'\n\n"
|
|
1048
|
+
|
|
1049
|
+
"EXAMPLE 3: Conversational Flow\n"
|
|
1050
|
+
"User: 'Compare Tesla and Ford'\n"
|
|
1051
|
+
"❌ BAD: [Immediately fetches both revenues] 'Tesla: $81B, Ford: $158B'\n"
|
|
1052
|
+
"✅ GOOD: 'Compare on what dimension? Revenue? (Ford larger). Market cap? (Tesla larger). EV sales? (Tesla dominates). "
|
|
1053
|
+
"Production volume? (Ford higher). Each tells a different story. Which matters to you?'\n\n"
|
|
1054
|
+
|
|
1055
|
+
"EXAMPLE CORRECTIONS:\n"
|
|
1056
|
+
"User: 'So revenue went up 50%?'\n"
|
|
1057
|
+
"You: '❌ No. According to 10-K page 23, revenue increased 15%, not 50%. "
|
|
1058
|
+
"You may be thinking of gross margin (30%→45%, a 15pp increase).'\n\n"
|
|
1059
|
+
"User: 'What will the stock price be?'\n"
|
|
1060
|
+
"You: '⚠️ Cannot predict future prices. I can show: historical trends, current fundamentals, analyst data (if in filings).'"
|
|
1061
|
+
)
|
|
1062
|
+
|
|
1063
|
+
sections.append(examples)
|
|
1064
|
+
|
|
1065
|
+
if memory_context:
|
|
1066
|
+
sections.append("CONTEXT:\n" + memory_context.strip())
|
|
1067
|
+
|
|
1068
|
+
sections.append(
|
|
1069
|
+
"REQUEST ANALYSIS: "
|
|
1070
|
+
f"type={request_analysis.get('type')}, "
|
|
1071
|
+
f"apis={apis}, "
|
|
1072
|
+
f"confidence={request_analysis.get('confidence')}"
|
|
1073
|
+
)
|
|
1074
|
+
|
|
1075
|
+
# Add explicit instruction before API results
|
|
1076
|
+
api_instructions = (
|
|
1077
|
+
"🚨 CRITICAL: The following API RESULTS are REAL DATA from production APIs.\n"
|
|
1078
|
+
"🚨 These are NOT examples or templates - they are ACTUAL results to use in your response.\n"
|
|
1079
|
+
"🚨 DO NOT generate new/fake data - USE EXACTLY what is shown below.\n"
|
|
1080
|
+
"🚨 If you see paper titles, authors, DOIs below - these are REAL papers you MUST cite.\n"
|
|
1081
|
+
"🚨 If API results show empty/no papers, say 'No papers found' - DO NOT make up papers.\n"
|
|
1082
|
+
)
|
|
1083
|
+
|
|
1084
|
+
sections.append(api_instructions + "\nAPI RESULTS:\n" + self._format_api_results_for_prompt(api_results))
|
|
1085
|
+
|
|
1086
|
+
return "\n\n".join(sections)
|
|
1087
|
+
|
|
1088
|
+
def _quick_reply(
|
|
1089
|
+
self,
|
|
1090
|
+
request: ChatRequest,
|
|
1091
|
+
message: str,
|
|
1092
|
+
tools_used: Optional[List[str]] = None,
|
|
1093
|
+
confidence: float = 0.6
|
|
1094
|
+
) -> ChatResponse:
|
|
1095
|
+
tools = tools_used or []
|
|
1096
|
+
self.conversation_history.append({"role": "user", "content": request.question})
|
|
1097
|
+
self.conversation_history.append({"role": "assistant", "content": message})
|
|
1098
|
+
self._update_memory(
|
|
1099
|
+
request.user_id,
|
|
1100
|
+
request.conversation_id,
|
|
1101
|
+
f"Q: {request.question[:100]}... A: {message[:100]}..."
|
|
1102
|
+
)
|
|
1103
|
+
self._emit_telemetry(
|
|
1104
|
+
"quick_reply",
|
|
1105
|
+
request,
|
|
1106
|
+
success=True,
|
|
1107
|
+
extra={
|
|
1108
|
+
"tools_used": tools,
|
|
1109
|
+
},
|
|
1110
|
+
)
|
|
1111
|
+
return ChatResponse(
|
|
1112
|
+
response=message,
|
|
1113
|
+
tools_used=tools,
|
|
1114
|
+
reasoning_steps=["Quick reply without LLM"],
|
|
1115
|
+
timestamp=datetime.now().isoformat(),
|
|
1116
|
+
tokens_used=0,
|
|
1117
|
+
confidence_score=confidence,
|
|
1118
|
+
execution_results={},
|
|
1119
|
+
api_results={}
|
|
1120
|
+
)
|
|
1121
|
+
|
|
1122
|
+
def _select_model(
|
|
1123
|
+
self,
|
|
1124
|
+
request: ChatRequest,
|
|
1125
|
+
request_analysis: Dict[str, Any],
|
|
1126
|
+
api_results: Dict[str, Any]
|
|
1127
|
+
) -> Dict[str, Any]:
|
|
1128
|
+
question = request.question.strip()
|
|
1129
|
+
apis = request_analysis.get("apis", [])
|
|
1130
|
+
use_light_model = False
|
|
1131
|
+
|
|
1132
|
+
if len(question) <= 180 and not api_results and not apis:
|
|
1133
|
+
use_light_model = True
|
|
1134
|
+
elif len(question) <= 220 and set(apis).issubset({"shell"}):
|
|
1135
|
+
use_light_model = True
|
|
1136
|
+
elif len(question.split()) <= 40 and request_analysis.get("type") in {"general", "system"} and not api_results:
|
|
1137
|
+
use_light_model = True
|
|
1138
|
+
|
|
1139
|
+
# Select model based on LLM provider
|
|
1140
|
+
if getattr(self, 'llm_provider', 'groq') == 'cerebras':
|
|
1141
|
+
if use_light_model:
|
|
1142
|
+
return {
|
|
1143
|
+
"model": "llama3.1-8b", # Cerebras 8B model
|
|
1144
|
+
"max_tokens": 520,
|
|
1145
|
+
"temperature": 0.2
|
|
1146
|
+
}
|
|
1147
|
+
return {
|
|
1148
|
+
"model": "llama-3.3-70b", # Cerebras 70B model
|
|
1149
|
+
"max_tokens": 900,
|
|
1150
|
+
"temperature": 0.3
|
|
1151
|
+
}
|
|
1152
|
+
else:
|
|
1153
|
+
# Groq models
|
|
1154
|
+
if use_light_model:
|
|
1155
|
+
return {
|
|
1156
|
+
"model": "llama-3.1-8b-instant",
|
|
1157
|
+
"max_tokens": 520,
|
|
1158
|
+
"temperature": 0.2
|
|
1159
|
+
}
|
|
1160
|
+
return {
|
|
1161
|
+
"model": "llama-3.3-70b-versatile",
|
|
1162
|
+
"max_tokens": 900,
|
|
1163
|
+
"temperature": 0.3
|
|
1164
|
+
}
|
|
1165
|
+
|
|
1166
|
+
+    def _mark_current_key_exhausted(self, reason: str = "rate_limit"):
+        if not self.api_keys:
+            return
+        key = self.api_keys[self.current_key_index]
+        self.exhausted_keys[key] = time.time()
+        logger.warning(f"LLM key index {self.current_key_index} marked exhausted ({reason})")
+
+    def _rotate_to_next_available_key(self) -> bool:
+        if not self.api_keys:
+            return False
+
+        attempts = 0
+        total = len(self.api_keys)
+        now = time.time()
+
+        while attempts < total:
+            self.current_key_index = (self.current_key_index + 1) % total
+            key = self.api_keys[self.current_key_index]
+            exhausted_at = self.exhausted_keys.get(key)
+            if exhausted_at:
+                if now - exhausted_at >= self.key_recheck_seconds:
+                    del self.exhausted_keys[key]
+                else:
+                    attempts += 1
+                    continue
+            try:
+                if self.llm_provider == "cerebras":
+                    from openai import OpenAI
+                    self.client = OpenAI(
+                        api_key=key,
+                        base_url="https://api.cerebras.ai/v1"
+                    )
+                else:
+                    self.client = Groq(api_key=key)
+                self.current_api_key = key
+                return True
+            except Exception as e:
+                logger.error(f"Failed to initialize {self.llm_provider.upper()} client for rotated key: {e}")
+                self.exhausted_keys[key] = now
+                attempts += 1
+        return False
+
+    def _ensure_client_ready(self) -> bool:
+        if self.client and self.current_api_key:
+            return True
+
+        if not self.api_keys:
+            return False
+
+        total = len(self.api_keys)
+        attempts = 0
+        now = time.time()
+
+        while attempts < total:
+            key = self.api_keys[self.current_key_index]
+            exhausted_at = self.exhausted_keys.get(key)
+            if exhausted_at and (now - exhausted_at) < self.key_recheck_seconds:
+                attempts += 1
+                self.current_key_index = (self.current_key_index + 1) % total
+                continue
+
+            if exhausted_at and (now - exhausted_at) >= self.key_recheck_seconds:
+                del self.exhausted_keys[key]
+
+            try:
+                if self.llm_provider == "cerebras":
+                    from openai import OpenAI
+                    self.client = OpenAI(
+                        api_key=key,
+                        base_url="https://api.cerebras.ai/v1"
+                    )
+                else:
+                    self.client = Groq(api_key=key)
+                self.current_api_key = key
+                return True
+            except Exception as e:
+                logger.error(f"Failed to initialize {self.llm_provider.upper()} client for key index {self.current_key_index}: {e}")
+                self.exhausted_keys[key] = now
+                attempts += 1
+                self.current_key_index = (self.current_key_index + 1) % total
+
+        return False
+
+    def _schedule_next_key_rotation(self):
+        if len(self.api_keys) <= 1:
+            return
+        self.current_key_index = (self.current_key_index + 1) % len(self.api_keys)
+        self.current_api_key = None
+        self.client = None
+
+    def _is_rate_limit_error(self, error: Exception) -> bool:
+        message = str(error).lower()
+        return "rate limit" in message or "429" in message
+
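The rotation strategy above is round-robin with a per-key cooldown: a key that hit a rate limit is skipped until `key_recheck_seconds` have elapsed. The same mechanics can be exercised without any provider SDK; a minimal sketch, assuming a two-key pool and illustrative names:

    import time

    class KeyPool:
        """Round-robin pool with a cooldown for exhausted keys (illustrative)."""
        def __init__(self, keys, recheck_seconds=60):
            self.keys = list(keys)
            self.exhausted = {}          # key -> time it was marked exhausted
            self.index = 0
            self.recheck_seconds = recheck_seconds

        def mark_exhausted(self):
            self.exhausted[self.keys[self.index]] = time.time()

        def next_available(self):
            now = time.time()
            for _ in range(len(self.keys)):
                self.index = (self.index + 1) % len(self.keys)
                key = self.keys[self.index]
                stamp = self.exhausted.get(key)
                if stamp and now - stamp < self.recheck_seconds:
                    continue                      # still cooling down
                self.exhausted.pop(key, None)     # cooldown elapsed: retry key
                return key
            return None                           # every key is cooling down

    pool = KeyPool(["key-a", "key-b"])
    pool.mark_exhausted()                 # key-a hits a 429
    print(pool.next_available())          # -> key-b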
+    def _respond_with_fallback(
+        self,
+        request: ChatRequest,
+        tools_used: List[str],
+        api_results: Dict[str, Any],
+        failure_reason: str,
+        error_message: Optional[str] = None
+    ) -> ChatResponse:
+        tools = list(tools_used) if tools_used else []
+        if "fallback" not in tools:
+            tools.append("fallback")
+
+        header = "⚠️ Temporary LLM downtime\n\n"
+
+        if self._is_simple_greeting(request.question):
+            body = (
+                "Hi there! I'm currently at my Groq capacity, so I can't craft a full narrative response just yet. "
+                "You're welcome to try again in a little while, or I can still fetch finance and research data for you."
             )
+        else:
+            details: List[str] = []
+
+            financial = api_results.get("financial")
+            if financial:
+                payload_full = json.dumps(financial, indent=2)
+                payload = payload_full[:1500]
+                if len(payload_full) > 1500:
+                    payload += "\n…"
+                details.append(f"**Finance API snapshot**\n```json\n{payload}\n```")
 
-
-
-
+            research = api_results.get("research")
+            if research:
+                payload_full = json.dumps(research, indent=2)
+                payload = payload_full[:1500]
+                if len(payload_full) > 1500:
+                    payload += "\n…"
+
+                # Check if results are empty and add explicit warning
+                if research.get("results") == [] or not research.get("results"):
+                    details.append(f"**Research API snapshot**\n```json\n{payload}\n```")
+                    details.append("🚨 **CRITICAL: API RETURNED EMPTY RESULTS - DO NOT GENERATE ANY PAPER DETAILS**")
+                    details.append("🚨 **DO NOT PROVIDE AUTHORS, TITLES, DOIs, OR ANY PAPER INFORMATION**")
+                    details.append("🚨 **SAY 'NO PAPERS FOUND' AND STOP - DO NOT HALLUCINATE**")
+                else:
+                    details.append(f"**Research API snapshot**\n```json\n{payload}\n```")
+
+            files_context = api_results.get("files_context")
+            if files_context:
+                preview = files_context[:600]
+                if len(files_context) > 600:
+                    preview += "\n…"
+                details.append(f"**File preview**\n{preview}")
+
+            if details:
+                body = (
+                    "I pulled the structured data you asked for, but I'm temporarily out of Groq quota to synthesize a full answer. "
+                    "Here are the raw results so you can keep moving:"
+                ) + "\n\n" + "\n\n".join(details)
+            else:
+                body = (
+                    "I'm temporarily out of Groq quota, so I can't compose a full answer. "
+                    "Please try again in a bit, or ask me to queue this work for later."
                 )
 
-
-
-
+        footer = (
+            "\n\nNext steps:\n"
+            "• Wait for the Groq daily quota to reset (usually within 24 hours).\n"
+            "• Add another API key in your environment for automatic rotation.\n"
+            "• Keep the conversation open—I’ll resume normal replies once capacity returns."
+        )
+
+        message = header + body + footer
+
+        self.conversation_history.append({"role": "user", "content": request.question})
+        self.conversation_history.append({"role": "assistant", "content": message})
+        self._update_memory(
+            request.user_id,
+            request.conversation_id,
+            f"Q: {request.question[:100]}... A: {message[:100]}..."
+        )
+
+        self._emit_telemetry(
+            "fallback_response",
+            request,
+            success=False,
+            extra={
+                "failure_reason": failure_reason,
+                "has_financial_payload": bool(api_results.get("financial")),
+                "has_research_payload": bool(api_results.get("research")),
+            },
+        )
+
+        return ChatResponse(
+            response=message,
+            tools_used=tools,
+            reasoning_steps=["Fallback response activated"],
+            timestamp=datetime.now().isoformat(),
+            tokens_used=0,
+            confidence_score=0.2,
+            execution_results={},
+            api_results=api_results,
+            error_message=error_message or failure_reason
+        )
+
+    def _extract_tickers_from_text(self, text: str) -> List[str]:
+        """Find tickers either as explicit symbols or from known company names."""
+        text_lower = text.lower()
+        # Explicit ticker-like symbols
+        ticker_candidates: List[str] = []
+        for token in re.findall(r"\b[A-Z]{1,5}(?:\d{0,2})\b", text):
+            ticker_candidates.append(token)
+        # Company name matches
+        for name, sym in self.company_name_to_ticker.items():
+            if name and name in text_lower:
+                ticker_candidates.append(sym)
+        # Deduplicate, preserving order
+        seen = set()
+        ordered: List[str] = []
+        for t in ticker_candidates:
+            if t not in seen:
+                seen.add(t)
+                ordered.append(t)
+        return ordered[:4]
+
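The extraction combines a regex for explicit uppercase symbols with a company-name lookup, then keeps the first four unique hits in order. A self-contained sketch with a toy name map (the two-entry table here is illustrative; the agent keeps its own `company_name_to_ticker` mapping):

    import re

    NAME_TO_TICKER = {"apple": "AAPL", "tesla": "TSLA"}  # toy lookup table

    def extract_tickers(text: str, max_tickers: int = 4) -> list:
        candidates = re.findall(r"\b[A-Z]{1,5}(?:\d{0,2})\b", text)
        lowered = text.lower()
        candidates += [sym for name, sym in NAME_TO_TICKER.items() if name in lowered]
        seen, ordered = set(), []
        for t in candidates:
            if t not in seen:
                seen.add(t)
                ordered.append(t)
        return ordered[:max_tickers]

    print(extract_tickers("Compare MSFT with apple"))  # -> ['MSFT', 'AAPL']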
+    async def initialize(self, force_reload: bool = False):
+        """Initialize the agent with API keys and shell session."""
+        lock = self._get_init_lock()
+        async with lock:
+            if self._initialized and not force_reload:
+                return True
+
+            if self._initialized and force_reload:
+                await self._close_resources()
+
+            # Check for updates automatically (silent background check)
+            self._check_updates_background()
+            self._ensure_environment_loaded()
+            self._init_api_clients()
+
+            # Suppress verbose initialization messages in production
+            import logging
+            logging.getLogger("aiohttp").setLevel(logging.ERROR)
+            logging.getLogger("asyncio").setLevel(logging.ERROR)
+
+            # SECURITY FIX: No API keys on client!
+            # All API calls go through our secure backend
+            # This prevents key extraction and piracy
+            # DISABLED for beta testing - set USE_LOCAL_KEYS=false to enable backend-only mode
+
+            # SECURITY: Production users MUST use backend for monetization
+            # Dev mode only available via undocumented env var (not in user docs)
+            use_local_keys_env = os.getenv("USE_LOCAL_KEYS", "").lower()
+
+            if use_local_keys_env == "true":
+                # Dev mode - use local keys
+                use_local_keys = True
+            elif use_local_keys_env == "false":
+                # Explicit backend mode
+                use_local_keys = False
+            else:
+                # Default: Always use backend (for monetization)
+                # Even if session doesn't exist, we'll prompt for login
+                use_local_keys = False
+
+            if not use_local_keys:
+                self.api_keys = []  # Empty - keys stay on server
+                self.current_key_index = 0
+                self.current_api_key = None
+                self.client = None  # Will use HTTP client instead
+
+                # Get backend API URL from config
+                self.backend_api_url = os.getenv(
+                    "NOCTURNAL_API_URL",
+                    "https://cite-agent-api-720dfadd602c.herokuapp.com/api"  # Production Heroku backend
                 )
 
-
-
-
+                # Get auth token from session (set by auth.py after login)
+                from pathlib import Path
+                session_file = Path.home() / ".nocturnal_archive" / "session.json"
+                if session_file.exists():
+                    try:
+                        import json
+                        with open(session_file, 'r') as f:
+                            session_data = json.load(f)
+                        self.auth_token = session_data.get('access_token')
+                        self.user_id = session_data.get('user_id')
+                    except Exception:
+                        self.auth_token = None
+                        self.user_id = None
+                else:
+                    self.auth_token = None
+                    self.user_id = None
 
-
+                # Suppress messages in production (only show in debug mode)
+                debug_mode = os.getenv("NOCTURNAL_DEBUG", "").lower() == "1"
+                if debug_mode:
+                    if self.auth_token:
+                        print(f"✅ Enhanced Nocturnal Agent Ready! (Authenticated)")
+                    else:
+                        print("⚠️ Not authenticated. Please log in to use the agent.")
+            else:
+                # Local keys mode - load Cerebras API keys (primary) with Groq fallback
+                self.auth_token = None
+                self.user_id = None
 
+                # Load Cerebras keys from environment (PRIMARY)
+                self.api_keys = []
+                for i in range(1, 10):  # Check CEREBRAS_API_KEY_1 through CEREBRAS_API_KEY_9
+                    key = os.getenv(f"CEREBRAS_API_KEY_{i}") or os.getenv("CEREBRAS_API_KEY")
+                    if key and key not in self.api_keys:
+                        self.api_keys.append(key)
+
+                # Fallback to Groq keys if no Cerebras keys found
+                if not self.api_keys:
+                    for i in range(1, 10):
+                        key = os.getenv(f"GROQ_API_KEY_{i}") or os.getenv("GROQ_API_KEY")
+                        if key and key not in self.api_keys:
+                            self.api_keys.append(key)
+                    self.llm_provider = "groq"
+                else:
+                    self.llm_provider = "cerebras"
+
+                debug_mode = os.getenv("NOCTURNAL_DEBUG", "").lower() == "1"
+                if not self.api_keys:
+                    if debug_mode:
+                        print("⚠️ No LLM API keys found. Set CEREBRAS_API_KEY or GROQ_API_KEY")
+                else:
+                    if debug_mode:
+                        print(f"✅ Loaded {len(self.api_keys)} {self.llm_provider.upper()} API key(s)")
+                    # Initialize first client - Cerebras uses OpenAI-compatible API
+                    try:
+                        if self.llm_provider == "cerebras":
+                            # Cerebras uses OpenAI client with custom base URL
+                            from openai import OpenAI
+                            self.client = OpenAI(
+                                api_key=self.api_keys[0],
+                                base_url="https://api.cerebras.ai/v1"
+                            )
+                        else:
+                            # Groq fallback
+                            from groq import Groq
+                            self.client = Groq(api_key=self.api_keys[0])
+                        self.current_api_key = self.api_keys[0]
+                        self.current_key_index = 0
+                    except Exception as e:
+                        print(f"⚠️ Failed to initialize {self.llm_provider.upper()} client: {e}")
+
+            if self.shell_session and self.shell_session.poll() is not None:
+                self.shell_session = None
+
+            if self.shell_session is None:
+                try:
+                    self.shell_session = subprocess.Popen(
+                        ['bash'],
+                        stdin=subprocess.PIPE,
+                        stdout=subprocess.PIPE,
+                        stderr=subprocess.STDOUT,
+                        text=True,
+                        cwd=os.getcwd()
+                    )
+                except Exception as exc:
+                    print(f"⚠️ Unable to launch persistent shell session: {exc}")
+                    self.shell_session = None
+
+            if self.session is None or getattr(self.session, "closed", False):
+                if self.session and not self.session.closed:
+                    await self.session.close()
+                default_headers = dict(getattr(self, "_default_headers", {}))
+                self.session = aiohttp.ClientSession(headers=default_headers)
+
+            self._initialized = True
+            return True
+
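The numbered-key convention used above (`CEREBRAS_API_KEY_1` through `_9`, falling back to the bare variable name) factors cleanly into a helper. A hedged sketch of the same lookup, assuming only that naming scheme:

    import os

    def load_numbered_keys(prefix: str, max_slots: int = 9) -> list:
        """Collect PREFIX_1..PREFIX_N plus the bare PREFIX var, deduplicated in order."""
        keys = []
        for i in range(1, max_slots + 1):
            key = os.getenv(f"{prefix}_{i}") or os.getenv(prefix)
            if key and key not in keys:
                keys.append(key)
        return keys

    cerebras = load_numbered_keys("CEREBRAS_API_KEY")
    keys, provider = (cerebras, "cerebras") if cerebras else (load_numbered_keys("GROQ_API_KEY"), "groq")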
+    def _check_updates_background(self):
+        """Check for updates and auto-install if available"""
+        if not self._auto_update_enabled:
+            return
+
+        # Check for updates (synchronous, fast)
+        try:
+            from .updater import NocturnalUpdater
+            updater = NocturnalUpdater()
+            update_info = updater.check_for_updates()
+
+            if update_info and update_info["available"]:
+                # Auto-update silently in background
+                import threading
+                def do_update():
+                    try:
+                        updater.update_package(silent=True)
+                    except Exception:
+                        pass
+                threading.Thread(target=do_update, daemon=True).start()
+
+        except Exception:
+            # Silently ignore update check failures
+            pass
+
+    async def call_backend_query(self, query: str, conversation_history: Optional[List[Dict]] = None,
+                                  api_results: Optional[Dict[str, Any]] = None, tools_used: Optional[List[str]] = None) -> ChatResponse:
+        """
+        Call backend /query endpoint instead of Groq directly
+        This is the SECURE method - all API keys stay on server
+        Includes API results (Archive, FinSight) in context for better responses
+        """
+        if not self.auth_token:
             return ChatResponse(
-                response=
-
-                tools_used=data.get("tools_used", []),
-                model=data.get("model", "backend"),
+                response="❌ Not authenticated. Please log in first.",
+                error_message="Authentication required"
             )
+
+        if not self.session:
+            return ChatResponse(
+                response="❌ HTTP session not initialized",
+                error_message="Session not initialized"
+            )
+
+        try:
+            # Build request with API context as separate field
+            payload = {
+                "query": query,  # Keep query clean
+                "conversation_history": conversation_history or [],
+                "api_context": api_results,  # Send API results separately
+                "model": "llama-3.3-70b",  # Compatible with Cerebras (priority) and Groq
+                "temperature": 0.2,  # Low temp for accuracy
+                "max_tokens": 4000
+            }
+
+            # Call backend
+            headers = {
+                "Authorization": f"Bearer {self.auth_token}",
+                "Content-Type": "application/json"
+            }
+
+            url = f"{self.backend_api_url}/query/"
+
+            async with self.session.post(url, json=payload, headers=headers, timeout=60) as response:
+                if response.status == 401:
+                    return ChatResponse(
+                        response="❌ Authentication expired. Please log in again.",
+                        error_message="Authentication expired"
+                    )
+
+                elif response.status == 429:
+                    # Rate limit exceeded
+                    data = await response.json()
+                    detail = data.get('detail', {})
+                    tokens_remaining = detail.get('tokens_remaining', 0)
+                    return ChatResponse(
+                        response=f"❌ Daily token limit reached. You have {tokens_remaining} tokens remaining today. The limit resets tomorrow.",
+                        error_message="Rate limit exceeded",
+                        tokens_used=detail.get('tokens_used_today', 0)
+                    )
+
+                elif response.status == 200:
+                    data = await response.json()
+                    response_text = data.get('response', '')
+                    tokens = data.get('tokens_used', 0)
+
+                    # Combine tools used
+                    all_tools = tools_used or []
+                    all_tools.append("backend_llm")
+
+                    # Save to workflow history
+                    self.workflow.save_query_result(
+                        query=query,
+                        response=response_text,
+                        metadata={
+                            "tools_used": all_tools,
+                            "tokens_used": tokens,
+                            "model": data.get('model'),
+                            "provider": data.get('provider')
+                        }
+                    )
+
+                    return ChatResponse(
+                        response=response_text,
+                        tokens_used=tokens,
+                        tools_used=all_tools,
+                        model=data.get('model', 'llama-3.3-70b-versatile'),
+                        timestamp=data.get('timestamp', datetime.now(timezone.utc).isoformat()),
+                        api_results=api_results
+                    )
+
+                else:
+                    error_text = await response.text()
+                    return ChatResponse(
+                        response=f"❌ Backend error (HTTP {response.status}): {error_text}",
+                        error_message=f"HTTP {response.status}"
+                    )
+
+        except asyncio.TimeoutError:
+            return ChatResponse(
+                response="❌ Request timeout. Please try again.",
+                error_message="Timeout"
+            )
+        except Exception as e:
+            return ChatResponse(
+                response=f"❌ Error calling backend: {str(e)}",
+                error_message=str(e)
+            )
+
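The backend contract above boils down to a bearer-authenticated POST whose body carries the query, history, and any pre-fetched API context. A minimal client sketch under those assumptions (field names follow the code above; the base URL and token are placeholders):

    import asyncio
    import aiohttp

    async def backend_query(base_url: str, token: str, query: str) -> dict:
        payload = {
            "query": query,
            "conversation_history": [],
            "api_context": None,        # optional Archive/FinSight results
            "model": "llama-3.3-70b",
            "temperature": 0.2,
            "max_tokens": 4000,
        }
        headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
        async with aiohttp.ClientSession() as session:
            async with session.post(f"{base_url}/query/", json=payload,
                                    headers=headers, timeout=60) as resp:
                resp.raise_for_status()
                return await resp.json()

    # asyncio.run(backend_query("https://example.invalid/api", "PLACEHOLDER_TOKEN", "hello"))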
+    async def _call_files_api(
+        self,
+        method: str,
+        endpoint: str,
+        *,
+        params: Optional[Dict[str, Any]] = None,
+        json_body: Optional[Dict[str, Any]] = None,
+        data: Any = None,
+    ) -> Dict[str, Any]:
+        if not self.session:
+            return {"error": "HTTP session not initialized"}
 
-
-
-
-
+        ok, detail = await self._ensure_backend_ready()
+        if not ok:
+            self._record_data_source("Files", f"{method.upper()} {endpoint}", False, detail)
+            return {"error": f"Workspace API unavailable: {detail or 'backend offline'}"}
 
-
-
-
+        url = f"{self.files_base_url}{endpoint}"
+        request_method = getattr(self.session, method.lower(), None)
+        if not request_method:
+            return {"error": f"Unsupported HTTP method: {method}"}
+
+        try:
+            async with request_method(url, params=params, json=json_body, data=data, timeout=20) as response:
+                payload: Any
+                if response.content_type and "json" in response.content_type:
+                    payload = await response.json()
+                else:
+                    payload = {"raw": await response.text()}
+
+                success = response.status == 200
+                self._record_data_source(
+                    "Files",
+                    f"{method.upper()} {endpoint}",
+                    success,
+                    "" if success else f"HTTP {response.status}"
+                )
+
+                if success:
+                    return payload if isinstance(payload, dict) else {"data": payload}
+
+                detail_msg = payload.get("detail") if isinstance(payload, dict) else None
+                return {"error": detail_msg or f"Files API error: {response.status}"}
+        except Exception as exc:
+            self._record_data_source("Files", f"{method.upper()} {endpoint}", False, str(exc))
+            return {"error": f"Files API call failed: {exc}"}
+
+    async def _call_archive_api(self, endpoint: str, data: Dict[str, Any]) -> Dict[str, Any]:
+        """Call Archive API endpoint with retry mechanism"""
+        max_retries = 3
+        retry_delay = 1
+
+        ok, detail = await self._ensure_backend_ready()
+        if not ok:
+            self._record_data_source("Archive", f"POST {endpoint}", False, detail)
+            return {"error": f"Archive backend unavailable: {detail or 'backend offline'}"}
+
+        for attempt in range(max_retries):
+            try:
+                if not self.session:
+                    return {"error": "HTTP session not initialized"}
+
+                url = f"{self.archive_base_url}/{endpoint}"
+                # Start fresh with headers
+                headers = {}
+
+                # Always use demo key for Archive (public research data)
+                headers["X-API-Key"] = "demo-key-123"
+                headers["Content-Type"] = "application/json"
+
+                # Also add JWT if we have it
+                if self.auth_token:
+                    headers["Authorization"] = f"Bearer {self.auth_token}"
+
+                debug_mode = os.getenv("NOCTURNAL_DEBUG", "").lower() == "1"
+                if debug_mode:
+                    print(f"🔍 Archive headers: {list(headers.keys())}, X-API-Key={headers.get('X-API-Key')}")
+                    print(f"🔍 Archive URL: {url}")
+                    print(f"🔍 Archive data: {data}")
+
+                async with self.session.post(url, json=data, headers=headers, timeout=30) as response:
+                    if debug_mode:
+                        print(f"🔍 Archive response status: {response.status}")
+
+                    if response.status == 200:
+                        payload = await response.json()
+                        self._record_data_source("Archive", f"POST {endpoint}", True)
+                        return payload
+                    elif response.status == 422:  # Validation error
+                        try:
+                            error_detail = await response.json()
+                            logger.error(f"Archive API validation error (HTTP 422): {error_detail}")
+                        except Exception:
+                            error_detail = await response.text()
+                            logger.error(f"Archive API validation error (HTTP 422): {error_detail}")
+
+                        if attempt < max_retries - 1:
+                            # Retry with simplified request
+                            if "sources" in data and len(data["sources"]) > 1:
+                                data["sources"] = [data["sources"][0]]  # Try single source
+                                logger.info(f"Retrying with single source: {data['sources']}")
+                            await asyncio.sleep(retry_delay)
+                            continue
+                        self._record_data_source("Archive", f"POST {endpoint}", False, "422 validation error")
+                        return {"error": f"Archive API validation error: {error_detail}"}
+                    elif response.status == 429:  # Rate limited
+                        if attempt < max_retries - 1:
+                            await asyncio.sleep(retry_delay * (2 ** attempt))  # Exponential backoff
+                            continue
+                        self._record_data_source("Archive", f"POST {endpoint}", False, "rate limited")
+                        return {"error": "Archive API rate limited. Please try again later."}
+                    elif response.status == 401:
+                        self._record_data_source("Archive", f"POST {endpoint}", False, "401 unauthorized")
+                        return {"error": "Archive API authentication failed. Please check API key."}
+                    else:
+                        error_text = await response.text()
+                        logger.error(f"Archive API error (HTTP {response.status}): {error_text}")
+                        self._record_data_source("Archive", f"POST {endpoint}", False, f"HTTP {response.status}")
+                        return {"error": f"Archive API error: {response.status}"}
+
+            except asyncio.TimeoutError:
+                if attempt < max_retries - 1:
+                    await asyncio.sleep(retry_delay * (2 ** attempt))
+                    continue
+                self._record_data_source("Archive", f"POST {endpoint}", False, "timeout")
+                return {"error": "Archive API timeout. Please try again later."}
+            except Exception as e:
+                if attempt < max_retries - 1:
+                    await asyncio.sleep(retry_delay * (2 ** attempt))
+                    continue
+                self._record_data_source("Archive", f"POST {endpoint}", False, str(e))
+                return {"error": f"Archive API call failed: {e}"}
+
+        return {"error": "Archive API call failed after all retries"}
+
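The retry policy used by both API wrappers (3 attempts, exponential backoff on 429s and timeouts) is generic enough to isolate. A sketch of the bare schedule, assuming the same base delay of 1 second; `with_backoff` and `flaky` are illustrative names:

    import asyncio
    import random

    async def with_backoff(op, max_retries: int = 3, retry_delay: float = 1.0):
        """Run an async op, sleeping retry_delay * 2**attempt between failures."""
        for attempt in range(max_retries):
            try:
                return await op()
            except Exception:
                if attempt == max_retries - 1:
                    raise
                await asyncio.sleep(retry_delay * (2 ** attempt))

    async def flaky():
        if random.random() < 0.5:
            raise RuntimeError("transient failure")
        return "ok"

    # print(asyncio.run(with_backoff(flaky)))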
+    async def _call_finsight_api(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+        """Call FinSight API endpoint with retry mechanism"""
+        max_retries = 3
+        retry_delay = 1
+
+        ok, detail = await self._ensure_backend_ready()
+        if not ok:
+            self._record_data_source("FinSight", f"GET {endpoint}", False, detail)
+            return {"error": f"FinSight backend unavailable: {detail or 'backend offline'}"}
+
+        for attempt in range(max_retries):
+            try:
+                if not self.session:
+                    return {"error": "HTTP session not initialized"}
+
+                url = f"{self.finsight_base_url}/{endpoint}"
+                # Start fresh with headers - don't use _default_headers which might be wrong
+                headers = {}
+
+                # Always use demo key for FinSight (SEC data is public)
+                headers["X-API-Key"] = "demo-key-123"
+
+                # Also add JWT if we have it
+                if self.auth_token:
+                    headers["Authorization"] = f"Bearer {self.auth_token}"
+
+                debug_mode = os.getenv("NOCTURNAL_DEBUG", "").lower() == "1"
+                if debug_mode:
+                    print(f"🔍 FinSight headers: {list(headers.keys())}, X-API-Key={headers.get('X-API-Key')}")
+                    print(f"🔍 FinSight URL: {url}")
+
+                async with self.session.get(url, params=params, headers=headers, timeout=30) as response:
+                    if response.status == 200:
+                        payload = await response.json()
+                        self._record_data_source("FinSight", f"GET {endpoint}", True)
+                        return payload
+                    elif response.status == 429:  # Rate limited
+                        if attempt < max_retries - 1:
+                            await asyncio.sleep(retry_delay * (2 ** attempt))  # Exponential backoff
+                            continue
+                        self._record_data_source("FinSight", f"GET {endpoint}", False, "rate limited")
+                        return {"error": "FinSight API rate limited. Please try again later."}
+                    elif response.status == 401:
+                        self._record_data_source("FinSight", f"GET {endpoint}", False, "401 unauthorized")
+                        return {"error": "FinSight API authentication failed. Please check API key."}
+                    else:
+                        self._record_data_source("FinSight", f"GET {endpoint}", False, f"HTTP {response.status}")
+                        return {"error": f"FinSight API error: {response.status}"}
+
+            except asyncio.TimeoutError:
+                if attempt < max_retries - 1:
+                    await asyncio.sleep(retry_delay * (2 ** attempt))
+                    continue
+                self._record_data_source("FinSight", f"GET {endpoint}", False, "timeout")
+                return {"error": "FinSight API timeout. Please try again later."}
+            except Exception as e:
+                if attempt < max_retries - 1:
+                    await asyncio.sleep(retry_delay * (2 ** attempt))
+                    continue
+                self._record_data_source("FinSight", f"GET {endpoint}", False, str(e))
+                return {"error": f"FinSight API call failed: {e}"}
+
+        return {"error": "FinSight API call failed after all retries"}
+
+    async def _call_finsight_api_post(self, endpoint: str, data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+        """Call FinSight API endpoint with POST request"""
+        ok, detail = await self._ensure_backend_ready()
+        if not ok:
+            self._record_data_source("FinSight", f"POST {endpoint}", False, detail)
+            return {"error": f"FinSight backend unavailable: {detail or 'backend offline'}"}
 
-    def get_health_status(self) -> Dict[str, Any]:
-        """Get backend health status"""
         try:
-
-
-
+            if not self.session:
+                return {"error": "HTTP session not initialized"}
+
+            url = f"{self.finsight_base_url}/{endpoint}"
+            headers = getattr(self, "_default_headers", None)
+            if headers:
+                headers = dict(headers)
+            async with self.session.post(url, json=data, headers=headers) as response:
+                if response.status == 200:
+                    payload = await response.json()
+                    self._record_data_source("FinSight", f"POST {endpoint}", True)
+                    return payload
+                self._record_data_source("FinSight", f"POST {endpoint}", False, f"HTTP {response.status}")
+                return {"error": f"FinSight API error: {response.status}"}
+
+        except Exception as e:
+            self._record_data_source("FinSight", f"POST {endpoint}", False, str(e))
+            return {"error": f"FinSight API call failed: {e}"}
+
+    async def search_academic_papers(self, query: str, limit: int = 10) -> Dict[str, Any]:
+        """Search academic papers using Archive API with resilient fallbacks."""
+        source_sets: List[List[str]] = [
+            ["semantic_scholar", "openalex"],
+            ["semantic_scholar"],
+            ["openalex"],
+            ["pubmed"],
+            ["offline"],
+        ]
+
+        tried: List[List[str]] = []
+        provider_errors: List[Dict[str, Any]] = []
+        aggregated_payload: Dict[str, Any] = {"results": []}
+
+        for sources in source_sets:
+            data = {"query": query, "limit": limit, "sources": sources}
+            tried.append(list(sources))
+            result = await self._call_archive_api("search", data)
+
+            if "error" in result:
+                provider_errors.append({"sources": sources, "error": result["error"]})
+                continue
+
+            results = result.get("results") or result.get("papers") or []
+            # Validate papers have minimal required fields
+            validated_results = []
+            for paper in results:
+                if isinstance(paper, dict) and paper.get("title") and paper.get("year"):
+                    validated_results.append(paper)
+                else:
+                    logger.warning(f"Skipping invalid paper: {paper}")
+
+            if validated_results:
+                aggregated_payload = dict(result)
+                aggregated_payload["results"] = validated_results
+                aggregated_payload["validation_note"] = f"Validated {len(validated_results)} out of {len(results)} papers"
+                break
+
+        aggregated_payload.setdefault("results", [])
+        aggregated_payload["sources_tried"] = [",".join(s) for s in tried]
+
+        if provider_errors:
+            aggregated_payload["provider_errors"] = provider_errors
+
+        # CRITICAL: Add explicit marker for empty results to prevent hallucination
+        if not aggregated_payload["results"]:
+            aggregated_payload["notes"] = (
+                "No papers were returned by the research providers. This often occurs during "
+                "temporary rate limits; please retry in a minute or adjust the query scope."
             )
-
-
-            return {"status": "unavailable"}
+            aggregated_payload["EMPTY_RESULTS"] = True
+            aggregated_payload["warning"] = "DO NOT GENERATE FAKE PAPERS - API returned zero results"
 
-
-
-
-
+        return aggregated_payload
+
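The search wrapper tries progressively narrower source sets until one returns validated papers, so a single flaky provider cannot blank the whole query. The cascade itself is just "first non-empty set wins"; a sketch with a stubbed search function:

    SOURCE_SETS = [["semantic_scholar", "openalex"], ["semantic_scholar"],
                   ["openalex"], ["pubmed"], ["offline"]]

    def search_with_fallback(run_search, query: str) -> dict:
        """run_search(query, sources) -> list of papers; first non-empty set wins."""
        errors = []
        for sources in SOURCE_SETS:
            try:
                papers = [p for p in run_search(query, sources)
                          if p.get("title") and p.get("year")]   # same validation rule
            except Exception as exc:
                errors.append({"sources": sources, "error": str(exc)})
                continue
            if papers:
                return {"results": papers, "sources": sources}
        return {"results": [], "EMPTY_RESULTS": True, "provider_errors": errors}

    stub = lambda q, s: [{"title": "Example", "year": 2021}] if "openalex" in s else []
    print(search_with_fallback(stub, "any")["sources"])  # -> ['semantic_scholar', 'openalex']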
+    async def synthesize_research(self, paper_ids: List[str], max_words: int = 500) -> Dict[str, Any]:
+        """Synthesize research papers using Archive API"""
+        data = {
+            "paper_ids": paper_ids,
+            "max_words": max_words,
+            "focus": "key_findings",
+            "style": "academic"
+        }
+        return await self._call_archive_api("synthesize", data)
+
+    async def get_financial_data(self, ticker: str, metric: str, limit: int = 12) -> Dict[str, Any]:
+        """Get financial data using FinSight API"""
+        params = {
+            "freq": "Q",
+            "limit": limit
+        }
+        return await self._call_finsight_api(f"kpis/{ticker}/{metric}", params)
+
+    async def get_financial_metrics(self, ticker: str, metrics: Optional[List[str]] = None) -> Dict[str, Any]:
+        """Get financial metrics using FinSight KPI endpoints (with schema drift fixes)"""
+        if metrics is None:
+            metrics = ["revenue", "grossProfit", "operatingIncome", "netIncome"]
+
+        if not metrics:
+            return {}
+
+        async def _fetch_metric(metric_name: str) -> Dict[str, Any]:
+            params = {"period": "latest", "freq": "Q"}
+            try:
+                result = await self._call_finsight_api(f"calc/{ticker}/{metric_name}", params)
+            except Exception as exc:
+                return {metric_name: {"error": str(exc)}}
+
+            if "error" in result:
+                return {metric_name: {"error": result["error"]}}
+            return {metric_name: result}
+
+        tasks = [asyncio.create_task(_fetch_metric(metric)) for metric in metrics]
+        results: Dict[str, Any] = {}
+
+        for payload in await asyncio.gather(*tasks):
+            results.update(payload)
+
+        return results
+
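Fetching each KPI concurrently and merging the per-metric dicts keeps one slow metric from serializing the rest. The same gather-and-merge pattern in isolation, with a sleep standing in for the HTTP call:

    import asyncio

    async def fetch_metric(name: str) -> dict:
        await asyncio.sleep(0.01)            # stands in for an HTTP call
        return {name: {"value": 42}}

    async def fetch_all(metrics: list) -> dict:
        tasks = [asyncio.create_task(fetch_metric(m)) for m in metrics]
        merged: dict = {}
        for payload in await asyncio.gather(*tasks):
            merged.update(payload)
        return merged

    print(asyncio.run(fetch_all(["revenue", "netIncome"])))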
+    def execute_command(self, command: str) -> str:
+        """Execute command in persistent shell session and return output"""
+        try:
+            if self.shell_session is None:
+                return "ERROR: Shell session not initialized"
+
+            # Send command to persistent shell
+            self.shell_session.stdin.write(command + '\n')
+            self.shell_session.stdin.flush()
+
+            # Read output with timeout
+            try:
+                import select
+                use_select = True
+            except ImportError:
+                # Windows doesn't have select module
+                use_select = False
+
+            output_lines = []
+            start_time = time.time()
+            timeout = 10  # seconds
+
+            if use_select:
+                while time.time() - start_time < timeout:
+                    if select.select([self.shell_session.stdout], [], [], 0.1)[0]:
+                        line = self.shell_session.stdout.readline()
+                        if line:
+                            output_lines.append(line.rstrip())
+                        else:
+                            break
+                    else:
+                        # No more output available
+                        break
+            else:
+                # Fallback for Windows - simpler approach
+                import threading
+
+                def read_output():
+                    try:
+                        while True:
+                            line = self.shell_session.stdout.readline()
+                            if line:
+                                output_lines.append(line.rstrip())
+                            else:
+                                break
+                    except Exception:
+                        pass
+
+                reader_thread = threading.Thread(target=read_output, daemon=True)
+                reader_thread.start()
+                reader_thread.join(timeout=timeout)
+
+            output = '\n'.join(output_lines)
+            return output if output else "Command executed successfully"
+
+        except Exception as e:
+            return f"ERROR: {e}"
+
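The deadline loop above polls the shell's stdout so a quiet command cannot block the agent. A compact POSIX-only sketch of the same pattern (Windows falls back to the thread-based reader, as in the code; the 2-second deadline here is shorter than the agent's 10 seconds):

    import select
    import subprocess
    import time

    proc = subprocess.Popen(["bash"], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, text=True)
    proc.stdin.write("echo hello\n")
    proc.stdin.flush()

    lines, deadline = [], time.time() + 2
    while time.time() < deadline:
        ready, _, _ = select.select([proc.stdout], [], [], 0.1)
        if not ready:
            break                            # nothing more to read right now
        line = proc.stdout.readline()
        if not line:
            break
        lines.append(line.rstrip())
    print(lines)                             # -> ['hello']
    proc.terminate()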
+    def _is_safe_shell_command(self, cmd: str) -> bool:
+        """
+        Minimal safety check - only block truly catastrophic commands.
+        Philosophy: This is the user's machine. They can do anything in terminal anyway.
+        We only block commands that could cause immediate, irreversible system damage.
+        """
+        cmd = cmd.strip()
+        if not cmd:
+            return False
+
+        # Block ONLY truly catastrophic commands
+        nuclear_patterns = [
+            'rm -rf /',                      # Wipe root filesystem
+            'rm -rf ~/*',                    # Wipe home directory
+            'dd if=/dev/zero of=/dev/sda',   # Wipe disk
+            'dd if=/dev/zero of=/dev/hda',
+            'mkfs',                          # Format filesystem
+            'fdisk',                         # Partition disk
+            ':(){ :|:& };:',                 # Fork bomb
+            'chmod -R 777 /',                # Make everything executable
+        ]
+
+        cmd_lower = cmd.lower()
+        for pattern in nuclear_patterns:
+            if pattern.lower() in cmd_lower:
+                return False
+
+        # Allow everything else - pip, npm, git, pipes, redirection, etc.
+        # User asked for it, user gets it. Just like Cursor.
+        return True
+
+    def _check_token_budget(self, estimated_tokens: int) -> bool:
+        """Check if we have enough token budget"""
+        self._ensure_usage_day()
+        return (self.daily_token_usage + estimated_tokens) < self.daily_limit
+
+    def _check_user_token_budget(self, user_id: str, estimated_tokens: int) -> bool:
+        self._ensure_usage_day()
+        current = self.user_token_usage.get(user_id, 0)
+        return (current + estimated_tokens) < self.per_user_token_limit
+
+    def _resolve_daily_query_limit(self) -> int:
+        limit_env = os.getenv("NOCTURNAL_QUERY_LIMIT")
+        if limit_env and limit_env != str(DEFAULT_QUERY_LIMIT):
+            logger.warning("Ignoring attempted query-limit override (%s); enforcing default %s", limit_env, DEFAULT_QUERY_LIMIT)
+            os.environ["NOCTURNAL_QUERY_LIMIT"] = str(DEFAULT_QUERY_LIMIT)
+            os.environ.pop("NOCTURNAL_QUERY_LIMIT_SIG", None)
+        return DEFAULT_QUERY_LIMIT
+
+    def _check_query_budget(self, user_id: Optional[str]) -> bool:
+        self._ensure_usage_day()
+        if self.daily_query_limit > 0 and self.daily_query_count >= self.daily_query_limit:
+            return False
+
+        effective_limit = self.per_user_query_limit if self.per_user_query_limit > 0 else self.daily_query_limit
+        if user_id and effective_limit > 0 and self.user_query_counts.get(user_id, 0) >= effective_limit:
+            return False
+
+        return True
+
+    def _record_query_usage(self, user_id: Optional[str]):
+        self._ensure_usage_day()
+        self.daily_query_count += 1
+        if user_id:
+            self.user_query_counts[user_id] = self.user_query_counts.get(user_id, 0) + 1
+
+    def _ensure_usage_day(self):
+        current_day = datetime.now(timezone.utc).strftime("%Y-%m-%d")
+        if current_day != self._usage_day:
+            self._usage_day = current_day
+            self.daily_token_usage = 0
+            self.user_token_usage = {}
+            self.daily_query_count = 0
+            self.user_query_counts = {}
+
+    def _charge_tokens(self, user_id: Optional[str], tokens: int):
+        """Charge tokens to daily and per-user usage"""
+        self._ensure_usage_day()
+        self.daily_token_usage += tokens
+        if user_id:
+            self.user_token_usage[user_id] = self.user_token_usage.get(user_id, 0) + tokens
+
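All of the budget helpers hinge on one idea: counters are valid only for the current UTC day and reset lazily on first use after midnight. The reset rule in isolation (illustrative class, not the packaged one):

    from datetime import datetime, timezone

    class DailyCounter:
        """Lazily resets its counts when the UTC day rolls over."""
        def __init__(self):
            self.day = None
            self.total = 0
            self.per_user: dict = {}

        def _ensure_day(self):
            today = datetime.now(timezone.utc).strftime("%Y-%m-%d")
            if today != self.day:
                self.day, self.total, self.per_user = today, 0, {}

        def charge(self, user: str, tokens: int):
            self._ensure_day()
            self.total += tokens
            self.per_user[user] = self.per_user.get(user, 0) + tokens

    counter = DailyCounter()
    counter.charge("u1", 120)
    print(counter.total, counter.per_user)   # -> 120 {'u1': 120}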
+    def _get_memory_context(self, user_id: str, conversation_id: str) -> str:
+        """Get relevant memory context for the conversation"""
+        if user_id not in self.memory:
+            self.memory[user_id] = {}
+
+        if conversation_id not in self.memory[user_id]:
+            self.memory[user_id][conversation_id] = []
+
+        # Get last 3 interactions for context
+        recent_memory = self.memory[user_id][conversation_id][-3:]
+        if not recent_memory:
+            return ""
+
+        context = "Recent conversation context:\n"
+        for mem in recent_memory:
+            context += f"- {mem}\n"
+        return context
+
+    def _update_memory(self, user_id: str, conversation_id: str, interaction: str):
+        """Update memory with new interaction"""
+        if user_id not in self.memory:
+            self.memory[user_id] = {}
+
+        if conversation_id not in self.memory[user_id]:
+            self.memory[user_id][conversation_id] = []
+
+        self.memory[user_id][conversation_id].append(interaction)
+
+        # Keep only last 10 interactions
+        if len(self.memory[user_id][conversation_id]) > 10:
+            self.memory[user_id][conversation_id] = self.memory[user_id][conversation_id][-10:]
+
+    @staticmethod
+    def _hash_identifier(value: Optional[str]) -> Optional[str]:
+        if not value:
+            return None
+        digest = hashlib.sha256(value.encode("utf-8")).hexdigest()
+        return digest[:16]
+
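Telemetry only ever sees a truncated SHA-256 of user and conversation IDs, so events can be correlated without shipping raw identifiers. The transform on its own:

    import hashlib

    def hash_identifier(value: str) -> str:
        """First 16 hex chars of SHA-256: a stable, non-reversible correlation key."""
        return hashlib.sha256(value.encode("utf-8")).hexdigest()[:16]

    print(hash_identifier("user-123"))   # same input always yields the same key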
+    def _emit_telemetry(
+        self,
+        event: str,
+        request: Optional[ChatRequest] = None,
+        *,
+        success: Optional[bool] = None,
+        extra: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        manager = TelemetryManager.get()
+        if not manager:
+            return
 
-
-
-
-
+        payload: Dict[str, Any] = {}
+        if request:
+            payload["user"] = self._hash_identifier(request.user_id)
+            payload["conversation"] = self._hash_identifier(request.conversation_id)
+        if success is not None:
+            payload["success"] = bool(success)
+        if extra:
+            for key, value in extra.items():
+                if value is None:
+                    continue
+                payload[key] = value
+
+        manager.record(event, payload)
+
+    @staticmethod
+    def _format_model_error(details: str) -> str:
+        headline = "⚠️ I couldn't finish the reasoning step because the language model call failed."
+        advice = "Please retry shortly or verify your Groq API keys and network connectivity."
+        if details:
+            return f"{headline}\n\nDetails: {details}\n\n{advice}"
+        return f"{headline}\n\n{advice}"
+
+    def _summarize_command_output(
+        self,
+        request: ChatRequest,
+        command: str,
+        truncated_output: str,
+        base_response: str
+    ) -> Tuple[str, int]:
+        """Attach a deterministic shell output block to the agent response."""
+
+        rendered_output = truncated_output.rstrip()
+        if not rendered_output:
+            rendered_output = "(no output)"
+
+        formatted = (
+            f"{base_response.strip()}\n\n"
+            "```shell\n"
+            f"$ {command}\n"
+            f"{rendered_output}\n"
+            "```"
         )
 
-
-
+        return formatted, 0
+
+    async def _handle_workflow_commands(self, request: ChatRequest) -> Optional[ChatResponse]:
+        """Handle natural language workflow commands directly"""
+        question_lower = request.question.lower()
+
+        # Show library
+        if any(phrase in question_lower for phrase in ["show my library", "list my papers", "what's in my library", "my saved papers"]):
+            papers = self.workflow.list_papers()
+            if not papers:
+                message = "Your library is empty. As you find papers, I can save them for you."
+            else:
+                paper_list = []
+                for i, paper in enumerate(papers[:10], 1):
+                    authors_str = paper.authors[0] if paper.authors else "Unknown"
+                    if len(paper.authors) > 1:
+                        authors_str += " et al."
+                    paper_list.append(f"{i}. {paper.title} ({authors_str}, {paper.year})")
+
+                message = f"You have {len(papers)} paper(s) in your library:\n\n" + "\n".join(paper_list)
+                if len(papers) > 10:
+                    message += f"\n\n...and {len(papers) - 10} more."
+
+            return self._quick_reply(request, message, tools_used=["workflow_library"], confidence=1.0)
+
+        # Export to BibTeX
+        if any(phrase in question_lower for phrase in ["export to bibtex", "export bibtex", "generate bibtex", "bibtex export"]):
+            success = self.workflow.export_to_bibtex()
+            if success:
+                message = f"✅ Exported {len(self.workflow.list_papers())} papers to BibTeX.\n\nFile: {self.workflow.bibtex_file}\n\nYou can import this into Zotero, Mendeley, or use it in your LaTeX project."
+            else:
+                message = "❌ Failed to export BibTeX. Make sure you have papers in your library first."
+
+            return self._quick_reply(request, message, tools_used=["workflow_export"], confidence=1.0)
+
+        # Export to Markdown
+        if any(phrase in question_lower for phrase in ["export to markdown", "export markdown", "markdown export"]):
+            success = self.workflow.export_to_markdown()
+            if success:
+                message = f"✅ Exported to Markdown. Check {self.workflow.exports_dir} for the file.\n\nYou can open it in Obsidian, Notion, or any markdown editor."
+            else:
+                message = "❌ Failed to export Markdown."
+
+            return self._quick_reply(request, message, tools_used=["workflow_export"], confidence=1.0)
+
+        # Show history
+        if any(phrase in question_lower for phrase in ["show history", "my history", "recent queries", "what did i search"]):
+            history = self.workflow.get_history()[:10]
+            if not history:
+                message = "No query history yet."
+            else:
+                history_list = []
+                for i, entry in enumerate(history, 1):
+                    timestamp = datetime.fromisoformat(entry['timestamp']).strftime("%m/%d %H:%M")
+                    query = entry['query'][:60] + "..." if len(entry['query']) > 60 else entry['query']
+                    history_list.append(f"{i}. [{timestamp}] {query}")
+
+                message = "Recent queries:\n\n" + "\n".join(history_list)
+
+            return self._quick_reply(request, message, tools_used=["workflow_history"], confidence=1.0)
+
+        # Search library
+        search_match = re.match(r".*(?:search|find).*(?:in|my).*library.*[\"'](.+?)[\"']", question_lower)
+        if not search_match:
+            search_match = re.match(r".*search library (?:for )?(.+)", question_lower)
+
+        if search_match:
+            query_term = search_match.group(1).strip()
+            results = self.workflow.search_library(query_term)
+            if not results:
+                message = f"No papers found matching '{query_term}' in your library."
+            else:
+                result_list = []
+                for i, paper in enumerate(results[:5], 1):
+                    authors_str = paper.authors[0] if paper.authors else "Unknown"
+                    if len(paper.authors) > 1:
+                        authors_str += " et al."
+                    result_list.append(f"{i}. {paper.title} ({authors_str}, {paper.year})")
+
+                message = f"Found {len(results)} paper(s) matching '{query_term}':\n\n" + "\n".join(result_list)
+                if len(results) > 5:
+                    message += f"\n\n...and {len(results) - 5} more."
+
+            return self._quick_reply(request, message, tools_used=["workflow_search"], confidence=1.0)
+
+        # No workflow command detected
+        return None
+
async def _analyze_request_type(self, question: str) -> Dict[str, Any]:
|
|
2311
|
+
"""Analyze what type of request this is and what APIs to use"""
|
|
2312
|
+
|
|
2313
|
+
# Financial indicators
|
|
2314
|
+
financial_keywords = [
|
|
2315
|
+
'financial', 'revenue', 'profit', 'earnings', 'stock', 'market',
|
|
2316
|
+
'ticker', 'company', 'balance sheet', 'income statement', 'cash flow',
|
|
2317
|
+
'valuation', 'pe ratio', 'debt', 'equity', 'dividend', 'growth',
|
|
2318
|
+
'ceo', 'earnings call', 'quarterly', 'annual report'
|
|
2319
|
+
]
|
|
2320
|
+
|
|
2321
|
+
# Research indicators (quantitative)
|
|
2322
|
+
research_keywords = [
|
|
2323
|
+
'research', 'paper', 'study', 'academic', 'literature', 'journal',
|
|
2324
|
+
'synthesis', 'findings', 'methodology', 'abstract', 'citation',
|
|
2325
|
+
'author', 'publication', 'peer review', 'scientific'
|
|
2326
|
+
]
|
|
2327
|
+
|
|
2328
|
+
# Qualitative indicators (NEW)
|
|
2329
|
+
qualitative_keywords = [
|
|
2330
|
+
'theme', 'themes', 'thematic', 'code', 'coding', 'qualitative',
|
|
2331
|
+
'interview', 'interviews', 'transcript', 'case study', 'narrative',
|
|
2332
|
+
'discourse', 'content analysis', 'quote', 'quotes', 'excerpt',
|
|
2333
|
+
'participant', 'respondent', 'informant', 'ethnography',
|
|
2334
|
+
'grounded theory', 'phenomenology', 'what do people say',
|
|
2335
|
+
'how do participants', 'sentiment', 'perception', 'experience',
|
|
2336
|
+
'lived experience', 'meaning', 'interpret', 'understand',
|
|
2337
|
+
'focus group', 'observation', 'field notes', 'memoir', 'diary'
|
|
2338
|
+
]
|
|
2339
|
+
|
|
2340
|
+
# Quantitative indicators (explicit stats/math)
|
|
2341
|
+
quantitative_keywords = [
|
|
2342
|
+
'calculate', 'average', 'mean', 'median', 'percentage', 'correlation',
|
|
2343
|
+
'regression', 'statistical', 'significance', 'p-value', 'variance',
|
|
2344
|
+
'standard deviation', 'trend', 'forecast', 'model', 'predict',
|
|
2345
|
+
'rate of', 'ratio', 'growth rate', 'change in', 'compared to'
|
|
2346
|
+
]
|
|
2347
|
+
|
|
2348
|
+
# System/technical indicators
|
|
2349
|
+
system_keywords = [
|
|
2350
|
+
'file', 'directory', 'command', 'run', 'execute', 'install',
|
|
2351
|
+
'python', 'code', 'script', 'program', 'system', 'terminal'
|
|
2352
|
+
]
|
|
2353
|
+
|
|
2354
|
+
question_lower = question.lower()
|
|
2355
|
+
|
|
2356
|
+
matched_types: List[str] = []
|
|
2357
|
+
apis_to_use: List[str] = []
|
|
2358
|
+
analysis_mode = "quantitative" # default
|
|
2359
|
+
|
|
2360
|
+
# Context-aware keyword detection
|
|
2361
|
+
# Strong quant contexts that override everything
|
|
2362
|
+
strong_quant_contexts = [
|
|
2363
|
+
'algorithm', 'park', 'system', 'database',
|
|
2364
|
+
'calculate', 'predict', 'forecast', 'ratio', 'percentage'
|
|
2365
|
+
]
|
|
2366
|
+
|
|
2367
|
+
# Measurement words (can indicate mixed when combined with qual words)
|
|
2368
|
+
measurement_words = ['score', 'metric', 'rating', 'measure', 'index']
|
|
2369
|
+
|
|
2370
+        has_strong_quant_context = any(ctx in question_lower for ctx in strong_quant_contexts)
+        has_measurement = any(mw in question_lower for mw in measurement_words)
+
+        # Special cases: certain qual words + measurement = mixed (subjective + quantified)
+        # BUT: only if NOT in a strong quant context (algorithm overrides)
+        mixed_indicators = [
+            'experience',   # user experience
+            'sentiment',    # sentiment analysis
+            'perception',   # perception
+        ]
+
+        is_mixed_method = False
+        if not has_strong_quant_context and has_measurement:
+            if any(indicator in question_lower for indicator in mixed_indicators):
+                is_mixed_method = True
+
+        # Check for qualitative vs quantitative keywords
+        qual_score = sum(1 for kw in qualitative_keywords if kw in question_lower)
+        quant_score = sum(1 for kw in quantitative_keywords if kw in question_lower)
+
+        # Financial queries are quantitative by nature (unless explicitly qualitative, like "interview")
+        has_financial = any(kw in question_lower for kw in financial_keywords)
+        if has_financial and qual_score == 1:
+            # Single qual keyword + financial = probably mixed,
+            # e.g. "Interview CEO about earnings" = interview (qual) + earnings/CEO (financial)
+            quant_score += 1
+
+        # Adjust for context
+        if has_strong_quant_context:
+            # Reduce the qualitative score if in a strong quantitative context,
+            # e.g. "theme park" or "sentiment analysis algorithm"
+            qual_score = max(0, qual_score - 1)
+
+        # Improved mixed detection: use the score ratio instead of a simple comparison
+        if is_mixed_method:
+            # Special case: qual word + measurement = always mixed
+            analysis_mode = "mixed"
+        elif qual_score >= 2 and quant_score >= 1:
+            # Clear mixed: multiple qual + some quant
+            analysis_mode = "mixed"
+        elif qual_score > quant_score and qual_score > 0:
+            # Predominantly qualitative
+            analysis_mode = "qualitative"
+        elif qual_score > 0 and quant_score > 0:
+            # Some of both - default to mixed
+            analysis_mode = "mixed"
+
+        if any(keyword in question_lower for keyword in financial_keywords):
+            matched_types.append("financial")
+            apis_to_use.append("finsight")
+
+        if any(keyword in question_lower for keyword in research_keywords):
+            matched_types.append("research")
+            apis_to_use.append("archive")
+
+        # Qualitative queries often involve research
+        if analysis_mode in ("qualitative", "mixed") and "research" not in matched_types:
+            matched_types.append("research")
+            if "archive" not in apis_to_use:
+                apis_to_use.append("archive")
+
+        if any(keyword in question_lower for keyword in system_keywords):
+            matched_types.append("system")
+            apis_to_use.append("shell")
+
+        # Deduplicate while preserving order
+        apis_to_use = list(dict.fromkeys(apis_to_use))
+        unique_types = list(dict.fromkeys(matched_types))
+
+        if not unique_types:
+            request_type = "general"
+        elif len(unique_types) == 1:
+            request_type = unique_types[0]
+        elif {"financial", "research"}.issubset(set(unique_types)):
+            request_type = "comprehensive"
+            if "system" in unique_types:
+                request_type += "+system"
+        else:
+            request_type = "+".join(unique_types)
 
-
-
+        confidence = 0.8 if apis_to_use else 0.5
+        if len(unique_types) > 1:
+            confidence = 0.85
 
         return {
-            "
-            "
-            "
+            "type": request_type,
+            "apis": apis_to_use,
+            "confidence": confidence,
+            "analysis_mode": analysis_mode  # NEW: qualitative, quantitative, or mixed
         }
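
A minimal, self-contained sketch of the scoring heuristic above. The keyword lists here are illustrative stand-ins; the package's real lists (qualitative_keywords, quantitative_keywords, financial_keywords) are defined earlier in the method and are much longer:

QUAL = ["interview", "ethnograph", "lived experience"]
QUANT = ["regression", "correlation", "dataset"]
FIN = ["revenue", "earnings", "stock"]

def classify(question: str) -> str:
    q = question.lower()
    qual = sum(kw in q for kw in QUAL)
    quant = sum(kw in q for kw in QUANT)
    if any(kw in q for kw in FIN) and qual == 1:
        quant += 1  # financial queries lean quantitative, as in the code above
    if qual >= 2 and quant >= 1:
        return "mixed"
    if qual > quant and qual > 0:
        return "qualitative"
    if qual > 0 and quant > 0:
        return "mixed"
    return "quantitative"

print(classify("Interview CEOs about revenue growth"))  # -> "mixed"
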
+
+    async def process_request(self, request: ChatRequest) -> ChatResponse:
+        """Process request with full AI capabilities and API integration"""
+        try:
+            # Check workflow commands first (both modes)
+            workflow_response = await self._handle_workflow_commands(request)
+            if workflow_response:
+                return workflow_response
+
+            # Analyze the request to determine which APIs to call
+            request_analysis = await self._analyze_request_type(request.question)
+
+            # Debug: check what was detected
+            debug_mode = os.getenv("NOCTURNAL_DEBUG", "").lower() == "1"
+            if debug_mode:
+                print(f"🔍 Request analysis: {request_analysis}")
+
+            # Call appropriate APIs (Archive, FinSight) - BOTH production and dev mode
+            api_results = {}
+            tools_used = []
+
+            # Archive API for research
+            if "archive" in request_analysis.get("apis", []):
+                result = await self.search_academic_papers(request.question, 5)
+                if "error" not in result:
+                    api_results["research"] = result
+                    tools_used.append("archive_api")
+
+            # FinSight API for financial data
+            if "finsight" in request_analysis.get("apis", []):
+                tickers = self._extract_tickers_from_text(request.question)
+                if not tickers:
+                    # Try common company name mappings
+                    question_lower = request.question.lower()
+                    if "apple" in question_lower:
+                        tickers = ["AAPL"]
+                    elif "tesla" in question_lower:
+                        tickers = ["TSLA"]
+                    elif "microsoft" in question_lower:
+                        tickers = ["MSFT"]
+                    elif "google" in question_lower or "alphabet" in question_lower:
+                        tickers = ["GOOGL"]
+
+                if debug_mode:
+                    print(f"🔍 Extracted tickers: {tickers}")
+
+                if tickers:
+                    # Call FinSight with the proper endpoint format
+                    if debug_mode:
+                        print(f"🔍 Calling FinSight API: calc/{tickers[0]}/revenue")
+                    financial_data = await self._call_finsight_api(f"calc/{tickers[0]}/revenue")
+                    if debug_mode:
+                        print(f"🔍 FinSight returned: {list(financial_data.keys()) if financial_data else None}")
+                    if financial_data and "error" not in financial_data:
+                        api_results["financial"] = financial_data
+                        tools_used.append("finsight_api")
+                    else:
+                        if debug_mode and financial_data:
+                            print(f"🔍 FinSight error: {financial_data.get('error')}")
+
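
The company-name fallback above is an if/elif chain; an equivalent table-driven sketch (same four names, written here only for illustration, not part of the package) behaves identically and also handles multiple names in one question:

_COMPANY_TICKERS = {
    "apple": "AAPL",
    "tesla": "TSLA",
    "microsoft": "MSFT",
    "google": "GOOGL",
    "alphabet": "GOOGL",
}

def guess_tickers(question: str) -> list:
    q = question.lower()
    # dict.fromkeys dedupes while preserving order (e.g. "google" and "alphabet")
    return list(dict.fromkeys(t for name, t in _COMPANY_TICKERS.items() if name in q))

assert guess_tickers("How is Apple doing vs Google?") == ["AAPL", "GOOGL"]
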
+            # PRODUCTION MODE: Send to backend LLM with API results
+            if self.client is None:
+                return await self.call_backend_query(
+                    query=request.question,
+                    conversation_history=self.conversation_history[-10:],
+                    api_results=api_results,  # Include the data!
+                    tools_used=tools_used  # Pass tools list for history
+                )
+
+            # DEV MODE ONLY: Direct Groq calls (only works with local API keys)
+            # This code path won't execute in production since self.client = None
+
+            if not self._check_query_budget(request.user_id):
+                effective_limit = self.daily_query_limit if self.daily_query_limit > 0 else self.per_user_query_limit
+                if effective_limit <= 0:
+                    effective_limit = 25
+                message = (
+                    "Daily query limit reached. You've hit the "
+                    f"{effective_limit} request cap for today. "
+                    "Try again tomorrow or reach out if you need the limit raised."
+                )
+                return self._quick_reply(
+                    request,
+                    message,
+                    tools_used=["rate_limit"],
+                    confidence=0.35,
+                )
+
+            self._record_query_usage(request.user_id)
+
+            # Analyze request type
+            request_analysis = await self._analyze_request_type(request.question)
+            question_lower = request.question.lower()
+
+            self._reset_data_sources()
+
+            direct_shell = re.match(r"^(?:run|execute)\s*:?\s*(.+)$", request.question.strip(), re.IGNORECASE)
+            if direct_shell:
+                return self._respond_with_shell_command(request, direct_shell.group(1).strip())
+
+            # Get memory context
+            memory_context = self._get_memory_context(request.user_id, request.conversation_id)
+
+            # Ultra-light handling for small talk to save tokens entirely
+            if self._is_simple_greeting(request.question):
+                return self._quick_reply(
+                    request,
+                    "Hi there! I'm up and ready whenever you want to dig into finance or research.",
+                    tools_used=["quick_reply"],
+                    confidence=0.5
+                )
+
+            if self._is_casual_acknowledgment(request.question):
+                return self._quick_reply(
+                    request,
+                    "Happy to help! Feel free to fire off another question whenever you're ready.",
+                    tools_used=["quick_reply"],
+                    confidence=0.55
+                )
+
+            # Check for workflow commands (natural language)
+            workflow_response = await self._handle_workflow_commands(request)
+            if workflow_response:
+                return workflow_response
+
+            # Call appropriate APIs based on request type
+            api_results = {}
+            tools_used = []
+
+            # Auto file-reading: detect filenames in the prompt and attach previews
+            def _extract_filenames(text: str) -> List[str]:
+                # Match common file patterns (no spaces) and simple quoted paths
+                patterns = [
+                    r"[\w\-./]+\.(?:py|md|txt|json|csv|yml|yaml|toml|ini|ts|tsx|js|ipynb)",
+                    r"(?:\./|/)?[\w\-./]+/"  # directories
+                ]
+                matches: List[str] = []
+                for pat in patterns:
+                    matches.extend(re.findall(pat, text))
+                # Deduplicate and keep entries of reasonable length
+                uniq = []
+                for m in matches:
+                    if len(m) <= 256 and m not in uniq:
+                        uniq.append(m)
+                return uniq[:5]
+
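
The two regexes above can be exercised standalone. A quick check against a sample prompt (output verified by hand; the first pattern captures files with a known extension, the second captures directory-like tokens):

import re

patterns = [
    r"[\w\-./]+\.(?:py|md|txt|json|csv|yml|yaml|toml|ini|ts|tsx|js|ipynb)",
    r"(?:\./|/)?[\w\-./]+/",
]
text = "Compare cite_agent/cli.py with ./docs/ and notes.md please"
matches = []
for pat in patterns:
    matches.extend(re.findall(pat, text))
# Deduplicate, preserving order, exactly as the helper does
uniq = list(dict.fromkeys(m for m in matches if len(m) <= 256))
print(uniq[:5])  # ['cite_agent/cli.py', 'notes.md', 'cite_agent/', './docs/']
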
+            mentioned = _extract_filenames(request.question)
+            file_previews: List[Dict[str, Any]] = []
+            files_forbidden: List[str] = []
+            base_dir = Path.cwd().resolve()
+            sensitive_roots = {Path('/etc'), Path('/proc'), Path('/sys'), Path('/dev'), Path('/root'), Path('/usr'), Path('/bin'), Path('/sbin'), Path('/var')}
+            def _is_safe_path(path_str: str) -> bool:
+                try:
+                    rp = Path(path_str).resolve()
+                    if any(str(rp).startswith(str(sr)) for sr in sensitive_roots):
+                        return False
+                    return str(rp).startswith(str(base_dir))
+                except Exception:
+                    return False
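
One design note on _is_safe_path: string-prefix checks can be fooled by sibling names (for example, /home/u/work-other starts with /home/u/work). A tighter sketch of the same policy using Path.is_relative_to (available since Python 3.9; this variant is an assumption of mine, not what ships in the package):

from pathlib import Path

def is_safe_path(path_str: str, base_dir: Path, sensitive_roots: set) -> bool:
    # Same policy as above, but path-aware instead of string-prefix-based.
    try:
        rp = Path(path_str).resolve()
    except OSError:
        return False
    if any(rp.is_relative_to(sr) for sr in sensitive_roots):
        return False
    return rp.is_relative_to(base_dir)

base = Path.cwd().resolve()
print(is_safe_path("notes.md", base, {Path("/etc"), Path("/proc")}))  # True inside the workspace
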
+            for m in mentioned:
+                if not _is_safe_path(m):
+                    files_forbidden.append(m)
+                    continue
+                pr = await self._preview_file(m)
+                if pr:
+                    file_previews.append(pr)
+            if file_previews:
+                api_results["files"] = file_previews
+                # Build grounded context from the first text preview
+                text_previews = [fp for fp in file_previews if fp.get("type") == "text" and fp.get("preview")]
+                files_context = ""
+                if text_previews:
+                    fp = text_previews[0]
+                    quoted = "\n".join(fp["preview"].splitlines()[:20])
+                    files_context = f"File: {fp['path']} (first lines)\n" + quoted
+                    api_results["files_context"] = files_context
+            elif mentioned:
+                # Files were mentioned but none were found
+                api_results["files_missing"] = mentioned
+            if files_forbidden:
+                api_results["files_forbidden"] = files_forbidden
+
+            workspace_listing: Optional[Dict[str, Any]] = None
+            if not file_previews:
+                file_browse_keywords = (
+                    "list files",
+                    "show files",
+                    "show me files",
+                    "file browser",
+                    "file upload",
+                    "upload file",
+                    "files?",
+                    "browse files",
+                    "what files",
+                    "available files"
+                )
+                describe_files = (
+                    "file" in question_lower or "directory" in question_lower
+                ) and any(verb in question_lower for verb in ("show", "list", "what", "which", "display"))
+                if any(keyword in question_lower for keyword in file_browse_keywords) or describe_files:
+                    workspace_listing = await self._get_workspace_listing()
+                    api_results["workspace_listing"] = workspace_listing
+
+            if workspace_listing and set(request_analysis.get("apis", [])) <= {"shell"}:
+                return self._respond_with_workspace_listing(request, workspace_listing)
+
+            if "finsight" in request_analysis["apis"]:
+                # Extract tickers from symbols or company names
+                tickers = self._extract_tickers_from_text(request.question)
+                financial_payload = {}
+                session_key = f"{request.user_id}:{request.conversation_id}"
+                last_topic = self._session_topics.get(session_key)
+                if not tickers:
+                    # Heuristic defaults for common requests
+                    if "apple" in request.question.lower():
+                        tickers = ["AAPL"]
+                    if "microsoft" in request.question.lower():
+                        tickers = tickers + ["MSFT"] if "AAPL" in tickers else ["MSFT"]
+
+                # Determine which metrics to fetch based on query keywords
+                metrics_to_fetch = []
+                if any(kw in question_lower for kw in ["revenue", "sales", "top line"]):
+                    metrics_to_fetch.append("revenue")
+                if any(kw in question_lower for kw in ["gross profit", "gross margin", "margin"]):
+                    metrics_to_fetch.append("grossProfit")
+                if any(kw in question_lower for kw in ["operating income", "operating profit", "ebit"]):
+                    metrics_to_fetch.append("operatingIncome")
+                if any(kw in question_lower for kw in ["net income", "profit", "earnings", "bottom line"]):
+                    metrics_to_fetch.append("netIncome")
+
+                # Default to key metrics if no specific request
+                if not metrics_to_fetch and last_topic and last_topic.get("metrics"):
+                    metrics_to_fetch = list(last_topic["metrics"])
+
+                if not metrics_to_fetch:
+                    metrics_to_fetch = ["revenue", "grossProfit"]
+
+                # Fetch metrics for each ticker (cap 2 tickers)
+                for t in tickers[:2]:
+                    result = await self.get_financial_metrics(t, metrics_to_fetch)
+                    financial_payload[t] = result
+
+                if financial_payload:
+                    self._session_topics[session_key] = {
+                        "tickers": tickers[:2],
+                        "metrics": metrics_to_fetch,
+                    }
+                    direct_finance = (
+                        len(financial_payload) == 1
+                        and set(request_analysis.get("apis", [])) == {"finsight"}
+                        and not api_results.get("research")
+                        and not file_previews
+                        and not workspace_listing
+                    )
+                    if direct_finance:
+                        return self._respond_with_financial_metrics(request, financial_payload)
+                    api_results["financial"] = financial_payload
+                    tools_used.append("finsight_api")
+
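
The metric-selection block above reduces to a keyword table plus two fallbacks (the previous session topic, then hard defaults). A standalone sketch of that reduction, for illustration only:

_METRIC_KEYWORDS = [
    (("revenue", "sales", "top line"), "revenue"),
    (("gross profit", "gross margin", "margin"), "grossProfit"),
    (("operating income", "operating profit", "ebit"), "operatingIncome"),
    (("net income", "profit", "earnings", "bottom line"), "netIncome"),
]

def pick_metrics(question: str, last_metrics=None) -> list:
    q = question.lower()
    picked = [metric for keywords, metric in _METRIC_KEYWORDS
              if any(kw in q for kw in keywords)]
    # Fall back to the previous session topic, then to the key defaults
    return picked or list(last_metrics or []) or ["revenue", "grossProfit"]

print(pick_metrics("How did Apple's gross margin trend?"))  # ['grossProfit']
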
+            if "archive" in request_analysis["apis"]:
+                # Extract research query
+                result = await self.search_academic_papers(request.question, 5)
+                if "error" not in result:
+                    api_results["research"] = result
+                    # DEBUG: Log what we got from the API
+                    papers_count = len(result.get("results", []))
+                    logger.info(f"🔍 DEBUG: Got {papers_count} papers from Archive API")
+                    if papers_count > 0:
+                        logger.info(f"🔍 DEBUG: First paper: {result['results'][0].get('title', 'NO TITLE')[:80]}")
+                else:
+                    api_results["research"] = {"error": result["error"]}
+                    logger.warning(f"🔍 DEBUG: Archive API returned error: {result['error']}")
+                tools_used.append("archive_api")
+
+            # Build enhanced system prompt with sections trimmed to the detected needs
+            system_prompt = self._build_system_prompt(request_analysis, memory_context, api_results)
+
+            # Build messages
+            messages = [
+                {"role": "system", "content": system_prompt}
+            ]
+            # If we have file context, inject it as an additional grounding message
+            fc = api_results.get("files_context")
+            if fc:
+                messages.append({"role": "system", "content": f"Grounding from mentioned file(s):\n{fc}\n\nAnswer based strictly on this content when relevant. Do not run shell commands."})
+            missing = api_results.get("files_missing")
+            if missing:
+                messages.append({"role": "system", "content": f"User mentioned file(s) not found: {missing}. Respond explicitly that the file was not found and avoid speculation."})
+            forbidden = api_results.get("files_forbidden")
+            if forbidden:
+                messages.append({"role": "system", "content": f"User mentioned file(s) outside the allowed workspace or sensitive paths: {forbidden}. Refuse to access and explain the restriction succinctly."})
+
+            # Add conversation history with smart context management
+            if len(self.conversation_history) > 12:
+                # For long conversations, summarize early context and keep recent history
+                early_history = self.conversation_history[:-6]
+                recent_history = self.conversation_history[-6:]
+
+                # Create a summary of the early conversation
+                summary_prompt = "Summarize the key points from this conversation history in 2-3 sentences:"
+                summary_messages = [
+                    {"role": "system", "content": summary_prompt},
+                    {"role": "user", "content": str(early_history)}
+                ]
+
+                try:
+                    if self._ensure_client_ready():
+                        summary_response = self.client.chat.completions.create(
+                            model="llama-3.1-8b-instant",
+                            messages=summary_messages,
+                            max_tokens=160,
+                            temperature=0.2
+                        )
+                        conversation_summary = summary_response.choices[0].message.content
+                        if summary_response.usage and summary_response.usage.total_tokens:
+                            summary_tokens = summary_response.usage.total_tokens
+                            self._charge_tokens(request.user_id, summary_tokens)
+                            self.total_cost += (summary_tokens / 1000) * self.cost_per_1k_tokens
+                        messages.append({"role": "system", "content": f"Previous conversation summary: {conversation_summary}"})
+                except Exception:
+                    # If summarization fails, just fall back to the recent history
+                    pass
+
+                messages.extend(recent_history)
+            else:
+                # For shorter conversations, use the full history
+                messages.extend(self.conversation_history)
+
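
The context-management policy above (summarize everything but the last six turns once the history passes twelve entries) can be isolated as a pure function for testing. A sketch under exactly those thresholds:

def split_history(history: list, threshold: int = 12, keep: int = 6):
    """Return (early, recent): early gets summarized, recent is sent verbatim."""
    if len(history) > threshold:
        return history[:-keep], history[-keep:]
    return [], list(history)

early, recent = split_history([{"role": "user", "content": str(i)} for i in range(15)])
print(len(early), len(recent))  # 9 6
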
+            # Add current user message
+            messages.append({"role": "user", "content": request.question})
+
+            model_config = self._select_model(request, request_analysis, api_results)
+            target_model = model_config["model"]
+            max_completion_tokens = model_config["max_tokens"]
+            temperature = model_config["temperature"]
+
+            # Check token budget
+            estimated_tokens = (len(str(messages)) // 4) + max_completion_tokens  # Rough estimate incl. completion budget
+            if not self._check_token_budget(estimated_tokens):
+                return self._respond_with_fallback(
+                    request,
+                    tools_used,
+                    api_results,
+                    failure_reason="Daily Groq token budget exhausted",
+                    error_message="Daily token limit reached"
+                )
+
+            if not self._check_user_token_budget(request.user_id, estimated_tokens):
+                return self._respond_with_fallback(
+                    request,
+                    tools_used,
+                    api_results,
+                    failure_reason="Per-user Groq token budget exhausted",
+                    error_message="Per-user token limit reached"
+                )
+
+            if not self._ensure_client_ready():
+                return self._respond_with_fallback(
+                    request,
+                    tools_used,
+                    api_results,
+                    failure_reason="No available Groq API key"
+                )
+
+            response_text: Optional[str] = None
+            tokens_used = 0
+            attempts_remaining = len(self.api_keys) if self.api_keys else (1 if self.client else 0)
+            last_error: Optional[Exception] = None
+
+            while attempts_remaining > 0:
+                attempts_remaining -= 1
+                try:
+                    response = self.client.chat.completions.create(
+                        model=target_model,
+                        messages=messages,
+                        max_tokens=max_completion_tokens,
+                        temperature=temperature
+                    )
+
+                    response_text = response.choices[0].message.content
+                    tokens_used = response.usage.total_tokens if response.usage else estimated_tokens
+                    self._charge_tokens(request.user_id, tokens_used)
+                    cost = (tokens_used / 1000) * self.cost_per_1k_tokens
+                    self.total_cost += cost
+                    break
+                except Exception as e:
+                    last_error = e
+                    if self._is_rate_limit_error(e):
+                        self._mark_current_key_exhausted(str(e))
+                        if not self._rotate_to_next_available_key():
+                            break
+                        continue
+                    else:
+                        error_str = str(e)
+                        friendly = self._format_model_error(error_str)
+                        return ChatResponse(
+                            response=friendly,
+                            timestamp=datetime.now().isoformat(),
+                            tools_used=tools_used,
+                            api_results=api_results,
+                            error_message=error_str
+                        )
+
+            if response_text is None:
+                rate_limit_error = last_error if last_error and self._is_rate_limit_error(last_error) else None
+                if rate_limit_error:
+                    return self._respond_with_fallback(
+                        request,
+                        tools_used,
+                        api_results,
+                        failure_reason="All Groq API keys exhausted",
+                        error_message=str(rate_limit_error)
+                    )
+                error_str = str(last_error) if last_error else "Unknown error"
+                friendly = self._format_model_error(error_str)
+                return ChatResponse(
+                    response=friendly,
+                    timestamp=datetime.now().isoformat(),
+                    tools_used=tools_used,
+                    api_results=api_results,
+                    error_message=error_str
+                )
+
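
Stripped of the Groq specifics, the retry loop above is a rotate-on-rate-limit pattern: try each key once, rotate only on 429-style errors, and surface everything else immediately. A generic sketch (make_call, rotate, and is_rate_limited are stand-in callables, not names from the package; the real method returns a fallback ChatResponse instead of raising):

def call_with_rotation(make_call, rotate, is_rate_limited, attempts: int):
    last_error = None
    for _ in range(attempts):
        try:
            return make_call()
        except Exception as exc:
            last_error = exc
            # Non-rate-limit errors, or no keys left to rotate to, end the loop
            if not is_rate_limited(exc) or not rotate():
                break
    raise RuntimeError("All API keys exhausted") from last_error
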
+            self._schedule_next_key_rotation()
+
+            allow_shell_commands = "shell" in request_analysis.get("apis", []) or request_analysis.get("type") in {"system", "comprehensive+system"}
+            if api_results.get("files_context") or api_results.get("files_missing") or api_results.get("files_forbidden"):
+                allow_shell_commands = False
+
+            commands = re.findall(r'`([^`]+)`', response_text) if allow_shell_commands else []
+            execution_results = {}
+            final_response = response_text
+
+            if commands:
+                command = commands[0].strip()
+                if self._is_safe_shell_command(command):
+                    print(f"\n🔧 Executing: {command}")
+                    output = self.execute_command(command)
+                    print("✅ Command completed")
+                    execution_results = {
+                        "command": command,
+                        "output": output,
+                        "success": not output.startswith("ERROR:")
+                    }
+                    tools_used.append("shell_execution")
+                else:
+                    execution_results = {
+                        "command": command,
+                        "output": "Command blocked by safety policy",
+                        "success": False
+                    }
+                    if "⚠️ Shell command skipped for safety." not in final_response:
+                        final_response = f"{final_response.strip()}\n\n⚠️ Shell command skipped for safety."
+
+                # Summarize the output only if we actually executed and got text back
+                if execution_results.get("success") and isinstance(execution_results.get("output"), str):
+                    truncated_output = execution_results["output"]
+                    truncated_flag = False
+                    if len(truncated_output) > 1000:
+                        truncated_output = truncated_output[:1000]
+                        truncated_flag = True
+
+                    summarised_text, summary_tokens = self._summarize_command_output(
+                        request,
+                        command,
+                        truncated_output,
+                        response_text
+                    )
+
+                    final_response = summarised_text
+                    if truncated_flag:
+                        final_response += "\n\n(Output truncated to first 1000 characters.)"
+                    if summary_tokens:
+                        self._charge_tokens(request.user_id, summary_tokens)
+                        tokens_used += summary_tokens
+            else:
+                final_response = response_text
+
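
The backtick convention above means the model proposes shell commands inline in its reply; only the first span is considered, and even that one must still pass the safety gate. A quick standalone check of the extraction step:

import re

reply = "Sure - run `ls -la` first, then `rm -rf /` if you dare."
commands = re.findall(r'`([^`]+)`', reply)
print(commands[0])  # 'ls -la' - later spans such as 'rm -rf /' are ignored,
                    # and the first still has to pass _is_safe_shell_command
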
+            footer = self._format_data_sources_footer()
+            if footer:
+                final_response = f"{final_response}\n\n_{footer}_"
+
+            # Update conversation history
+            self.conversation_history.append({"role": "user", "content": request.question})
+            self.conversation_history.append({"role": "assistant", "content": final_response})
+
+            # Update memory
+            self._update_memory(
+                request.user_id,
+                request.conversation_id,
+                f"Q: {request.question[:100]}... A: {final_response[:100]}..."
+            )
+
+            # Save to workflow history automatically
+            self.workflow.save_query_result(
+                query=request.question,
+                response=final_response,
+                metadata={
+                    "tools_used": tools_used,
+                    "tokens_used": tokens_used,
+                    "confidence_score": request_analysis['confidence']
+                }
+            )
+
+            return ChatResponse(
+                response=final_response,
+                tools_used=tools_used,
+                reasoning_steps=[f"Request type: {request_analysis['type']}", f"APIs used: {request_analysis['apis']}"],
+                timestamp=datetime.now().isoformat(),
+                tokens_used=tokens_used,
+                confidence_score=request_analysis['confidence'],
+                execution_results=execution_results,
+                api_results=api_results
+            )
+
+        except Exception as e:
+            details = str(e)
+            message = (
+                "⚠️ Something went wrong while orchestrating your request, but no actions were performed. "
+                f"Please retry, and if the issue persists share this detail with the team: {details}."
+            )
+            return ChatResponse(
+                response=message,
+                timestamp=datetime.now().isoformat(),
+                confidence_score=0.0,
+                error_message=details
+            )
+
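
A hypothetical one-shot caller for process_request, assuming only names visible in this module (EnhancedNocturnalAgent, ChatRequest, and the initialize/close lifecycle used by the interactive loop further below):

import asyncio

async def one_shot(question: str) -> str:
    agent = EnhancedNocturnalAgent()
    if not await agent.initialize():
        raise RuntimeError("agent failed to initialize")
    try:
        reply = await agent.process_request(ChatRequest(question=question))
        return reply.response
    finally:
        await agent.close()

# print(asyncio.run(one_shot("What was Apple's revenue last quarter?")))
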
+    async def process_request_streaming(self, request: ChatRequest):
+        """
+        Process a request with a streaming response from the Groq API.
+        Returns a Groq stream object that yields chunks as they arrive.
+
+        This enables real-time, character-by-character streaming in the UI.
+        """
+        # PRODUCTION MODE: the backend doesn't support streaming yet, so use a regular response
+        if self.client is None:
+            response = await self.call_backend_query(request.question, self.conversation_history[-10:])
+            async def single_yield():
+                yield response.response
+            return single_yield()
+
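
Because the production path returns an async generator while the dev path below returns a raw Groq stream, a consumer has to handle both shapes. A sketch that assumes the OpenAI-style chunk layout the Groq SDK exposes (chunk.choices[0].delta.content); note the sync branch blocks the event loop and is fine only as an illustration:

async def drain(stream_or_gen) -> str:
    parts = []
    if hasattr(stream_or_gen, "__aiter__"):      # backend/fallback async generators
        async for piece in stream_or_gen:
            parts.append(piece)
    else:                                        # raw Groq stream (sync iterator of chunks)
        for chunk in stream_or_gen:
            delta = chunk.choices[0].delta.content
            if delta:
                parts.append(delta)
    return "".join(parts)
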
+        # DEV MODE ONLY
+        try:
+            # Quick budget checks
+            if not self._check_query_budget(request.user_id):
+                effective_limit = self.daily_query_limit if self.daily_query_limit > 0 else self.per_user_query_limit
+                if effective_limit <= 0:
+                    effective_limit = 25
+                error_msg = (
+                    f"Daily query limit reached. You've hit the {effective_limit} request cap for today. "
+                    "Try again tomorrow or reach out if you need the limit raised."
+                )
+                async def error_gen():
+                    yield error_msg
+                return error_gen()
+
+            self._record_query_usage(request.user_id)
+
+            # Analyze request
+            request_analysis = await self._analyze_request_type(request.question)
+            question_lower = request.question.lower()
+            self._reset_data_sources()
+
+            # Direct shell commands (non-streaming fallback)
+            direct_shell = re.match(r"^(?:run|execute)\s*:?\s*(.+)$", request.question.strip(), re.IGNORECASE)
+            if direct_shell:
+                result = self._respond_with_shell_command(request, direct_shell.group(1).strip())
+                async def shell_gen():
+                    yield result.response
+                return shell_gen()
+
+            # Memory context
+            memory_context = self._get_memory_context(request.user_id, request.conversation_id)
+
+            # Quick greetings (non-streaming)
+            if self._is_simple_greeting(request.question):
+                async def greeting_gen():
+                    yield "Hi there! I'm up and ready whenever you want to dig into finance or research."
+                return greeting_gen()
+
+            if self._is_casual_acknowledgment(request.question):
+                async def ack_gen():
+                    yield "Happy to help! Feel free to fire off another question whenever you're ready."
+                return ack_gen()
+
+            # Gather API results (same logic as process_request but abbreviated)
+            api_results = {}
+            tools_used = []
+
+            # File preview
+            def _extract_filenames(text: str) -> List[str]:
+                patterns = [
+                    r"[\w\-./]+\.(?:py|md|txt|json|csv|yml|yaml|toml|ini|ts|tsx|js|ipynb)",
+                    r"(?:\./|/)?[\w\-./]+/"
+                ]
+                matches: List[str] = []
+                for pat in patterns:
+                    matches.extend(re.findall(pat, text))
+                uniq = []
+                for m in matches:
+                    if len(m) <= 256 and m not in uniq:
+                        uniq.append(m)
+                return uniq[:5]
+
+            mentioned = _extract_filenames(request.question)
+            file_previews: List[Dict[str, Any]] = []
+            files_forbidden: List[str] = []
+            base_dir = Path.cwd().resolve()
+            sensitive_roots = {Path('/etc'), Path('/proc'), Path('/sys'), Path('/dev'), Path('/root'), Path('/usr'), Path('/bin'), Path('/sbin'), Path('/var')}
+
+            def _is_safe_path(path_str: str) -> bool:
+                try:
+                    rp = Path(path_str).resolve()
+                    if any(str(rp).startswith(str(sr)) for sr in sensitive_roots):
+                        return False
+                    return str(rp).startswith(str(base_dir))
+                except Exception:
+                    return False
+
+            for m in mentioned:
+                if not _is_safe_path(m):
+                    files_forbidden.append(m)
+                    continue
+                pr = await self._preview_file(m)
+                if pr:
+                    file_previews.append(pr)
+
+            if file_previews:
+                api_results["files"] = file_previews
+                text_previews = [fp for fp in file_previews if fp.get("type") == "text" and fp.get("preview")]
+                files_context = ""
+                if text_previews:
+                    fp = text_previews[0]
+                    quoted = "\n".join(fp["preview"].splitlines()[:20])
+                    files_context = f"File: {fp['path']} (first lines)\n" + quoted
+                    api_results["files_context"] = files_context
+            elif mentioned:
+                api_results["files_missing"] = mentioned
+            if files_forbidden:
+                api_results["files_forbidden"] = files_forbidden
+
+            # Workspace listing
+            workspace_listing: Optional[Dict[str, Any]] = None
+            if not file_previews:
+                file_browse_keywords = ("list files", "show files", "what files")
+                describe_files = ("file" in question_lower or "directory" in question_lower)
+                if any(keyword in question_lower for keyword in file_browse_keywords) or describe_files:
+                    workspace_listing = await self._get_workspace_listing()
+                    api_results["workspace_listing"] = workspace_listing
+
+            if workspace_listing and set(request_analysis.get("apis", [])) <= {"shell"}:
+                result = self._respond_with_workspace_listing(request, workspace_listing)
+                async def workspace_gen():
+                    yield result.response
+                return workspace_gen()
+
+            # FinSight API (abbreviated)
+            if "finsight" in request_analysis["apis"]:
+                tickers = self._extract_tickers_from_text(request.question)
+                financial_payload = {}
+
+                if not tickers:
+                    if "apple" in question_lower:
+                        tickers = ["AAPL"]
+                    if "microsoft" in question_lower:
+                        tickers = ["MSFT"] if not tickers else tickers + ["MSFT"]
+
+                metrics_to_fetch = ["revenue", "grossProfit"]
+                if any(kw in question_lower for kw in ["revenue", "sales"]):
+                    metrics_to_fetch = ["revenue"]
+                if any(kw in question_lower for kw in ["profit", "margin"]):
+                    metrics_to_fetch.append("grossProfit")
+
+                for t in tickers[:2]:
+                    result = await self.get_financial_metrics(t, metrics_to_fetch)
+                    financial_payload[t] = result
+
+                if financial_payload:
+                    api_results["financial"] = financial_payload
+                    tools_used.append("finsight_api")
+
+            # Archive API (abbreviated)
+            if "archive" in request_analysis["apis"]:
+                result = await self.search_academic_papers(request.question, 5)
+                if "error" not in result:
+                    api_results["research"] = result
+                else:
+                    api_results["research"] = {"error": result["error"]}
+                tools_used.append("archive_api")
+
+            # Build messages
+            system_prompt = self._build_system_prompt(request_analysis, memory_context, api_results)
+            messages = [{"role": "system", "content": system_prompt}]
+
+            fc = api_results.get("files_context")
+            if fc:
+                messages.append({"role": "system", "content": f"Grounding from mentioned file(s):\n{fc}"})
+
+            # Add conversation history (abbreviated - just the recent turns)
+            if len(self.conversation_history) > 6:
+                messages.extend(self.conversation_history[-6:])
+            else:
+                messages.extend(self.conversation_history)
+
+            messages.append({"role": "user", "content": request.question})
+
+            # Model selection
+            model_config = self._select_model(request, request_analysis, api_results)
+            target_model = model_config["model"]
+            max_completion_tokens = model_config["max_tokens"]
+            temperature = model_config["temperature"]
+
+            # Token budget check
+            estimated_tokens = (len(str(messages)) // 4) + max_completion_tokens
+            if not self._check_token_budget(estimated_tokens):
+                async def budget_gen():
+                    yield "⚠️ Daily Groq token budget exhausted. Please try again tomorrow."
+                return budget_gen()
+
+            if not self._ensure_client_ready():
+                async def no_key_gen():
+                    yield "⚠️ No available Groq API key."
+                return no_key_gen()
+
+            # **STREAMING: Call Groq with stream=True**
+            try:
+                stream = self.client.chat.completions.create(
+                    model=target_model,
+                    messages=messages,
+                    max_tokens=max_completion_tokens,
+                    temperature=temperature,
+                    stream=True  # Enable streaming!
+                )
+
+                # Update conversation history (add the user message now; the assistant
+                # message is appended after streaming completes)
+                self.conversation_history.append({"role": "user", "content": request.question})
+
+                # Return the stream directly - groq_stream_to_generator() in streaming_ui.py will handle it
+                return stream
+
+            except Exception as e:
+                if self._is_rate_limit_error(e):
+                    self._mark_current_key_exhausted(str(e))
+                    if self._rotate_to_next_available_key():
+                        try:
+                            stream = self.client.chat.completions.create(
+                                model=target_model,
+                                messages=messages,
+                                max_tokens=max_completion_tokens,
+                                temperature=temperature,
+                                stream=True
+                            )
+                            self.conversation_history.append({"role": "user", "content": request.question})
+                            return stream
+                        except Exception:
+                            pass
+                # Bind the message eagerly: the except variable is unbound once this
+                # block exits, so the generator must not close over `e` itself.
+                error_text = f"⚠️ Groq API error: {e}"
+                async def error_gen():
+                    yield error_text
+                return error_gen()
+
+        except Exception as e:
+            failure_text = f"⚠️ Request failed: {e}"
+            async def exception_gen():
+                yield failure_text
+            return exception_gen()
+
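
Since the user turn is recorded before streaming begins, the caller is responsible for appending the assistant turn once the stream is drained (in the released package, streaming_ui.py's groq_stream_to_generator plays this role). A hypothetical sketch of that contract, reusing the drain() helper sketched earlier:

async def stream_and_record(agent, request):
    stream = await agent.process_request_streaming(request)
    text = await drain(stream)  # drain() from the earlier sketch
    agent.conversation_history.append({"role": "assistant", "content": text})
    return text
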
+    async def run_interactive(self):
+        """Run interactive chat session"""
+        if not await self.initialize():
+            return
+
+        print("\n" + "="*70)
+        print("🤖 ENHANCED NOCTURNAL AI AGENT")
+        print("="*70)
+        print("Research Assistant with Archive API + FinSight API Integration")
+        print("Type 'quit' to exit")
+        print("="*70)
+
+        while True:
+            try:
+                user_input = input("\n👤 You: ").strip()
+
+                if user_input.lower() in ['quit', 'exit', 'bye']:
+                    print("👋 Goodbye!")
+                    await self.close()
+                    break
+
+                # Process request
+                request = ChatRequest(question=user_input)
+                response = await self.process_request(request)
+
+                print(f"\n🤖 Agent: {response.response}")
+
+                if response.api_results:
+                    print(f"📊 API Results: {len(response.api_results)} sources used")
+
+                if response.execution_results:
+                    print(f"🔧 Command: {response.execution_results['command']}")
+                    print(f"📊 Success: {response.execution_results['success']}")
+
+                print(f"📈 Tokens used: {response.tokens_used}")
+                print(f"🎯 Confidence: {response.confidence_score:.2f}")
+                print(f"🛠️ Tools used: {', '.join(response.tools_used) if response.tools_used else 'None'}")
+
+            except KeyboardInterrupt:
+                print("\n👋 Goodbye!")
+                await self.close()
+                break
+            except Exception as e:
+                print(f"\n❌ Error: {e}")
+
+async def main():
+    """Main entry point"""
+    agent = EnhancedNocturnalAgent()
+    await agent.run_interactive()
+
+if __name__ == "__main__":
+    asyncio.run(main())
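
One last operational note: the 🔍 diagnostics sprinkled through process_request are opt-in via an environment variable (name taken from the os.getenv check above). Set it before the entry point runs, for example:

import os

# Any value other than "1" leaves the debug prints disabled,
# per the os.getenv("NOCTURNAL_DEBUG", "").lower() == "1" check above.
os.environ["NOCTURNAL_DEBUG"] = "1"
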