kollabor 0.4.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- core/__init__.py +18 -0
- core/application.py +578 -0
- core/cli.py +193 -0
- core/commands/__init__.py +43 -0
- core/commands/executor.py +277 -0
- core/commands/menu_renderer.py +319 -0
- core/commands/parser.py +186 -0
- core/commands/registry.py +331 -0
- core/commands/system_commands.py +479 -0
- core/config/__init__.py +7 -0
- core/config/llm_task_config.py +110 -0
- core/config/loader.py +501 -0
- core/config/manager.py +112 -0
- core/config/plugin_config_manager.py +346 -0
- core/config/plugin_schema.py +424 -0
- core/config/service.py +399 -0
- core/effects/__init__.py +1 -0
- core/events/__init__.py +12 -0
- core/events/bus.py +129 -0
- core/events/executor.py +154 -0
- core/events/models.py +258 -0
- core/events/processor.py +176 -0
- core/events/registry.py +289 -0
- core/fullscreen/__init__.py +19 -0
- core/fullscreen/command_integration.py +290 -0
- core/fullscreen/components/__init__.py +12 -0
- core/fullscreen/components/animation.py +258 -0
- core/fullscreen/components/drawing.py +160 -0
- core/fullscreen/components/matrix_components.py +177 -0
- core/fullscreen/manager.py +302 -0
- core/fullscreen/plugin.py +204 -0
- core/fullscreen/renderer.py +282 -0
- core/fullscreen/session.py +324 -0
- core/io/__init__.py +52 -0
- core/io/buffer_manager.py +362 -0
- core/io/config_status_view.py +272 -0
- core/io/core_status_views.py +410 -0
- core/io/input_errors.py +313 -0
- core/io/input_handler.py +2655 -0
- core/io/input_mode_manager.py +402 -0
- core/io/key_parser.py +344 -0
- core/io/layout.py +587 -0
- core/io/message_coordinator.py +204 -0
- core/io/message_renderer.py +601 -0
- core/io/modal_interaction_handler.py +315 -0
- core/io/raw_input_processor.py +946 -0
- core/io/status_renderer.py +845 -0
- core/io/terminal_renderer.py +586 -0
- core/io/terminal_state.py +551 -0
- core/io/visual_effects.py +734 -0
- core/llm/__init__.py +26 -0
- core/llm/api_communication_service.py +863 -0
- core/llm/conversation_logger.py +473 -0
- core/llm/conversation_manager.py +414 -0
- core/llm/file_operations_executor.py +1401 -0
- core/llm/hook_system.py +402 -0
- core/llm/llm_service.py +1629 -0
- core/llm/mcp_integration.py +386 -0
- core/llm/message_display_service.py +450 -0
- core/llm/model_router.py +214 -0
- core/llm/plugin_sdk.py +396 -0
- core/llm/response_parser.py +848 -0
- core/llm/response_processor.py +364 -0
- core/llm/tool_executor.py +520 -0
- core/logging/__init__.py +19 -0
- core/logging/setup.py +208 -0
- core/models/__init__.py +5 -0
- core/models/base.py +23 -0
- core/plugins/__init__.py +13 -0
- core/plugins/collector.py +212 -0
- core/plugins/discovery.py +386 -0
- core/plugins/factory.py +263 -0
- core/plugins/registry.py +152 -0
- core/storage/__init__.py +5 -0
- core/storage/state_manager.py +84 -0
- core/ui/__init__.py +6 -0
- core/ui/config_merger.py +176 -0
- core/ui/config_widgets.py +369 -0
- core/ui/live_modal_renderer.py +276 -0
- core/ui/modal_actions.py +162 -0
- core/ui/modal_overlay_renderer.py +373 -0
- core/ui/modal_renderer.py +591 -0
- core/ui/modal_state_manager.py +443 -0
- core/ui/widget_integration.py +222 -0
- core/ui/widgets/__init__.py +27 -0
- core/ui/widgets/base_widget.py +136 -0
- core/ui/widgets/checkbox.py +85 -0
- core/ui/widgets/dropdown.py +140 -0
- core/ui/widgets/label.py +78 -0
- core/ui/widgets/slider.py +185 -0
- core/ui/widgets/text_input.py +224 -0
- core/utils/__init__.py +11 -0
- core/utils/config_utils.py +656 -0
- core/utils/dict_utils.py +212 -0
- core/utils/error_utils.py +275 -0
- core/utils/key_reader.py +171 -0
- core/utils/plugin_utils.py +267 -0
- core/utils/prompt_renderer.py +151 -0
- kollabor-0.4.9.dist-info/METADATA +298 -0
- kollabor-0.4.9.dist-info/RECORD +128 -0
- kollabor-0.4.9.dist-info/WHEEL +5 -0
- kollabor-0.4.9.dist-info/entry_points.txt +2 -0
- kollabor-0.4.9.dist-info/licenses/LICENSE +21 -0
- kollabor-0.4.9.dist-info/top_level.txt +4 -0
- kollabor_cli_main.py +20 -0
- plugins/__init__.py +1 -0
- plugins/enhanced_input/__init__.py +18 -0
- plugins/enhanced_input/box_renderer.py +103 -0
- plugins/enhanced_input/box_styles.py +142 -0
- plugins/enhanced_input/color_engine.py +165 -0
- plugins/enhanced_input/config.py +150 -0
- plugins/enhanced_input/cursor_manager.py +72 -0
- plugins/enhanced_input/geometry.py +81 -0
- plugins/enhanced_input/state.py +130 -0
- plugins/enhanced_input/text_processor.py +115 -0
- plugins/enhanced_input_plugin.py +385 -0
- plugins/fullscreen/__init__.py +9 -0
- plugins/fullscreen/example_plugin.py +327 -0
- plugins/fullscreen/matrix_plugin.py +132 -0
- plugins/hook_monitoring_plugin.py +1299 -0
- plugins/query_enhancer_plugin.py +350 -0
- plugins/save_conversation_plugin.py +502 -0
- plugins/system_commands_plugin.py +93 -0
- plugins/tmux_plugin.py +795 -0
- plugins/workflow_enforcement_plugin.py +629 -0
- system_prompt/default.md +1286 -0
- system_prompt/default_win.md +265 -0
- system_prompt/example_with_trender.md +47 -0
|
@@ -0,0 +1,863 @@
|
|
|
1
|
+
"""API Communication Service for LLM requests.
|
|
2
|
+
|
|
3
|
+
Handles pure API communication with LLM endpoints, eliminating
|
|
4
|
+
networking concerns from the main LLM service. Follows KISS principle
|
|
5
|
+
with single responsibility for HTTP communication.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
import os
|
|
12
|
+
import time
|
|
13
|
+
from contextlib import asynccontextmanager
|
|
14
|
+
from datetime import datetime
|
|
15
|
+
from typing import Any, Dict, List, Optional
|
|
16
|
+
|
|
17
|
+
import aiohttp
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger(__name__)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class APICommunicationService:
|
|
23
|
+
"""Pure API communication service for LLM requests.
|
|
24
|
+
|
|
25
|
+
Handles HTTP sessions, request formatting, response parsing,
|
|
26
|
+
and error handling for LLM API communication. Follows KISS
|
|
27
|
+
principle with single responsibility for API communication.
|
|
28
|
+
|
|
29
|
+
Eliminates API concerns from the main LLM service class.
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
def __init__(self, config, raw_conversations_dir):
|
|
33
|
+
"""Initialize API communication service.
|
|
34
|
+
|
|
35
|
+
Args:
|
|
36
|
+
config: Configuration manager for API settings
|
|
37
|
+
raw_conversations_dir: Directory for raw interaction logs
|
|
38
|
+
"""
|
|
39
|
+
self.config = config
|
|
40
|
+
self.raw_conversations_dir = raw_conversations_dir
|
|
41
|
+
|
|
42
|
+
# Load API configuration (environment variables take precedence over config)
|
|
43
|
+
|
|
44
|
+
# API endpoint/URL
|
|
45
|
+
self.api_url = os.environ.get("KOLLABOR_API_ENDPOINT") or config.get("core.llm.api_url", "http://localhost:1234")
|
|
46
|
+
if os.environ.get("KOLLABOR_API_ENDPOINT"):
|
|
47
|
+
logger.debug("Using API endpoint from KOLLABOR_API_ENDPOINT environment variable")
|
|
48
|
+
|
|
49
|
+
# Model name
|
|
50
|
+
self.model = os.environ.get("KOLLABOR_API_MODEL") or config.get("core.llm.model", "qwen/qwen3-4b")
|
|
51
|
+
if os.environ.get("KOLLABOR_API_MODEL"):
|
|
52
|
+
logger.debug("Using model from KOLLABOR_API_MODEL environment variable")
|
|
53
|
+
|
|
54
|
+
# Temperature (with type conversion)
|
|
55
|
+
env_temperature = os.environ.get("KOLLABOR_API_TEMPERATURE")
|
|
56
|
+
if env_temperature:
|
|
57
|
+
try:
|
|
58
|
+
self.temperature = float(env_temperature)
|
|
59
|
+
logger.debug("Using temperature from KOLLABOR_API_TEMPERATURE environment variable")
|
|
60
|
+
except ValueError:
|
|
61
|
+
logger.warning(f"Invalid KOLLABOR_API_TEMPERATURE value: {env_temperature}, using config/default")
|
|
62
|
+
self.temperature = config.get("core.llm.temperature", 0.7)
|
|
63
|
+
else:
|
|
64
|
+
self.temperature = config.get("core.llm.temperature", 0.7)
|
|
65
|
+
|
|
66
|
+
# Timeout (with type conversion)
|
|
67
|
+
env_timeout = os.environ.get("KOLLABOR_API_TIMEOUT")
|
|
68
|
+
if env_timeout:
|
|
69
|
+
try:
|
|
70
|
+
self.timeout = int(env_timeout)
|
|
71
|
+
logger.debug("Using timeout from KOLLABOR_API_TIMEOUT environment variable")
|
|
72
|
+
except ValueError:
|
|
73
|
+
logger.warning(f"Invalid KOLLABOR_API_TIMEOUT value: {env_timeout}, using config/default")
|
|
74
|
+
self.timeout = config.get("core.llm.timeout", 30000)
|
|
75
|
+
else:
|
|
76
|
+
self.timeout = config.get("core.llm.timeout", 30000)
|
|
77
|
+
|
|
78
|
+
# Streaming (not typically set via env, kept for completeness)
|
|
79
|
+
self.enable_streaming = config.get("core.llm.enable_streaming", False)
|
|
80
|
+
|
|
81
|
+
# Max tokens (with type conversion)
|
|
82
|
+
env_max_tokens = os.environ.get("KOLLABOR_API_MAX_TOKENS")
|
|
83
|
+
if env_max_tokens:
|
|
84
|
+
try:
|
|
85
|
+
self.max_tokens = int(env_max_tokens)
|
|
86
|
+
logger.debug("Using max tokens from KOLLABOR_API_MAX_TOKENS environment variable")
|
|
87
|
+
except ValueError:
|
|
88
|
+
logger.warning(f"Invalid KOLLABOR_API_MAX_TOKENS value: {env_max_tokens}, using config/default")
|
|
89
|
+
self.max_tokens = config.get("core.llm.max_tokens", None)
|
|
90
|
+
else:
|
|
91
|
+
self.max_tokens = config.get("core.llm.max_tokens", None)
|
|
92
|
+
|
|
93
|
+
# API token (supports both KOLLABOR_API_TOKEN and KOLLABOR_API_KEY)
|
|
94
|
+
self.api_token = (
|
|
95
|
+
os.environ.get("KOLLABOR_API_TOKEN")
|
|
96
|
+
or os.environ.get("KOLLABOR_API_KEY")
|
|
97
|
+
or config.get("core.llm.api_token")
|
|
98
|
+
)
|
|
99
|
+
if os.environ.get("KOLLABOR_API_TOKEN"):
|
|
100
|
+
logger.debug("Using API token from KOLLABOR_API_TOKEN environment variable")
|
|
101
|
+
elif os.environ.get("KOLLABOR_API_KEY"):
|
|
102
|
+
logger.debug("Using API token from KOLLABOR_API_KEY environment variable")
|
|
103
|
+
|
|
104
|
+
# HTTP session state with enhanced lifecycle management
|
|
105
|
+
self.session = None
|
|
106
|
+
self.connector = None
|
|
107
|
+
self._session_lock = asyncio.Lock()
|
|
108
|
+
self._initialized = False
|
|
109
|
+
|
|
110
|
+
# Request cancellation support
|
|
111
|
+
self.current_request_task = None
|
|
112
|
+
self.cancel_requested = False
|
|
113
|
+
|
|
114
|
+
# Token usage tracking
|
|
115
|
+
self.last_token_usage = {}
|
|
116
|
+
|
|
117
|
+
# Resource monitoring and statistics
|
|
118
|
+
self._connection_stats = {
|
|
119
|
+
'total_requests': 0,
|
|
120
|
+
'failed_requests': 0,
|
|
121
|
+
'recreated_sessions': 0,
|
|
122
|
+
'last_activity': None,
|
|
123
|
+
'session_creation_time': None,
|
|
124
|
+
'connection_errors': 0
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
logger.info(f"API service initialized for {self.api_url}")
|
|
128
|
+
|
|
129
|
+
async def initialize(self):
    """Initialize HTTP session with proper error handling and resource management.

    Safe to call multiple times: subsequent calls are no-ops while already
    initialized. Session/connector construction is delegated to
    _create_session() so the setup logic is not duplicated between
    first-time initialization and session recreation.

    Raises:
        Exception: Propagates any session-construction failure after
            cleaning up partially created resources.
    """
    async with self._session_lock:
        if self._initialized:
            return

        try:
            # Single source of truth for timeout/connector/session setup
            # (previously duplicated verbatim in this method).
            await self._create_session()

            self._connection_stats['last_activity'] = time.time()

            logger.info(
                f"HTTP session initialized with "
                f"{self.config.get('core.llm.http_connector_limit', 100)} total connections, "
                f"{self.config.get('core.llm.http_limit_per_host', 20)} per host, "
                f"{self.config.get('core.llm.keepalive_timeout', 30)}s keepalive"
            )

        except Exception as e:
            logger.error(f"Failed to initialize API service: {e}")
            # Ensure cleanup on failure
            await self._cleanup_session()
            raise
async def shutdown(self):
|
|
184
|
+
"""Shutdown HTTP session and cleanup resources with comprehensive error handling."""
|
|
185
|
+
async with self._session_lock:
|
|
186
|
+
if not self._initialized:
|
|
187
|
+
return
|
|
188
|
+
|
|
189
|
+
try:
|
|
190
|
+
logger.info("Starting API communication service shutdown")
|
|
191
|
+
|
|
192
|
+
# Cancel any active requests
|
|
193
|
+
if self.current_request_task and not self.current_request_task.done():
|
|
194
|
+
logger.info("Cancelling active request during shutdown")
|
|
195
|
+
self.current_request_task.cancel()
|
|
196
|
+
try:
|
|
197
|
+
await self.current_request_task
|
|
198
|
+
except asyncio.CancelledError:
|
|
199
|
+
pass
|
|
200
|
+
except Exception as e:
|
|
201
|
+
logger.error(f"Error cancelling request during shutdown: {e}")
|
|
202
|
+
|
|
203
|
+
# Clean up session resources
|
|
204
|
+
await self._cleanup_session()
|
|
205
|
+
|
|
206
|
+
self._initialized = False
|
|
207
|
+
logger.info("API communication service shutdown complete")
|
|
208
|
+
|
|
209
|
+
except Exception as e:
|
|
210
|
+
logger.error(f"Error during API service shutdown: {e}")
|
|
211
|
+
# Don't raise - we want cleanup to complete even if there are errors
|
|
212
|
+
|
|
213
|
+
async def _ensure_session(self):
|
|
214
|
+
"""Ensure we have a valid session, recreate if needed."""
|
|
215
|
+
if not self._initialized or not self.session or self.session.closed:
|
|
216
|
+
logger.warning("Session not available or closed, reinitializing...")
|
|
217
|
+
await self._recreate_session()
|
|
218
|
+
|
|
219
|
+
async def _recreate_session(self):
    """Recreate the session after errors or timeout.

    Tears down the existing session/connector, bumps the recreation
    counter, and builds a fresh session under the session lock.

    Raises:
        Exception: Propagates any failure from cleanup or creation.
    """
    async with self._session_lock:
        try:
            logger.info("Recreating HTTP session")
            # Dispose of the old session/connector before rebuilding.
            await self._cleanup_session()
            self._connection_stats['recreated_sessions'] += 1
            # Build a fresh session with the same configuration.
            await self._create_session()
            logger.info("HTTP session recreated successfully")
        except Exception as exc:
            logger.error(f"Failed to recreate session: {exc}")
            raise
async def _create_session(self):
    """Create a fresh HTTP session.

    Builds the timeout policy, TCP connector, and ClientSession from the
    configured limits, then marks the service initialized and records the
    creation time.
    """
    # A configured timeout of 0 means "no timeout" (None for aiohttp).
    total_timeout = self.timeout if self.timeout != 0 else None
    client_timeout = aiohttp.ClientTimeout(
        total=total_timeout,
        connect=10,
        sock_read=total_timeout,
    )

    limit = self.config.get("core.llm.http_connector_limit", 100)
    per_host = self.config.get("core.llm.http_limit_per_host", 20)
    keepalive = self.config.get("core.llm.keepalive_timeout", 30)

    self.connector = aiohttp.TCPConnector(
        limit=limit,
        limit_per_host=per_host,
        keepalive_timeout=keepalive,
        enable_cleanup_closed=True,
        force_close=False,
        use_dns_cache=True,
        ttl_dns_cache=300,
        family=0,       # both IPv4 and IPv6
        ssl=False,      # NOTE(review): disables TLS verification - confirm this is intended beyond local dev
    )

    self.session = aiohttp.ClientSession(
        connector=self.connector,
        timeout=client_timeout,
        headers={"User-Agent": "Kollabor-CLI/1.0"},
    )

    self._initialized = True
    self._connection_stats['session_creation_time'] = time.time()
async def _cleanup_session(self):
|
|
271
|
+
"""Clean up session and connector resources."""
|
|
272
|
+
try:
|
|
273
|
+
if self.session and not self.session.closed:
|
|
274
|
+
await self.session.close()
|
|
275
|
+
# Give connections time to close properly
|
|
276
|
+
await asyncio.sleep(0.1)
|
|
277
|
+
|
|
278
|
+
if self.connector:
|
|
279
|
+
await self.connector.close()
|
|
280
|
+
|
|
281
|
+
self.session = None
|
|
282
|
+
self.connector = None
|
|
283
|
+
|
|
284
|
+
except Exception as e:
|
|
285
|
+
logger.error(f"Error during session cleanup: {e}")
|
|
286
|
+
|
|
287
|
+
def get_last_token_usage(self) -> Dict[str, Any]:
|
|
288
|
+
"""Get token usage from the last API call.
|
|
289
|
+
|
|
290
|
+
Returns:
|
|
291
|
+
Dictionary containing token usage info
|
|
292
|
+
"""
|
|
293
|
+
return self.last_token_usage.copy()
|
|
294
|
+
|
|
295
|
+
def cancel_current_request(self):
|
|
296
|
+
"""Cancel any active API request."""
|
|
297
|
+
self.cancel_requested = True
|
|
298
|
+
|
|
299
|
+
if self.current_request_task and not self.current_request_task.done():
|
|
300
|
+
logger.info("Cancelling active API request")
|
|
301
|
+
self.current_request_task.cancel()
|
|
302
|
+
|
|
303
|
+
async def call_llm(self, conversation_history: List[Dict[str, str]],
                   max_history: Optional[int] = None, streaming_callback=None) -> str:
    """Make API call to LLM with conversation history and robust error handling.

    The HTTP request runs in a separate asyncio task so that
    _monitor_request() can poll the cancel_requested flag and cancel the
    in-flight request promptly.

    Args:
        conversation_history: List of conversation messages (dicts with
            "role"/"content" keys, or objects with .role/.content).
        max_history: Maximum number of messages to send; None (or 0) sends
            the full history.
        streaming_callback: Optional callback invoked with each streaming
            content chunk (awaited — presumably an async callable; confirm
            against callers).

    Returns:
        LLM response content

    Raises:
        RuntimeError: If session not initialized
        asyncio.CancelledError: If request was cancelled
        Exception: For API communication errors
    """
    # Ensure we have a valid session before proceeding
    await self._ensure_session()

    # Validate session state (defensive: _ensure_session may have failed)
    if not self.session or self.session.closed:
        raise RuntimeError("HTTP session is not available - failed to initialize")

    # Reset cancellation flag for this request
    self.cancel_requested = False

    # Store streaming callback for use in _handle_streaming_response()
    self.streaming_callback = streaming_callback

    # Update activity tracking statistics
    self._connection_stats['total_requests'] += 1
    self._connection_stats['last_activity'] = time.time()

    # Prepare messages for API
    messages = self._prepare_messages(conversation_history, max_history)

    # Build request payload (chat-completions format; see the
    # /v1/chat/completions endpoint used by the request executor)
    payload = {
        "model": self.model,
        "messages": messages,
        "temperature": self.temperature,
        "stream": self.enable_streaming
    }

    # Add max_tokens if configured (truthy check: None and 0 both omit it)
    if self.max_tokens:
        payload["max_tokens"] = int(self.max_tokens)

    # Execute request as a task so it can be cancelled from outside while
    # _monitor_request() polls for completion/cancellation.
    self.current_request_task = asyncio.create_task(
        self._execute_request_with_error_handling(payload)
    )

    try:
        return await self._monitor_request()
    except asyncio.CancelledError:
        # Record the cancellation in the raw interaction log, then replace
        # the propagating CancelledError with one carrying a user message.
        self._log_raw_interaction(payload, cancelled=True)
        raise asyncio.CancelledError("API request cancelled by user")
    except Exception as e:
        # Count the failure; the exception itself propagates unchanged.
        # NOTE(review): cancellations are not counted as failures.
        self._connection_stats['failed_requests'] += 1
        raise
def _prepare_messages(self, conversation_history: List[Any],
|
|
368
|
+
max_history: Optional[int]) -> List[Dict[str, str]]:
|
|
369
|
+
"""Prepare conversation messages for API request.
|
|
370
|
+
|
|
371
|
+
Args:
|
|
372
|
+
conversation_history: Raw conversation history
|
|
373
|
+
max_history: Maximum messages to include
|
|
374
|
+
|
|
375
|
+
Returns:
|
|
376
|
+
List of formatted messages for API
|
|
377
|
+
"""
|
|
378
|
+
# Apply history limit if specified
|
|
379
|
+
if max_history:
|
|
380
|
+
recent_messages = conversation_history[-max_history:]
|
|
381
|
+
else:
|
|
382
|
+
recent_messages = conversation_history
|
|
383
|
+
|
|
384
|
+
# Format messages for API
|
|
385
|
+
messages = []
|
|
386
|
+
for msg in recent_messages:
|
|
387
|
+
# Handle both ConversationMessage objects and dicts
|
|
388
|
+
if hasattr(msg, 'role'):
|
|
389
|
+
role, content = msg.role, msg.content
|
|
390
|
+
else:
|
|
391
|
+
role, content = msg["role"], msg["content"]
|
|
392
|
+
|
|
393
|
+
messages.append({
|
|
394
|
+
"role": role,
|
|
395
|
+
"content": content
|
|
396
|
+
})
|
|
397
|
+
|
|
398
|
+
return messages
|
|
399
|
+
|
|
400
|
+
async def _execute_request_with_error_handling(self, payload: Dict[str, Any]) -> str:
    """Execute HTTP request with comprehensive error handling and session recovery.

    POSTs the payload to the chat-completions endpoint, parses either a
    streaming or non-streaming response, and recreates the HTTP session
    after server errors (5xx), connection errors, or timeouts so the next
    request starts from a clean state. Every request/response/error is
    mirrored to the raw interaction log.

    Args:
        payload: Request payload

    Returns:
        Response content

    Raises:
        Exception: For various API communication errors (HTTP errors,
            connection errors, timeouts — all re-raised as plain Exception
            with a descriptive message).
    """
    start_time = time.time()

    try:
        # Log raw request before sending anything
        self._log_raw_interaction(payload)

        # Build headers for authentication (bearer token is optional)
        headers = {"Content-Type": "application/json"}
        if self.api_token:
            headers["Authorization"] = f"Bearer {self.api_token}"

        # Determine the correct URL: a configured URL that already contains
        # the full path is used as-is, otherwise the standard path is added.
        if "/chat/completions" in self.api_url:
            url = self.api_url
        else:
            url = f"{self.api_url}/v1/chat/completions"

        # Per-request timeout; a configured timeout of 0 means "no timeout"
        # (None for aiohttp). Overrides the session-level default.
        timeout_val = None if self.timeout == 0 else self.timeout
        timeout = aiohttp.ClientTimeout(
            total=timeout_val,
            connect=10,
            sock_read=timeout_val
        )

        async with self.session.post(
            url,
            json=payload,
            headers=headers,
            timeout=timeout
        ) as response:

            request_duration = time.time() - start_time

            if response.status == 200:
                if self.enable_streaming:
                    content = await self._handle_streaming_response(response)
                else:
                    data = await response.json()
                    content = data["choices"][0]["message"]["content"]
                    # Extract token usage if available
                    self.last_token_usage = data.get("usage", {})

                # Log successful response. When streaming, `data` was never
                # bound, but the conditional below only evaluates it on the
                # non-streaming branch, so this is safe.
                self._log_raw_interaction(
                    payload,
                    response_data=data if not self.enable_streaming else {"choices": [{"message": {"content": content}}]}
                )

                logger.debug(f"API call completed in {request_duration:.2f}s")
                return content

            else:
                # Handle HTTP error responses
                error_text = await response.text()
                error_msg = f"LLM API error: {response.status} - {error_text}"

                # Log error response
                self._log_raw_interaction(payload, error=error_msg)

                # For server errors (5xx), session might be broken
                if 500 <= response.status < 600:
                    logger.warning(f"Server error detected, recreating session: {error_msg}")
                    await self._recreate_session()

                raise Exception(error_msg)

    except aiohttp.ClientError as e:
        self._connection_stats['connection_errors'] += 1
        logger.error(f"API request failed with client error: {e}")

        # Session might be broken, recreate it for connection-level errors
        if isinstance(e, (aiohttp.ClientConnectionError,
                          aiohttp.ServerDisconnectedError,
                          aiohttp.ClientPayloadError)):
            logger.info("Connection error detected, recreating session")
            await self._recreate_session()

        raise Exception(f"API connection error: {e}")

    except asyncio.TimeoutError:
        # NOTE(review): self.timeout is treated as seconds here, but the
        # config default is 30000 — confirm the intended unit.
        error_msg = f"LLM API timeout after {self.timeout} seconds"
        self._log_raw_interaction(payload, error=error_msg)
        logger.warning(f"API timeout, session may be stale")
        await self._recreate_session()
        raise Exception(error_msg)

    except Exception as e:
        # Log any other exceptions, but avoid double-logging the errors
        # already recorded above (re-raised as plain Exception).
        error_msg = f"Unexpected API error: {e}"
        if not str(e).startswith("LLM API error") and not str(e).startswith("API connection error"):
            self._log_raw_interaction(payload, error=error_msg)
        raise
async def _execute_request(self, payload: Dict[str, Any]) -> str:
    """Execute the actual HTTP request.

    NOTE(review): this appears to be a legacy duplicate of
    _execute_request_with_error_handling without the session-recovery
    logic; it is not called anywhere in this file. Verify external callers
    before removing.

    Args:
        payload: Request payload

    Returns:
        Response content

    Raises:
        Exception: For HTTP errors, timeouts, or parsing failures.
    """
    start_time = time.time()

    try:
        # Log raw request
        self._log_raw_interaction(payload)

        # Build headers for authentication
        headers = {"Content-Type": "application/json"}
        if self.api_token:
            headers["Authorization"] = f"Bearer {self.api_token}"

        # Determine the correct URL - if it already contains the full path, use as-is
        if "/chat/completions" in self.api_url:
            url = self.api_url
        else:
            url = f"{self.api_url}/v1/chat/completions"

        async with self.session.post(
            url,
            json=payload,
            headers=headers,
            # A configured timeout of 0 means "no timeout" (None for aiohttp)
            timeout=aiohttp.ClientTimeout(total=None if self.timeout == 0 else self.timeout)
        ) as response:

            request_duration = time.time() - start_time

            if response.status == 200:
                if self.enable_streaming:
                    content = await self._handle_streaming_response(response)
                else:
                    data = await response.json()
                    content = data["choices"][0]["message"]["content"]

                    # Extract token usage if available
                    self.last_token_usage = data.get("usage", {})

                # Log successful response with full data; when streaming,
                # `data` is unbound but the conditional never evaluates it.
                self._log_raw_interaction(payload, response_data=data if not self.enable_streaming else {"choices": [{"message": {"content": content}}]})

                logger.debug(f"API call completed in {request_duration:.2f}s")
                return content

            else:
                error_text = await response.text()
                error_msg = f"LLM API error: {response.status} - {error_text}"

                # Log error response
                self._log_raw_interaction(payload, error=error_msg)

                raise Exception(error_msg)

    except asyncio.TimeoutError:
        error_msg = f"LLM API timeout after {self.timeout} seconds"
        self._log_raw_interaction(payload, error=error_msg)
        raise Exception(error_msg)

    except Exception as e:
        # Log any other exceptions (HTTP errors were already logged above)
        if not str(e).startswith("LLM API error"):
            self._log_raw_interaction(payload, error=str(e))
        raise
async def _handle_streaming_response(self, response) -> str:
|
|
578
|
+
"""Handle streaming response from API.
|
|
579
|
+
|
|
580
|
+
Args:
|
|
581
|
+
response: HTTP response object
|
|
582
|
+
|
|
583
|
+
Returns:
|
|
584
|
+
Complete response content
|
|
585
|
+
"""
|
|
586
|
+
content_parts = []
|
|
587
|
+
buffer = ""
|
|
588
|
+
|
|
589
|
+
async for chunk in response.content.iter_chunked(1024):
|
|
590
|
+
# Check for cancellation
|
|
591
|
+
if self.cancel_requested:
|
|
592
|
+
raise asyncio.CancelledError("Streaming request cancelled")
|
|
593
|
+
|
|
594
|
+
chunk_text = chunk.decode('utf-8')
|
|
595
|
+
buffer += chunk_text
|
|
596
|
+
|
|
597
|
+
# Process complete SSE lines
|
|
598
|
+
while '\n' in buffer:
|
|
599
|
+
line, buffer = buffer.split('\n', 1)
|
|
600
|
+
line = line.strip()
|
|
601
|
+
|
|
602
|
+
if line.startswith('data: '):
|
|
603
|
+
data_text = line[6:] # Remove 'data: ' prefix
|
|
604
|
+
if data_text == '[DONE]':
|
|
605
|
+
break
|
|
606
|
+
try:
|
|
607
|
+
chunk_data = json.loads(data_text)
|
|
608
|
+
if 'choices' in chunk_data and len(chunk_data['choices']) > 0:
|
|
609
|
+
delta = chunk_data['choices'][0].get('delta', {})
|
|
610
|
+
if 'content' in delta:
|
|
611
|
+
content_chunk = delta['content']
|
|
612
|
+
content_parts.append(content_chunk)
|
|
613
|
+
|
|
614
|
+
# Call streaming callback with chunk if provided
|
|
615
|
+
if self.streaming_callback:
|
|
616
|
+
await self.streaming_callback(content_chunk)
|
|
617
|
+
except json.JSONDecodeError:
|
|
618
|
+
continue
|
|
619
|
+
|
|
620
|
+
return ''.join(content_parts)
|
|
621
|
+
|
|
622
|
+
async def _monitor_request(self) -> str:
|
|
623
|
+
"""Monitor request execution with cancellation support.
|
|
624
|
+
|
|
625
|
+
Returns:
|
|
626
|
+
API response content
|
|
627
|
+
"""
|
|
628
|
+
try:
|
|
629
|
+
while not self.current_request_task.done():
|
|
630
|
+
if self.cancel_requested:
|
|
631
|
+
logger.info("Cancelling API request due to user request")
|
|
632
|
+
self.current_request_task.cancel()
|
|
633
|
+
break
|
|
634
|
+
|
|
635
|
+
# Small delay to avoid busy waiting
|
|
636
|
+
await asyncio.sleep(self.config.get("core.llm.api_poll_delay", 0.01))
|
|
637
|
+
|
|
638
|
+
# Get result
|
|
639
|
+
return await self.current_request_task
|
|
640
|
+
|
|
641
|
+
except asyncio.CancelledError:
|
|
642
|
+
logger.info("API request was cancelled")
|
|
643
|
+
raise
|
|
644
|
+
|
|
645
|
+
def _log_raw_interaction(self, request_payload: Dict[str, Any],
|
|
646
|
+
response_data: Optional[Dict[str, Any]] = None,
|
|
647
|
+
error: Optional[str] = None,
|
|
648
|
+
cancelled: bool = False) -> None:
|
|
649
|
+
"""Log raw request and response data to JSONL file.
|
|
650
|
+
|
|
651
|
+
Args:
|
|
652
|
+
request_payload: The request payload sent to LLM
|
|
653
|
+
response_data: The full response data from LLM (optional)
|
|
654
|
+
error: Error message if request failed (optional)
|
|
655
|
+
cancelled: Whether the request was cancelled (optional)
|
|
656
|
+
"""
|
|
657
|
+
try:
|
|
658
|
+
# Create filename with timestamp
|
|
659
|
+
timestamp = datetime.now().strftime("%Y-%m-%d_%H%M%S")
|
|
660
|
+
filename = f"raw_llm_interactions_{timestamp}.jsonl"
|
|
661
|
+
filepath = self.raw_conversations_dir / filename
|
|
662
|
+
|
|
663
|
+
# Create log entry
|
|
664
|
+
log_entry = {
|
|
665
|
+
"timestamp": datetime.now().isoformat(),
|
|
666
|
+
"request": {
|
|
667
|
+
"url": f"{self.api_url}/v1/chat/completions",
|
|
668
|
+
"method": "POST",
|
|
669
|
+
"payload": request_payload
|
|
670
|
+
}
|
|
671
|
+
}
|
|
672
|
+
|
|
673
|
+
if response_data:
|
|
674
|
+
log_entry["response"] = {
|
|
675
|
+
"status": "success",
|
|
676
|
+
"data": response_data
|
|
677
|
+
}
|
|
678
|
+
elif error:
|
|
679
|
+
log_entry["response"] = {
|
|
680
|
+
"status": "error",
|
|
681
|
+
"error": error
|
|
682
|
+
}
|
|
683
|
+
elif cancelled:
|
|
684
|
+
log_entry["response"] = {
|
|
685
|
+
"status": "cancelled",
|
|
686
|
+
"message": "Request was cancelled by user"
|
|
687
|
+
}
|
|
688
|
+
|
|
689
|
+
# Append to JSONL file
|
|
690
|
+
with open(filepath, 'a', encoding='utf-8') as f:
|
|
691
|
+
f.write(json.dumps(log_entry, ensure_ascii=False) + '\n')
|
|
692
|
+
|
|
693
|
+
except Exception as e:
|
|
694
|
+
logger.error(f"Failed to log raw interaction: {e}")
|
|
695
|
+
|
|
696
|
+
@asynccontextmanager
async def api_session(self):
    """Async context manager yielding this service with a live session.

    Usage:
        async with api_service.api_session():
            result = await api_service.call_llm(conversation)

    Yields:
        The API service instance with initialized session
    """
    try:
        await self._ensure_session()
        logger.debug("API session context entered")
        yield self
    except Exception as e:
        logger.error(f"Error in API session context: {e}")
        raise
    finally:
        # The session is intentionally left open so it can be reused;
        # explicit shutdown() calls are responsible for cleanup.
        logger.debug("API session context exited")
|
|
719
|
+
|
|
720
|
+
def get_connection_stats(self) -> Dict[str, Any]:
    """Get comprehensive connection statistics and resource usage.

    Returns:
        Dictionary with connection statistics and resource information
    """
    snapshot = self._connection_stats.copy()

    # Connector details come from private aiohttp attributes; only
    # probed when a session with a connector actually exists.
    if self.session and hasattr(self.session, '_connector'):
        conn = self.session._connector
        snapshot.update({
            'active_connections': len(conn._conns),
            'available_connections': len(conn._available),
            'closed_connections': getattr(conn, '_closed', 0),
            'limit': conn.limit,
            'limit_per_host': conn.limit_per_host,
            'keepalive_timeout': conn.keepalive_timeout,
        })

    # Session health / age information.
    created_at = self._connection_stats['session_creation_time']
    last_seen = self._connection_stats['last_activity']
    snapshot.update({
        'session_initialized': self._initialized,
        'session_closed': self.session.closed if self.session else True,
        'session_age_seconds': time.time() - created_at if created_at else 0,
        'last_activity_age_seconds': time.time() - last_seen if last_seen else 0,
    })

    # Derived failure ratios; zero when nothing has been sent yet.
    requests_seen = snapshot['total_requests']
    if requests_seen > 0:
        snapshot['failure_rate_percent'] = round(
            (snapshot['failed_requests'] / requests_seen) * 100, 2)
        snapshot['connection_error_rate_percent'] = round(
            (snapshot['connection_errors'] / requests_seen) * 100, 2)
    else:
        snapshot['failure_rate_percent'] = 0.0
        snapshot['connection_error_rate_percent'] = 0.0

    return snapshot
|
|
764
|
+
|
|
765
|
+
async def health_check(self) -> Dict[str, Any]:
    """Perform comprehensive health check on the API service.

    Runs three checks -- session state, connection reachability, and
    resource/error-rate thresholds -- and aggregates them into a single
    report.

    Returns:
        Dictionary with an overall 'healthy' flag, per-check details
        under 'checks', and a 'timestamp'.
    """
    health_status: Dict[str, Any] = {
        'healthy': True,
        'checks': {},
        'timestamp': time.time()
    }

    # Session check: initialized and not closed. bool() guards against
    # the and-chain short-circuiting to None / the session object and
    # leaking a non-boolean into the report.
    session_healthy = bool(
        self._initialized and
        self.session and
        not self.session.closed
    )
    health_status['checks']['session'] = {
        'healthy': session_healthy,
        'initialized': self._initialized,
        'closed': self.session.closed if self.session else True
    }
    if not session_healthy:
        health_status['healthy'] = False

    # Connection check: probe the API endpoint with a short request.
    connection_healthy = bool(await self._test_connection())
    health_status['checks']['connection'] = {
        'healthy': connection_healthy,
        'url': self.api_url
    }
    if not connection_healthy:
        health_status['healthy'] = False

    # Resource check: error rates must stay under fixed thresholds.
    stats = self.get_connection_stats()
    resource_healthy = (
        stats.get('failure_rate_percent', 0) < 50 and  # Less than 50% failure rate
        stats.get('connection_error_rate_percent', 0) < 25  # Less than 25% connection error rate
    )
    health_status['checks']['resources'] = {
        'healthy': resource_healthy,
        'failure_rate': stats.get('failure_rate_percent', 0),
        'connection_error_rate': stats.get('connection_error_rate_percent', 0),
        'recreated_sessions': stats.get('recreated_sessions', 0)
    }
    if not resource_healthy:
        health_status['healthy'] = False

    return health_status
|
|
816
|
+
|
|
817
|
+
async def _test_connection(self) -> bool:
|
|
818
|
+
"""Test if we can establish a connection to the API.
|
|
819
|
+
|
|
820
|
+
Returns:
|
|
821
|
+
True if connection test succeeds, False otherwise
|
|
822
|
+
"""
|
|
823
|
+
if not self.session or self.session.closed:
|
|
824
|
+
return False
|
|
825
|
+
|
|
826
|
+
try:
|
|
827
|
+
# Try to make a simple health check request
|
|
828
|
+
# Note: Many LLM APIs don't have a health endpoint, so we'll test with a minimal request
|
|
829
|
+
timeout = aiohttp.ClientTimeout(total=5) # Short timeout for health check
|
|
830
|
+
|
|
831
|
+
# Try to connect to the base URL
|
|
832
|
+
if "/chat/completions" in self.api_url:
|
|
833
|
+
health_url = self.api_url.rsplit('/chat/completions', 1)[0]
|
|
834
|
+
else:
|
|
835
|
+
health_url = self.api_url
|
|
836
|
+
|
|
837
|
+
async with self.session.get(
|
|
838
|
+
health_url,
|
|
839
|
+
timeout=timeout,
|
|
840
|
+
allow_redirects=True
|
|
841
|
+
) as response:
|
|
842
|
+
# Any response (even 404) indicates the server is reachable
|
|
843
|
+
return response.status < 500
|
|
844
|
+
|
|
845
|
+
except Exception as e:
|
|
846
|
+
logger.debug(f"Connection test failed: {e}")
|
|
847
|
+
return False
|
|
848
|
+
|
|
849
|
+
def get_api_stats(self) -> Dict[str, Any]:
    """Get API communication statistics.

    Returns:
        Dictionary with API statistics
    """
    connection_stats = self.get_connection_stats()
    return {
        "api_url": self.api_url,
        "model": self.model,
        "temperature": self.temperature,
        "timeout": self.timeout,
        "streaming_enabled": self.enable_streaming,
        "session_active": self.session is not None,
        "connection_stats": connection_stats,
    }
|