kollabor 0.4.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- core/__init__.py +18 -0
- core/application.py +578 -0
- core/cli.py +193 -0
- core/commands/__init__.py +43 -0
- core/commands/executor.py +277 -0
- core/commands/menu_renderer.py +319 -0
- core/commands/parser.py +186 -0
- core/commands/registry.py +331 -0
- core/commands/system_commands.py +479 -0
- core/config/__init__.py +7 -0
- core/config/llm_task_config.py +110 -0
- core/config/loader.py +501 -0
- core/config/manager.py +112 -0
- core/config/plugin_config_manager.py +346 -0
- core/config/plugin_schema.py +424 -0
- core/config/service.py +399 -0
- core/effects/__init__.py +1 -0
- core/events/__init__.py +12 -0
- core/events/bus.py +129 -0
- core/events/executor.py +154 -0
- core/events/models.py +258 -0
- core/events/processor.py +176 -0
- core/events/registry.py +289 -0
- core/fullscreen/__init__.py +19 -0
- core/fullscreen/command_integration.py +290 -0
- core/fullscreen/components/__init__.py +12 -0
- core/fullscreen/components/animation.py +258 -0
- core/fullscreen/components/drawing.py +160 -0
- core/fullscreen/components/matrix_components.py +177 -0
- core/fullscreen/manager.py +302 -0
- core/fullscreen/plugin.py +204 -0
- core/fullscreen/renderer.py +282 -0
- core/fullscreen/session.py +324 -0
- core/io/__init__.py +52 -0
- core/io/buffer_manager.py +362 -0
- core/io/config_status_view.py +272 -0
- core/io/core_status_views.py +410 -0
- core/io/input_errors.py +313 -0
- core/io/input_handler.py +2655 -0
- core/io/input_mode_manager.py +402 -0
- core/io/key_parser.py +344 -0
- core/io/layout.py +587 -0
- core/io/message_coordinator.py +204 -0
- core/io/message_renderer.py +601 -0
- core/io/modal_interaction_handler.py +315 -0
- core/io/raw_input_processor.py +946 -0
- core/io/status_renderer.py +845 -0
- core/io/terminal_renderer.py +586 -0
- core/io/terminal_state.py +551 -0
- core/io/visual_effects.py +734 -0
- core/llm/__init__.py +26 -0
- core/llm/api_communication_service.py +863 -0
- core/llm/conversation_logger.py +473 -0
- core/llm/conversation_manager.py +414 -0
- core/llm/file_operations_executor.py +1401 -0
- core/llm/hook_system.py +402 -0
- core/llm/llm_service.py +1629 -0
- core/llm/mcp_integration.py +386 -0
- core/llm/message_display_service.py +450 -0
- core/llm/model_router.py +214 -0
- core/llm/plugin_sdk.py +396 -0
- core/llm/response_parser.py +848 -0
- core/llm/response_processor.py +364 -0
- core/llm/tool_executor.py +520 -0
- core/logging/__init__.py +19 -0
- core/logging/setup.py +208 -0
- core/models/__init__.py +5 -0
- core/models/base.py +23 -0
- core/plugins/__init__.py +13 -0
- core/plugins/collector.py +212 -0
- core/plugins/discovery.py +386 -0
- core/plugins/factory.py +263 -0
- core/plugins/registry.py +152 -0
- core/storage/__init__.py +5 -0
- core/storage/state_manager.py +84 -0
- core/ui/__init__.py +6 -0
- core/ui/config_merger.py +176 -0
- core/ui/config_widgets.py +369 -0
- core/ui/live_modal_renderer.py +276 -0
- core/ui/modal_actions.py +162 -0
- core/ui/modal_overlay_renderer.py +373 -0
- core/ui/modal_renderer.py +591 -0
- core/ui/modal_state_manager.py +443 -0
- core/ui/widget_integration.py +222 -0
- core/ui/widgets/__init__.py +27 -0
- core/ui/widgets/base_widget.py +136 -0
- core/ui/widgets/checkbox.py +85 -0
- core/ui/widgets/dropdown.py +140 -0
- core/ui/widgets/label.py +78 -0
- core/ui/widgets/slider.py +185 -0
- core/ui/widgets/text_input.py +224 -0
- core/utils/__init__.py +11 -0
- core/utils/config_utils.py +656 -0
- core/utils/dict_utils.py +212 -0
- core/utils/error_utils.py +275 -0
- core/utils/key_reader.py +171 -0
- core/utils/plugin_utils.py +267 -0
- core/utils/prompt_renderer.py +151 -0
- kollabor-0.4.9.dist-info/METADATA +298 -0
- kollabor-0.4.9.dist-info/RECORD +128 -0
- kollabor-0.4.9.dist-info/WHEEL +5 -0
- kollabor-0.4.9.dist-info/entry_points.txt +2 -0
- kollabor-0.4.9.dist-info/licenses/LICENSE +21 -0
- kollabor-0.4.9.dist-info/top_level.txt +4 -0
- kollabor_cli_main.py +20 -0
- plugins/__init__.py +1 -0
- plugins/enhanced_input/__init__.py +18 -0
- plugins/enhanced_input/box_renderer.py +103 -0
- plugins/enhanced_input/box_styles.py +142 -0
- plugins/enhanced_input/color_engine.py +165 -0
- plugins/enhanced_input/config.py +150 -0
- plugins/enhanced_input/cursor_manager.py +72 -0
- plugins/enhanced_input/geometry.py +81 -0
- plugins/enhanced_input/state.py +130 -0
- plugins/enhanced_input/text_processor.py +115 -0
- plugins/enhanced_input_plugin.py +385 -0
- plugins/fullscreen/__init__.py +9 -0
- plugins/fullscreen/example_plugin.py +327 -0
- plugins/fullscreen/matrix_plugin.py +132 -0
- plugins/hook_monitoring_plugin.py +1299 -0
- plugins/query_enhancer_plugin.py +350 -0
- plugins/save_conversation_plugin.py +502 -0
- plugins/system_commands_plugin.py +93 -0
- plugins/tmux_plugin.py +795 -0
- plugins/workflow_enforcement_plugin.py +629 -0
- system_prompt/default.md +1286 -0
- system_prompt/default_win.md +265 -0
- system_prompt/example_with_trender.md +47 -0
core/llm/hook_system.py
ADDED
|
@@ -0,0 +1,402 @@
|
|
|
1
|
+
"""Comprehensive hook system for LLM core service.
|
|
2
|
+
|
|
3
|
+
Provides complete hook coverage for all LLM operations including
|
|
4
|
+
pre/post processing, tool calls, and intelligence features.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Any, Dict
|
|
9
|
+
|
|
10
|
+
from ..events import EventType, Hook, HookPriority
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class LLMHookSystem:
    """Comprehensive hook system for LLM operations.

    Manages registration and execution of hooks for all LLM events,
    providing extensive customization points for the core service.

    Instances build their full hook list at construction time and
    register it with the event bus later via ``register_hooks()``.
    """
|
|
21
|
+
|
|
22
|
+
def __init__(self, event_bus):
    """Set up the hook system and pre-build every LLM hook.

    Args:
        event_bus: Event bus the hooks will later be registered on.
    """
    # Hooks are built eagerly here; registration is a separate async step.
    self.hooks = []
    self.event_bus = event_bus
    self._create_hooks()

    logger.info("LLM Hook System initialized")
|
|
33
|
+
|
|
34
|
+
def _create_hooks(self):
    """Build the full set of LLM hooks (without registering them).

    Hooks are appended to ``self.hooks`` in a fixed order: pre-processing,
    processing, post-processing, system lifecycle, then thinking.
    Registration with the event bus happens later in ``register_hooks``.
    """
    # (name, event_type, callback) triples. Every hook shares the same
    # plugin name and SYSTEM priority, so the repetitive per-hook
    # Hook(...) boilerplate is factored into a single loop.
    hook_specs = [
        # Pre-processing hooks
        ("llm_pre_user_input", EventType.USER_INPUT_PRE, self._handle_pre_user_input),
        ("llm_pre_request", EventType.LLM_REQUEST_PRE, self._handle_pre_llm_request),
        ("llm_pre_tool_call", EventType.TOOL_CALL_PRE, self._handle_pre_tool_call),
        # Processing hooks
        ("llm_request", EventType.LLM_REQUEST, self._handle_llm_request),
        ("llm_tool_call", EventType.TOOL_CALL, self._handle_tool_call),
        # Post-processing hooks
        ("llm_post_response", EventType.LLM_RESPONSE_POST, self._handle_post_llm_response),
        ("llm_post_tool_call", EventType.TOOL_CALL_POST, self._handle_post_tool_call),
        ("llm_post_user_response", EventType.USER_INPUT_POST, self._handle_post_user_response),
        # System lifecycle hooks
        ("llm_startup", EventType.SYSTEM_STARTUP, self._handle_conversation_start),
        ("llm_shutdown", EventType.SYSTEM_SHUTDOWN, self._handle_conversation_end),
        # Thinking process hook
        ("llm_thinking", EventType.LLM_THINKING, self._handle_thinking),
    ]

    self.hooks.extend(
        Hook(
            name=name,
            plugin_name="llm_core",
            event_type=event_type,
            priority=HookPriority.SYSTEM.value,
            callback=callback,
        )
        for name, event_type, callback in hook_specs
    )

    logger.debug(f"Created {len(self.hooks)} LLM hooks")
|
|
134
|
+
|
|
135
|
+
async def register_hooks(self):
    """Register every pre-built hook with the event bus.

    Individual failures are logged but do not stop registration of the
    remaining hooks.
    """
    for hook in self.hooks:
        registered = await self.event_bus.register_hook(hook)
        if not registered:
            logger.error(f"Failed to register hook: {hook.name}")
        else:
            logger.debug(f"Registered hook: {hook.name}")

    logger.info(f"Registered {len(self.hooks)} LLM hooks")
|
|
145
|
+
|
|
146
|
+
async def _handle_pre_user_input(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Enrich user input before it is processed.

    Runs on USER_INPUT_PRE and returns the message together with basic
    session context so later stages can enhance, validate or filter it.
    """
    message = data.get("message", "")

    enriched_data = {
        "original_message": message,
        "message": message,
        # Events may or may not carry a timestamp attribute.
        "timestamp": getattr(event, "timestamp", None),
        "session_context": self._get_session_context(),
    }

    logger.debug(f"Pre-processing user input: {message[:50]}...")
    return enriched_data
|
|
164
|
+
|
|
165
|
+
async def _handle_pre_llm_request(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Prepare an outgoing LLM API request.

    Runs on LLM_REQUEST_PRE; the returned payload is the natural place
    for prompt engineering and context optimization.
    """
    messages = data.get("messages", [])

    optimized_data = dict(
        messages=messages,
        # No optimization is actually performed yet; the flag records that.
        optimization_applied=False,
        # Rough context-size proxy: length of the stringified messages.
        context_window=len(str(messages)),
    )

    logger.debug(f"Pre-processing LLM request with {len(messages)} messages")
    return optimized_data
|
|
182
|
+
|
|
183
|
+
async def _handle_pre_tool_call(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Validate a tool command before it is executed.

    Runs on TOOL_CALL_PRE and attaches a coarse risk classification that
    downstream security checks can act on.
    """
    command = data.get("command", "")
    risk = self._assess_command_risk(command)

    logger.debug(f"Pre-processing tool call: {command[:50]}...")
    return {"command": command, "validated": True, "risk_level": risk}
|
|
200
|
+
|
|
201
|
+
async def _handle_llm_request(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Observe an in-flight LLM API call (monitoring/metrics point)."""
    request_id = getattr(event, "uuid", None)
    model = data.get("model", "unknown")

    logger.debug("Processing LLM request")
    return {"status": "processing", "request_id": request_id, "model": model}
|
|
215
|
+
|
|
216
|
+
async def _handle_tool_call(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Observe a tool call while it executes."""
    command = data.get("command", "")
    tool_data = {
        "command": command,
        "status": "executing",
        "tool_id": getattr(event, "uuid", None),
    }

    logger.debug(f"Executing tool call: {command[:50]}...")
    return tool_data
|
|
230
|
+
|
|
231
|
+
async def _handle_post_llm_response(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Analyze an LLM response after it is received.

    Produces lightweight metrics (length, code/thinking markers, and a
    heuristic quality score) for downstream consumers.
    """
    response = data.get("response", "")

    analysis_data = {
        "response_length": len(response),
        "has_code": "```" in response,          # fenced code block marker
        "has_thinking": "<think>" in response,  # thinking-tag marker
        "quality_score": self._assess_response_quality(response),
    }

    logger.debug(f"Post-processing LLM response: {len(response)} chars")
    return analysis_data
|
|
249
|
+
|
|
250
|
+
async def _handle_post_tool_call(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Summarize a finished tool call (output size, success, timing)."""
    output = data.get("output", "")

    result_data = {
        "output_length": len(output),
        # Absent keys default to "succeeded instantly".
        "success": data.get("success", True),
        "execution_time": data.get("execution_time", 0),
    }

    logger.debug(f"Post-processing tool call output: {len(output)} chars")
    return result_data
|
|
267
|
+
|
|
268
|
+
async def _handle_post_user_response(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Wrap up processing of a user turn and flag possible follow-ups."""
    follow_up = self._check_follow_up_needed(data)

    logger.debug("Post-processing user response complete")
    return {
        "message_processed": True,
        "conversation_state": "active",
        "follow_up_needed": follow_up,
    }
|
|
282
|
+
|
|
283
|
+
async def _handle_conversation_start(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Initialize context when a conversation (system startup) begins."""
    start_data = {
        "conversation_started": True,
        "initial_context": self._build_initial_context(),
        "welcome_message": "LLM Core Service ready",
    }

    logger.info("Conversation started")
    return start_data
|
|
297
|
+
|
|
298
|
+
async def _handle_conversation_end(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Clean up and summarize when a conversation (system shutdown) ends."""
    summary = self._generate_conversation_summary(data)

    logger.info("Conversation ended")
    return {
        "conversation_ended": True,
        "summary": summary,
        "cleanup_complete": True,
    }
|
|
312
|
+
|
|
313
|
+
async def _handle_thinking(self, data: Dict[str, Any], event) -> Dict[str, Any]:
    """Surface LLM thinking-phase content for visualization/analysis."""
    thinking_data = {
        "thinking_content": data.get("thinking", ""),
        "phase": data.get("phase", "processing"),
        "visualization": "enabled",
    }

    logger.debug("Processing thinking phase")
    return thinking_data
|
|
327
|
+
|
|
328
|
+
def _get_session_context(self) -> Dict[str, Any]:
|
|
329
|
+
"""Get current session context."""
|
|
330
|
+
return {
|
|
331
|
+
"active": True,
|
|
332
|
+
"hooks_enabled": len(self.hooks),
|
|
333
|
+
"system_ready": True
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
def _assess_command_risk(self, command: str) -> str:
|
|
337
|
+
"""Assess risk level of a command."""
|
|
338
|
+
high_risk_patterns = ["rm -rf", "sudo", "chmod 777", "mkfs", "dd if="]
|
|
339
|
+
medium_risk_patterns = ["rm", "mv", "cp -r", "git reset --hard"]
|
|
340
|
+
|
|
341
|
+
command_lower = command.lower()
|
|
342
|
+
|
|
343
|
+
for pattern in high_risk_patterns:
|
|
344
|
+
if pattern in command_lower:
|
|
345
|
+
return "high"
|
|
346
|
+
|
|
347
|
+
for pattern in medium_risk_patterns:
|
|
348
|
+
if pattern in command_lower:
|
|
349
|
+
return "medium"
|
|
350
|
+
|
|
351
|
+
return "low"
|
|
352
|
+
|
|
353
|
+
def _assess_response_quality(self, response: str) -> float:
|
|
354
|
+
"""Assess quality of LLM response."""
|
|
355
|
+
score = 0.5 # Base score
|
|
356
|
+
|
|
357
|
+
# Positive indicators
|
|
358
|
+
if len(response) > 100:
|
|
359
|
+
score += 0.1
|
|
360
|
+
if "```" in response: # Has code
|
|
361
|
+
score += 0.1
|
|
362
|
+
if any(word in response.lower() for word in ["because", "therefore", "however"]):
|
|
363
|
+
score += 0.1 # Has reasoning
|
|
364
|
+
|
|
365
|
+
# Negative indicators
|
|
366
|
+
if len(response) < 10:
|
|
367
|
+
score -= 0.2
|
|
368
|
+
if response.count("I") > 10: # Too self-referential
|
|
369
|
+
score -= 0.1
|
|
370
|
+
|
|
371
|
+
return min(max(score, 0.0), 1.0)
|
|
372
|
+
|
|
373
|
+
def _check_follow_up_needed(self, data: Dict[str, Any]) -> bool:
|
|
374
|
+
"""Check if follow-up is needed."""
|
|
375
|
+
response = data.get("response", "")
|
|
376
|
+
|
|
377
|
+
# Check for indicators that follow-up is needed
|
|
378
|
+
follow_up_indicators = [
|
|
379
|
+
"?", # Question asked
|
|
380
|
+
"let me know",
|
|
381
|
+
"would you like",
|
|
382
|
+
"should I",
|
|
383
|
+
"shall I"
|
|
384
|
+
]
|
|
385
|
+
|
|
386
|
+
response_lower = response.lower()
|
|
387
|
+
return any(indicator in response_lower for indicator in follow_up_indicators)
|
|
388
|
+
|
|
389
|
+
def _build_initial_context(self) -> Dict[str, Any]:
|
|
390
|
+
"""Build initial conversation context."""
|
|
391
|
+
return {
|
|
392
|
+
"project_loaded": True,
|
|
393
|
+
"hooks_active": len(self.hooks),
|
|
394
|
+
"intelligence_enabled": True
|
|
395
|
+
}
|
|
396
|
+
|
|
397
|
+
def _generate_conversation_summary(self, data: Dict[str, Any]) -> str:
|
|
398
|
+
"""Generate conversation summary."""
|
|
399
|
+
messages = data.get("message_count", 0)
|
|
400
|
+
duration = data.get("duration", 0)
|
|
401
|
+
|
|
402
|
+
return f"Conversation completed: {messages} messages over {duration:.1f} seconds"
|