kollabor-0.4.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- core/__init__.py +18 -0
- core/application.py +578 -0
- core/cli.py +193 -0
- core/commands/__init__.py +43 -0
- core/commands/executor.py +277 -0
- core/commands/menu_renderer.py +319 -0
- core/commands/parser.py +186 -0
- core/commands/registry.py +331 -0
- core/commands/system_commands.py +479 -0
- core/config/__init__.py +7 -0
- core/config/llm_task_config.py +110 -0
- core/config/loader.py +501 -0
- core/config/manager.py +112 -0
- core/config/plugin_config_manager.py +346 -0
- core/config/plugin_schema.py +424 -0
- core/config/service.py +399 -0
- core/effects/__init__.py +1 -0
- core/events/__init__.py +12 -0
- core/events/bus.py +129 -0
- core/events/executor.py +154 -0
- core/events/models.py +258 -0
- core/events/processor.py +176 -0
- core/events/registry.py +289 -0
- core/fullscreen/__init__.py +19 -0
- core/fullscreen/command_integration.py +290 -0
- core/fullscreen/components/__init__.py +12 -0
- core/fullscreen/components/animation.py +258 -0
- core/fullscreen/components/drawing.py +160 -0
- core/fullscreen/components/matrix_components.py +177 -0
- core/fullscreen/manager.py +302 -0
- core/fullscreen/plugin.py +204 -0
- core/fullscreen/renderer.py +282 -0
- core/fullscreen/session.py +324 -0
- core/io/__init__.py +52 -0
- core/io/buffer_manager.py +362 -0
- core/io/config_status_view.py +272 -0
- core/io/core_status_views.py +410 -0
- core/io/input_errors.py +313 -0
- core/io/input_handler.py +2655 -0
- core/io/input_mode_manager.py +402 -0
- core/io/key_parser.py +344 -0
- core/io/layout.py +587 -0
- core/io/message_coordinator.py +204 -0
- core/io/message_renderer.py +601 -0
- core/io/modal_interaction_handler.py +315 -0
- core/io/raw_input_processor.py +946 -0
- core/io/status_renderer.py +845 -0
- core/io/terminal_renderer.py +586 -0
- core/io/terminal_state.py +551 -0
- core/io/visual_effects.py +734 -0
- core/llm/__init__.py +26 -0
- core/llm/api_communication_service.py +863 -0
- core/llm/conversation_logger.py +473 -0
- core/llm/conversation_manager.py +414 -0
- core/llm/file_operations_executor.py +1401 -0
- core/llm/hook_system.py +402 -0
- core/llm/llm_service.py +1629 -0
- core/llm/mcp_integration.py +386 -0
- core/llm/message_display_service.py +450 -0
- core/llm/model_router.py +214 -0
- core/llm/plugin_sdk.py +396 -0
- core/llm/response_parser.py +848 -0
- core/llm/response_processor.py +364 -0
- core/llm/tool_executor.py +520 -0
- core/logging/__init__.py +19 -0
- core/logging/setup.py +208 -0
- core/models/__init__.py +5 -0
- core/models/base.py +23 -0
- core/plugins/__init__.py +13 -0
- core/plugins/collector.py +212 -0
- core/plugins/discovery.py +386 -0
- core/plugins/factory.py +263 -0
- core/plugins/registry.py +152 -0
- core/storage/__init__.py +5 -0
- core/storage/state_manager.py +84 -0
- core/ui/__init__.py +6 -0
- core/ui/config_merger.py +176 -0
- core/ui/config_widgets.py +369 -0
- core/ui/live_modal_renderer.py +276 -0
- core/ui/modal_actions.py +162 -0
- core/ui/modal_overlay_renderer.py +373 -0
- core/ui/modal_renderer.py +591 -0
- core/ui/modal_state_manager.py +443 -0
- core/ui/widget_integration.py +222 -0
- core/ui/widgets/__init__.py +27 -0
- core/ui/widgets/base_widget.py +136 -0
- core/ui/widgets/checkbox.py +85 -0
- core/ui/widgets/dropdown.py +140 -0
- core/ui/widgets/label.py +78 -0
- core/ui/widgets/slider.py +185 -0
- core/ui/widgets/text_input.py +224 -0
- core/utils/__init__.py +11 -0
- core/utils/config_utils.py +656 -0
- core/utils/dict_utils.py +212 -0
- core/utils/error_utils.py +275 -0
- core/utils/key_reader.py +171 -0
- core/utils/plugin_utils.py +267 -0
- core/utils/prompt_renderer.py +151 -0
- kollabor-0.4.9.dist-info/METADATA +298 -0
- kollabor-0.4.9.dist-info/RECORD +128 -0
- kollabor-0.4.9.dist-info/WHEEL +5 -0
- kollabor-0.4.9.dist-info/entry_points.txt +2 -0
- kollabor-0.4.9.dist-info/licenses/LICENSE +21 -0
- kollabor-0.4.9.dist-info/top_level.txt +4 -0
- kollabor_cli_main.py +20 -0
- plugins/__init__.py +1 -0
- plugins/enhanced_input/__init__.py +18 -0
- plugins/enhanced_input/box_renderer.py +103 -0
- plugins/enhanced_input/box_styles.py +142 -0
- plugins/enhanced_input/color_engine.py +165 -0
- plugins/enhanced_input/config.py +150 -0
- plugins/enhanced_input/cursor_manager.py +72 -0
- plugins/enhanced_input/geometry.py +81 -0
- plugins/enhanced_input/state.py +130 -0
- plugins/enhanced_input/text_processor.py +115 -0
- plugins/enhanced_input_plugin.py +385 -0
- plugins/fullscreen/__init__.py +9 -0
- plugins/fullscreen/example_plugin.py +327 -0
- plugins/fullscreen/matrix_plugin.py +132 -0
- plugins/hook_monitoring_plugin.py +1299 -0
- plugins/query_enhancer_plugin.py +350 -0
- plugins/save_conversation_plugin.py +502 -0
- plugins/system_commands_plugin.py +93 -0
- plugins/tmux_plugin.py +795 -0
- plugins/workflow_enforcement_plugin.py +629 -0
- system_prompt/default.md +1286 -0
- system_prompt/default_win.md +265 -0
- system_prompt/example_with_trender.md +47 -0
@@ -0,0 +1,848 @@
+"""Response parsing for LLM outputs with comprehensive tag support.
+
+Handles parsing of special tags including thinking, terminal commands,
+MCP tool calls, and file operations from LLM responses with clean architecture.
+"""
+
+import json
+import logging
+import re
+from typing import Any, Dict, List, Optional, Tuple
+from xml.etree import ElementTree as ET
+
+logger = logging.getLogger(__name__)
+
+
+class FileOperationParser:
+    """Parse file operations from LLM response without XML parser.
+
+    Uses regex-based parsing to extract file operation blocks, treating
+    tag content as raw text (no CDATA escaping needed).
+
+    Supports 14 file operations:
+    - edit: Replace content in existing file
+    - create: Create new file
+    - create_overwrite: Create/overwrite file
+    - delete: Delete file
+    - move: Move/rename file
+    - copy: Copy file
+    - copy_overwrite: Copy file with overwrite
+    - append: Append to file
+    - insert_after: Insert content after pattern
+    - insert_before: Insert content before pattern
+    - mkdir: Create directory
+    - rmdir: Remove directory
+    - read: Read file content
+    - grep: Search file for pattern
+    """
+
+    def __init__(self):
+        """Initialize file operation parser with compiled regex patterns."""
+        # Operation-level patterns (outer tags only)
+        self.edit_pattern = re.compile(
+            r'<edit>(.*?)</edit>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.create_pattern = re.compile(
+            r'<create>(.*?)</create>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.create_overwrite_pattern = re.compile(
+            r'<create_overwrite>(.*?)</create_overwrite>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.delete_pattern = re.compile(
+            r'<delete>(.*?)</delete>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.move_pattern = re.compile(
+            r'<move>(.*?)</move>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.copy_pattern = re.compile(
+            r'<copy>(.*?)</copy>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.copy_overwrite_pattern = re.compile(
+            r'<copy_overwrite>(.*?)</copy_overwrite>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.append_pattern = re.compile(
+            r'<append>(.*?)</append>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.insert_after_pattern = re.compile(
+            r'<insert_after>(.*?)</insert_after>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.insert_before_pattern = re.compile(
+            r'<insert_before>(.*?)</insert_before>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.mkdir_pattern = re.compile(
+            r'<mkdir>(.*?)</mkdir>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.rmdir_pattern = re.compile(
+            r'<rmdir>(.*?)</rmdir>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.read_pattern = re.compile(
+            r'<read>(.*?)</read>',
+            re.DOTALL | re.IGNORECASE
+        )
+        self.grep_pattern = re.compile(
+            r'<grep>(.*?)</grep>',
+            re.DOTALL | re.IGNORECASE
+        )
+
+        logger.debug("File operation parser initialized with 14 operation patterns")
+
+    def parse_response(self, llm_response: str) -> List[Dict[str, Any]]:
+        """Extract all file operations from LLM response.
+
+        Args:
+            llm_response: Raw LLM response text
+
+        Returns:
+            List of operation dictionaries with type and parameters
+        """
+        operations = []
+
+        # Parse each operation type in order of appearance
+        operations.extend(self._parse_operations(
+            self.edit_pattern, self._parse_edit_block, llm_response, "edit"
+        ))
+        operations.extend(self._parse_operations(
+            self.create_pattern, self._parse_create_block, llm_response, "create"
+        ))
+        operations.extend(self._parse_operations(
+            self.create_overwrite_pattern, self._parse_create_overwrite_block,
+            llm_response, "create_overwrite"
+        ))
+        operations.extend(self._parse_operations(
+            self.delete_pattern, self._parse_delete_block, llm_response, "delete"
+        ))
+        operations.extend(self._parse_operations(
+            self.move_pattern, self._parse_move_block, llm_response, "move"
+        ))
+        operations.extend(self._parse_operations(
+            self.copy_pattern, self._parse_copy_block, llm_response, "copy"
+        ))
+        operations.extend(self._parse_operations(
+            self.copy_overwrite_pattern, self._parse_copy_overwrite_block,
+            llm_response, "copy_overwrite"
+        ))
+        operations.extend(self._parse_operations(
+            self.append_pattern, self._parse_append_block, llm_response, "append"
+        ))
+        operations.extend(self._parse_operations(
+            self.insert_after_pattern, self._parse_insert_after_block,
+            llm_response, "insert_after"
+        ))
+        operations.extend(self._parse_operations(
+            self.insert_before_pattern, self._parse_insert_before_block,
+            llm_response, "insert_before"
+        ))
+        operations.extend(self._parse_operations(
+            self.mkdir_pattern, self._parse_mkdir_block, llm_response, "mkdir"
+        ))
+        operations.extend(self._parse_operations(
+            self.rmdir_pattern, self._parse_rmdir_block, llm_response, "rmdir"
+        ))
+        operations.extend(self._parse_operations(
+            self.read_pattern, self._parse_read_block, llm_response, "read"
+        ))
+        operations.extend(self._parse_operations(
+            self.grep_pattern, self._parse_grep_block, llm_response, "grep"
+        ))
+
+        if operations:
+            logger.info(f"Parsed {len(operations)} file operations from response")
+
+        return operations
+
+    def _parse_operations(
+        self,
+        pattern: re.Pattern,
+        parser_func: callable,
+        text: str,
+        op_name: str
+    ) -> List[Dict[str, Any]]:
+        """Generic operation parser.
+
+        Args:
+            pattern: Compiled regex pattern for operation
+            parser_func: Function to parse inner content
+            text: Text to search in
+            op_name: Operation name for error reporting
+
+        Returns:
+            List of parsed operations
+        """
+        operations = []
+
+        for i, match in enumerate(pattern.finditer(text)):
+            inner_content = match.group(1)
+            try:
+                op = parser_func(inner_content)
+                op["id"] = f"file_{op_name}_{i}"
+                operations.append(op)
+                logger.debug(f"Parsed {op_name} operation: {op.get('file', 'N/A')}")
+            except ValueError as e:
+                logger.error(f"Invalid <{op_name}> block: {e}")
+                # Build helpful error with expected format
+                expected_format = self._get_expected_format(op_name)
+                # Add malformed operation for error reporting
+                operations.append({
+                    "type": "malformed_file_op",
+                    "id": f"malformed_{op_name}_{i}",
+                    "operation": op_name,
+                    "error": str(e),
+                    "expected_format": expected_format,
+                    "content_preview": inner_content[:300] if len(inner_content) > 300 else inner_content
+                })
+
+        return operations
+
+    def _get_expected_format(self, op_name: str) -> str:
+        """Get expected format string for a file operation."""
+        formats = {
+            "edit": "<edit>\n <file>path/to/file</file>\n <find>text to find</find>\n <replace>replacement text</replace>\n</edit>",
+            "create": "<create>\n <file>path/to/file</file>\n <content>file content</content>\n</create>",
+            "create_overwrite": "<create_overwrite>\n <file>path/to/file</file>\n <content>file content</content>\n</create_overwrite>",
+            "delete": "<delete>\n <file>path/to/file</file>\n</delete>",
+            "move": "<move>\n <from>source/path</from>\n <to>dest/path</to>\n</move>",
+            "copy": "<copy>\n <from>source/path</from>\n <to>dest/path</to>\n</copy>",
+            "append": "<append>\n <file>path/to/file</file>\n <content>content to append</content>\n</append>",
+            "read": "<read>\n <file>path/to/file</file>\n</read>",
+            "mkdir": "<mkdir>\n <path>directory/path</path>\n</mkdir>",
+            "rmdir": "<rmdir>\n <path>directory/path</path>\n</rmdir>",
+            "insert_after": "<insert_after>\n <file>path</file>\n <pattern>match</pattern>\n <content>new content</content>\n</insert_after>",
+            "insert_before": "<insert_before>\n <file>path</file>\n <pattern>match</pattern>\n <content>new content</content>\n</insert_before>",
+        }
+        return formats.get(op_name, f"<{op_name}>...</{op_name}>")
+
+    def _extract_tag(
+        self,
+        tag_name: str,
+        content: str,
+        required: bool = True
+    ) -> Optional[str]:
+        """Extract content between tags.
+
+        Args:
+            tag_name: Tag name (without < >)
+            content: Content to search in
+            required: If True, raises ValueError if tag not found
+
+        Returns:
+            Content between tags, or None if not found and not required
+
+        Raises:
+            ValueError: If tag not found and required=True
+        """
+        pattern = re.compile(
+            f'<{tag_name}>(.*?)</{tag_name}>',
+            re.DOTALL | re.IGNORECASE
+        )
+        match = pattern.search(content)
+
+        if not match:
+            if required:
+                raise ValueError(f"Missing required tag: <{tag_name}>")
+            return None
+
+        return match.group(1)
+
+    def _parse_edit_block(self, content: str) -> Dict[str, Any]:
+        """Parse <edit> block.
+
+        Args:
+            content: Inner content of <edit> tag
+
+        Returns:
+            Parsed operation dictionary
+        """
+        return {
+            "type": "file_edit",
+            "file": self._extract_tag("file", content).strip(),
+            "find": self._extract_tag("find", content),  # Preserve whitespace
+            "replace": self._extract_tag("replace", content)  # Preserve whitespace
+        }
+
+    def _parse_create_block(self, content: str) -> Dict[str, Any]:
+        """Parse <create> block."""
+        return {
+            "type": "file_create",
+            "file": self._extract_tag("file", content).strip(),
+            "content": self._extract_tag("content", content)
+        }
+
+    def _parse_create_overwrite_block(self, content: str) -> Dict[str, Any]:
+        """Parse <create_overwrite> block."""
+        return {
+            "type": "file_create_overwrite",
+            "file": self._extract_tag("file", content).strip(),
+            "content": self._extract_tag("content", content)
+        }
+
+    def _parse_delete_block(self, content: str) -> Dict[str, Any]:
+        """Parse <delete> block."""
+        return {
+            "type": "file_delete",
+            "file": self._extract_tag("file", content).strip()
+        }
+
+    def _parse_move_block(self, content: str) -> Dict[str, Any]:
+        """Parse <move> block."""
+        return {
+            "type": "file_move",
+            "from": self._extract_tag("from", content).strip(),
+            "to": self._extract_tag("to", content).strip()
+        }
+
+    def _parse_copy_block(self, content: str) -> Dict[str, Any]:
+        """Parse <copy> block."""
+        return {
+            "type": "file_copy",
+            "from": self._extract_tag("from", content).strip(),
+            "to": self._extract_tag("to", content).strip()
+        }
+
+    def _parse_copy_overwrite_block(self, content: str) -> Dict[str, Any]:
+        """Parse <copy_overwrite> block."""
+        return {
+            "type": "file_copy_overwrite",
+            "from": self._extract_tag("from", content).strip(),
+            "to": self._extract_tag("to", content).strip()
+        }
+
+    def _parse_append_block(self, content: str) -> Dict[str, Any]:
+        """Parse <append> block."""
+        return {
+            "type": "file_append",
+            "file": self._extract_tag("file", content).strip(),
+            "content": self._extract_tag("content", content)
+        }
+
+    def _parse_insert_after_block(self, content: str) -> Dict[str, Any]:
+        """Parse <insert_after> block."""
+        return {
+            "type": "file_insert_after",
+            "file": self._extract_tag("file", content).strip(),
+            "pattern": self._extract_tag("pattern", content),
+            "content": self._extract_tag("content", content)
+        }
+
+    def _parse_insert_before_block(self, content: str) -> Dict[str, Any]:
+        """Parse <insert_before> block."""
+        return {
+            "type": "file_insert_before",
+            "file": self._extract_tag("file", content).strip(),
+            "pattern": self._extract_tag("pattern", content),
+            "content": self._extract_tag("content", content)
+        }
+
+    def _parse_mkdir_block(self, content: str) -> Dict[str, Any]:
+        """Parse <mkdir> block."""
+        return {
+            "type": "file_mkdir",
+            "path": self._extract_tag("path", content).strip()
+        }
+
+    def _parse_rmdir_block(self, content: str) -> Dict[str, Any]:
+        """Parse <rmdir> block."""
+        return {
+            "type": "file_rmdir",
+            "path": self._extract_tag("path", content).strip()
+        }
+
+    def _parse_read_block(self, content: str) -> Dict[str, Any]:
+        """Parse <read> block."""
+        file_path = self._extract_tag("file", content).strip()
+        lines_spec = self._extract_tag("lines", content, required=False)
+
+        result = {
+            "type": "file_read",
+            "file": file_path
+        }
+
+        if lines_spec:
+            result["lines"] = lines_spec.strip()
+
+        return result
+
+    def _parse_grep_block(self, content: str) -> Dict[str, Any]:
+        """Parse <grep> block."""
+        file_path = self._extract_tag("file", content).strip()
+        pattern = self._extract_tag("pattern", content).strip()
+
+        result = {
+            "type": "file_grep",
+            "file": file_path,
+            "pattern": pattern
+        }
+
+        # Optional: case_insensitive flag
+        case_insensitive = self._extract_tag("case_insensitive", content, required=False)
+        if case_insensitive:
+            result["case_insensitive"] = case_insensitive.strip().lower() in ("true", "1", "yes")
+
+        return result
+
+
+class ResponseParser:
+    """Parse and extract structured content from LLM responses.
+
+    Supports multiple tag formats:
+    - <think>content</think> - Thinking/reasoning content (removed from output)
+    - <terminal>command</terminal> - Bash terminal commands
+    - <tool name="tool_name" arg1="value" arg2="value">content</tool> - MCP tool calls
+    - File operations: <edit>, <create>, <delete>, <move>, <copy>, <append>, etc.
+    """
+
+    def __init__(self):
+        """Initialize response parser with compiled regex patterns."""
+        # Thinking tags - removed from final output
+        self.thinking_pattern = re.compile(
+            r'<think>(.*?)</think>',
+            re.DOTALL | re.IGNORECASE
+        )
+
+        # Terminal command tags
+        self.terminal_pattern = re.compile(
+            r'<terminal>(.*?)</terminal>',
+            re.DOTALL | re.IGNORECASE
+        )
+
+        # MCP tool call tags with attributes
+        self.tool_pattern = re.compile(
+            r'<tool\s+([^>]*?)>(.*?)</tool>',
+            re.DOTALL | re.IGNORECASE
+        )
+
+        # File operations parser
+        self.file_ops_parser = FileOperationParser()
+
+        logger.info("Response parser initialized with comprehensive tag support + file operations")
+
+    def parse_response(self, raw_response: str) -> Dict[str, Any]:
+        """Parse LLM response and extract all components.
+
+        Args:
+            raw_response: Raw response text from LLM
+
+        Returns:
+            Parsed response with all extracted components
+        """
+        # DIAGNOSTIC: McKinsey Phase 2 - Root cause analysis
+        opening_count = raw_response.count('<think>')
+        closing_count = raw_response.count('</think>')
+        orphaned_closes = closing_count - opening_count
+
+        if orphaned_closes > 0:
+            logger.critical(f"🔍 BUG-011 DIAGNOSTIC: Found {orphaned_closes} orphaned </think> tags in RAW response")
+            logger.critical(f"Opening tags: {opening_count}, Closing tags: {closing_count}")
+            logger.critical(f"First 500 chars: {raw_response[:500]}")
+        elif orphaned_closes < 0:
+            logger.warning(f"🔍 BUG-011 DIAGNOSTIC: Found {abs(orphaned_closes)} orphaned <think> tags (unclosed)")
+
+        # Extract all components
+        thinking_blocks = self._extract_thinking(raw_response)
+        terminal_commands = self._extract_terminal_commands(raw_response)
+        tool_calls = self._extract_tool_calls(raw_response)
+        file_operations = self.file_ops_parser.parse_response(raw_response)
+
+        # Clean content (remove all tags)
+        clean_content = self._clean_content(raw_response)
+
+        # DIAGNOSTIC: Verify defensive fix effectiveness
+        if '</think>' in clean_content or '<think>' in clean_content:
+            remaining_closes = clean_content.count('</think>')
+            remaining_opens = clean_content.count('<think>')
+            logger.error(f"⚠️ BUG-011 ALERT: Defensive fix FAILED - {remaining_closes} </think> and {remaining_opens} <think> remain!")
+            logger.error(f"Cleaned content sample: {clean_content[:500]}")
+        elif orphaned_closes > 0:
+            logger.info(f"✅ BUG-011 SUCCESS: Defensive fix removed {orphaned_closes} orphaned tags")
+
+        # Determine if turn is completed (no tools pending execution)
+        turn_completed = (
+            len(terminal_commands) == 0 and
+            len(tool_calls) == 0 and
+            len(file_operations) == 0
+        )
+
+        parsed = {
+            "raw": raw_response,
+            "content": clean_content,
+            "turn_completed": turn_completed,
+            "components": {
+                "thinking": thinking_blocks,
+                "terminal_commands": terminal_commands,
+                "tool_calls": tool_calls,
+                "file_operations": file_operations
+            },
+            "metadata": {
+                "has_thinking": bool(thinking_blocks),
+                "has_terminal_commands": bool(terminal_commands),
+                "has_tool_calls": bool(tool_calls),
+                "has_file_operations": bool(file_operations),
+                "total_tools": len(terminal_commands) + len(tool_calls) + len(file_operations),
+                "content_length": len(clean_content)
+            }
+        }
+
+        logger.debug(f"Parsed response: {len(thinking_blocks)} thinking, "
+                     f"{len(terminal_commands)} terminal, {len(tool_calls)} tools, "
+                     f"{len(file_operations)} file ops")
+
+        return parsed
+
+    def _extract_thinking(self, content: str) -> List[str]:
+        """Extract thinking content blocks.
+
+        Args:
+            content: Raw response content
+
+        Returns:
+            List of thinking content strings
+        """
+        matches = self.thinking_pattern.findall(content)
+        return [match.strip() for match in matches if match.strip()]
+
+    def _extract_terminal_commands(self, content: str) -> List[Dict[str, Any]]:
+        """Extract terminal command blocks.
+
+        Args:
+            content: Raw response content
+
+        Returns:
+            List of terminal command dictionaries
+        """
+        commands = []
+        matches = self.terminal_pattern.findall(content)
+
+        for i, match in enumerate(matches):
+            command = match.strip()
+            if command:
+                commands.append({
+                    "type": "terminal",
+                    "id": f"terminal_{i}",
+                    "command": command,
+                    "raw": match
+                })
+
+        return commands
+
+    def _extract_tool_calls(self, content: str) -> List[Dict[str, Any]]:
+        """Extract MCP tool call blocks.
+
+        Args:
+            content: Raw response content
+
+        Returns:
+            List of tool call dictionaries
+        """
+        tool_calls = []
+        matches = self.tool_pattern.findall(content)
+
+        for i, (attributes_str, tool_content) in enumerate(matches):
+            try:
+                # Parse tool attributes
+                tool_info = self._parse_tool_attributes(attributes_str)
+
+                # Build tool call
+                tool_call = {
+                    "type": "mcp_tool",
+                    "id": f"mcp_tool_{i}",
+                    "name": tool_info.get("name", "unknown"),
+                    "arguments": tool_info.get("arguments", {}),
+                    "content": tool_content.strip(),
+                    "raw": f"<tool {attributes_str}>{tool_content}</tool>"
+                }
+
+                tool_calls.append(tool_call)
+
+            except Exception as e:
+                logger.warning(f"Failed to parse tool call: {e}")
+                # Add as malformed tool call for debugging
+                tool_calls.append({
+                    "type": "malformed_tool",
+                    "id": f"malformed_{i}",
+                    "error": str(e),
+                    "raw": f"<tool {attributes_str}>{tool_content}</tool>"
+                })
+
+        return tool_calls
+
+    def _parse_tool_attributes(self, attributes_str: str) -> Dict[str, Any]:
+        """Parse tool tag attributes.
+
+        Supports formats like:
+        - name="file_reader" path="/etc/hosts"
+        - name="search" query="python" limit="10"
+
+        Args:
+            attributes_str: Raw attributes string
+
+        Returns:
+            Parsed attributes with name and arguments
+        """
+        tool_info = {"name": None, "arguments": {}}
+
+        # Parse attributes using regex to handle quoted values
+        attr_pattern = r'(\w+)=(?:"([^"]*)"|\'([^\']*)\'|([^\s]+))'
+        matches = re.findall(attr_pattern, attributes_str)
+
+        for attr_name, quoted_val1, quoted_val2, unquoted_val in matches:
+            value = quoted_val1 or quoted_val2 or unquoted_val
+
+            if attr_name == "name":
+                tool_info["name"] = value
+            else:
+                # Convert value to appropriate type
+                tool_info["arguments"][attr_name] = self._convert_value(value)
+
+        return tool_info
+
+    def _convert_value(self, value: str) -> Any:
+        """Convert string value to appropriate Python type.
+
+        Args:
+            value: String value to convert
+
+        Returns:
+            Converted value (str, int, float, bool, or original)
+        """
+        if not value:
+            return value
+
+        # Try boolean
+        if value.lower() in ("true", "false"):
+            return value.lower() == "true"
+
+        # Try integer
+        try:
+            if "." not in value:
+                return int(value)
+        except ValueError:
+            pass
+
+        # Try float
+        try:
+            return float(value)
+        except ValueError:
+            pass
+
+        # Return as string
+        return value
+
+    def _clean_content(self, content: str) -> str:
+        """Remove all special tags from content.
+
+        Args:
+            content: Raw content with tags
+
+        Returns:
+            Cleaned content without any special tags
+        """
+        # Remove thinking tags (paired)
+        cleaned = self.thinking_pattern.sub('', content)
+
+        # DEFENSIVE: Remove any orphaned thinking tags
+        # McKinsey Root Cause Analysis tracked to BUG-011
+        cleaned = re.sub(r'</think>', '', cleaned, flags=re.IGNORECASE)
+        cleaned = re.sub(r'<think>', '', cleaned, flags=re.IGNORECASE)
+
+        # Remove terminal tags but preserve content structure
+        cleaned = self.terminal_pattern.sub('', cleaned)
+
+        # Remove tool tags but preserve content structure
+        cleaned = self.tool_pattern.sub('', cleaned)
+
+        # Remove file operation tags (all 14 types)
+        # Only successfully parsed tags are removed; malformed tags remain visible
+        cleaned = self.file_ops_parser.edit_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.create_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.create_overwrite_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.delete_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.move_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.copy_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.copy_overwrite_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.append_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.insert_after_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.insert_before_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.mkdir_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.rmdir_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.read_pattern.sub('', cleaned)
+        cleaned = self.file_ops_parser.grep_pattern.sub('', cleaned)
+
+        # Clean up excessive whitespace
+        cleaned = re.sub(r'\n{3,}', '\n\n', cleaned)
+        cleaned = cleaned.strip()
+
+        return cleaned
+
+    def get_all_tools(self, parsed_response: Dict[str, Any]) -> List[Dict[str, Any]]:
+        """Get all tools (terminal + MCP + file ops) in execution order.
+
+        Args:
+            parsed_response: Parsed response from parse_response()
+
+        Returns:
+            List of all tools to execute in order
+        """
+        components = parsed_response.get("components", {})
+
+        all_tools = []
+        all_tools.extend(components.get("terminal_commands", []))
+        all_tools.extend(components.get("tool_calls", []))
+        all_tools.extend(components.get("file_operations", []))
+
+        # Sort by original position in text (based on ID)
+        def sort_key(tool):
+            tool_id = tool.get("id", "")
+            if "terminal_" in tool_id:
+                return (0, int(tool_id.split("_")[1]))
+            elif "mcp_tool_" in tool_id:
+                return (1, int(tool_id.split("_")[2]))
+            elif "file_" in tool_id:
+                # Extract index from file operation IDs like "file_edit_0"
+                parts = tool_id.split("_")
+                if len(parts) >= 3:
+                    return (2, int(parts[-1]))
+                return (2, 0)
+            else:
+                return (3, 0)
+
+        all_tools.sort(key=sort_key)
+        return all_tools
+
+    def format_for_display(self, parsed_response: Dict[str, Any],
+                           show_thinking: bool = True) -> str:
+        """Format parsed response for terminal display.
+
+        Args:
+            parsed_response: Parsed response data
+            show_thinking: Whether to include thinking content
+
+        Returns:
+            Formatted string for display
+        """
+        parts = []
+
+        # Add thinking content if enabled
+        if show_thinking:
+            thinking = parsed_response.get("components", {}).get("thinking", [])
+            for thought in thinking:
+                parts.append(f"[dim]{thought}[/dim]")
+                parts.append("")
+
+        # Add main content
+        content = parsed_response.get("content", "").strip()
+        if content:
+            parts.append(content)
+
+        # Add tool execution indicators
+        metadata = parsed_response.get("metadata", {})
+        if (metadata.get("has_terminal_commands") or
+                metadata.get("has_tool_calls") or
+                metadata.get("has_file_operations")):
+            tools_count = metadata.get("total_tools", 0)
+            parts.append("")
+            parts.append(f"[cyan]Executing {tools_count} tool(s)...[/cyan]")
+
+        return "\n".join(parts)
+
+    def validate_response(self, response: str) -> Tuple[bool, List[str]]:
+        """Validate response format and syntax.
+
+        Args:
+            response: Raw response to validate
+
+        Returns:
+            Tuple of (is_valid, list_of_issues)
+        """
+        issues = []
+
+        # Check for unclosed tags
+        open_tags = ["<think>", "<terminal>", "<tool"]
+        close_tags = ["</think>", "</terminal>", "</tool>"]
+
+        for open_tag, close_tag in zip(open_tags, close_tags):
+            if open_tag in response and close_tag not in response:
+                issues.append(f"Unclosed tag: {open_tag}")
+
+        # Check for malformed tool tags
+        tool_matches = self.tool_pattern.findall(response)
+        for attributes_str, content in tool_matches:
+            if 'name=' not in attributes_str:
+                issues.append("Tool tag missing 'name' attribute")
+
+        # Check for empty response
+        if not response.strip():
+            issues.append("Empty response")
+
+        return len(issues) == 0, issues
+
+    def extract_execution_stats(self, parsed_response: Dict[str, Any]) -> Dict[str, Any]:
+        """Extract execution statistics from parsed response.
+
+        Args:
+            parsed_response: Parsed response data
+
+        Returns:
+            Execution statistics
+        """
+        metadata = parsed_response.get("metadata", {})
+        components = parsed_response.get("components", {})
+
+        return {
+            "content_words": len(parsed_response.get("content", "").split()),
+            "thinking_blocks": len(components.get("thinking", [])),
+            "terminal_commands": len(components.get("terminal_commands", [])),
+            "mcp_tool_calls": len(components.get("tool_calls", [])),
+            "total_tools": metadata.get("total_tools", 0),
+            "turn_completed": parsed_response.get("turn_completed", True),
+            "complexity": self._assess_complexity(parsed_response)
+        }
+
+    def _assess_complexity(self, parsed_response: Dict[str, Any]) -> str:
+        """Assess response complexity level.
+
+        Args:
+            parsed_response: Parsed response data
+
+        Returns:
+            Complexity level: simple, moderate, complex
+        """
+        score = 0
+        metadata = parsed_response.get("metadata", {})
+
+        # Content length scoring
+        content_length = metadata.get("content_length", 0)
+        if content_length > 500:
+            score += 2
+        elif content_length > 200:
+            score += 1
+
+        # Tool usage scoring
+        if metadata.get("has_thinking"):
+            score += 1
+        if metadata.get("has_terminal_commands"):
+            score += 1
+        if metadata.get("has_tool_calls"):
+            score += 2
+
+        # Multiple tools indicate complexity
+        if metadata.get("total_tools", 0) > 1:
+            score += 1
+
+        # Map score to complexity
+        if score >= 4:
+            return "complex"
+        elif score >= 2:
+            return "moderate"
+        else:
+            return "simple"
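For orientation only, and not part of the released wheel: a minimal usage sketch of the parser shown in the diff above, assuming the installed package exposes it at the core/llm/response_parser.py path listed in the file index. The sample response text and variable names are invented for illustration.

# Illustrative sketch — assumes the installed kollabor wheel is importable.
from core.llm.response_parser import ResponseParser

sample = (
    "<think>User wants a longer timeout.</think>\n"
    "Updating the timeout value now.\n"
    "<edit>\n"
    "  <file>config.yaml</file>\n"
    "  <find>timeout: 30</find>\n"
    "  <replace>timeout: 60</replace>\n"
    "</edit>\n"
    "<terminal>cat config.yaml</terminal>\n"
)

parser = ResponseParser()
parsed = parser.parse_response(sample)

print(parsed["content"])         # prose only; think/edit/terminal tags stripped
print(parsed["turn_completed"])  # False: a terminal command and a file edit are pending
for tool in parser.get_all_tools(parsed):
    print(tool["type"], tool["id"])  # terminal/terminal_0, then file_edit/file_edit_0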