neuro-simulator 0.0.4__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,494 @@
+ # agent/core.py
+ """
+ Core module for the Neuro Simulator Agent
+ """
+
+ import os
+ import json
+ import asyncio
+ from typing import Dict, List, Any, Optional
+ from datetime import datetime
+ import sys
+ import logging
+
+ # Import the shared log queue from the main log_handler
+ from ..log_handler import agent_log_queue, QueueLogHandler
+
+ # Create a logger for the agent
+ agent_logger = logging.getLogger("neuro_agent")
+ agent_logger.setLevel(logging.DEBUG)
+
+ # Configure agent logging to use the shared queue
+ def configure_agent_logging():
+     """Configure agent logging to use the shared agent_log_queue"""
+     # Create a handler for the agent queue
+     agent_queue_handler = QueueLogHandler(agent_log_queue)
+     formatter = logging.Formatter('%(asctime)s - [AGENT] - %(levelname)s - %(message)s', datefmt='%H:%M:%S')
+     agent_queue_handler.setFormatter(formatter)
+
+     # Clear any existing handlers
+     if agent_logger.hasHandlers():
+         agent_logger.handlers.clear()
+
+     # Add the queue handler
+     agent_logger.addHandler(agent_queue_handler)
+     agent_logger.propagate = False  # Prevent logs from propagating to root logger
+
+     print("Agent logging configured; logs are routed to agent_log_queue.")
+
+ # Configure agent logging when module is imported
+ configure_agent_logging()
+
+ class Agent:
+     """Main Agent class that integrates LLM, memory, and tools"""
+
+     def __init__(self, working_dir: Optional[str] = None):
+         # Lazy imports to avoid circular dependencies
+         from .memory.manager import MemoryManager
+         from .tools.core import ToolManager
+         from .llm import LLMClient
+
+         self.memory_manager = MemoryManager(working_dir)
+         self.tool_manager = ToolManager(self.memory_manager)
+         self.llm_client = LLMClient()
+         self._initialized = False
+
+         # Log agent initialization
+         agent_logger.info("Agent initialized")
+         agent_logger.debug(f"Agent working directory: {working_dir}")
+
+     async def initialize(self):
+         """Initialize the agent, loading any persistent memory"""
+         if not self._initialized:
+             agent_logger.info("Initializing agent memory manager")
+             await self.memory_manager.initialize()
+             self._initialized = True
+             agent_logger.info("Agent initialized successfully")
+
+     async def reset_all_memory(self):
+         """Reset all agent memory types"""
+         # Reset temp memory
+         await self.memory_manager.reset_temp_memory()
+
+         # Reset context (dialog history)
+         await self.memory_manager.reset_context()
+
+         agent_logger.info("All agent memory reset successfully")
+         print("All agent memory reset successfully")
+
+     async def reset_memory(self):
+         """Reset agent temp memory (alias for backward compatibility)"""
+         await self.reset_all_memory()
+
+     async def process_messages(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
+         """
+         Process incoming messages and generate a response
+
+         Args:
+             messages: List of message dictionaries with 'username' and 'text' keys
+
+         Returns:
+             Dictionary containing processing details including tool executions and final response
+         """
+         # Ensure agent is initialized
+         await self.initialize()
+
+         agent_logger.info(f"Processing {len(messages)} messages")
+
+         # Add messages to context
+         for msg in messages:
+             content = f"{msg['username']}: {msg['text']}"
+             await self.memory_manager.add_context_entry("user", content)
+             agent_logger.debug(f"Added message to context: {content}")
+
+         # Send context update via WebSocket after adding user messages
+         from ..websocket_manager import connection_manager
+         context_messages = await self.memory_manager.get_recent_context()
+         await connection_manager.broadcast({
+             "type": "agent_context",
+             "action": "update",
+             "messages": context_messages
+         })
+
+         # Add detailed context entry for the start of processing
+         processing_entry_id = await self.memory_manager.add_detailed_context_entry(
+             input_messages=messages,
+             prompt="Processing started",
+             llm_response="",
+             tool_executions=[],
+             final_response="Processing started"
+         )
+
+         # Get full context for LLM
+         context = await self.memory_manager.get_full_context()
+         tool_descriptions = self.tool_manager.get_tool_descriptions()
+
+         # Get last agent response to avoid repetition
+         last_response = await self.memory_manager.get_last_agent_response()
+
+         # Create LLM prompt with context and tools
+         prompt = f"""You are {self.memory_manager.init_memory.get('name', 'Neuro-Sama')}, an AI VTuber.
+ Your personality: {self.memory_manager.init_memory.get('personality', 'Friendly and curious')}
+
+ === CONTEXT ===
+ {context}
+
+ === AVAILABLE TOOLS ===
+ {tool_descriptions}
+
+ === INSTRUCTIONS ===
+ Process the user messages and respond appropriately. You can use tools to manage memory or output responses.
+ When you want to speak to the user, use the 'speak' tool with your response as the text parameter.
+ When you want to update memory, use the appropriate memory management tools.
+ You are fully responsible for managing your own memory. Use the memory tools proactively when you need to:
+ - Remember important information from the conversation
+ - Update your knowledge or personality
+ - Store observations about users or events
+ - Retrieve relevant information to inform your responses
+ Always think about whether you need to use tools before responding.
+
+ IMPORTANT GUIDELINES:
+ - Be creative and engaging in your responses
+ - Keep responses concise and conversational
+ - Maintain your character's personality
+ - Pay close attention to the conversation history in the context to understand the flow of the dialogue
+ - Respond to the most recent user messages while considering the overall context of the conversation
+
+ === YOUR SPEAK HISTORY ===
+ """
+
+         # Add assistant's recent responses to the prompt
+         # Get the recent context and extract speak results from llm_interaction entries
+         recent_context = await self.memory_manager.get_recent_context(100)  # Get more entries to filter
+         assistant_responses = []
+
+         # Filter for llm_interaction entries with role: "assistant" and extract speak results
+         for entry in reversed(recent_context):  # Reverse to get newest first
+             if entry.get("type") == "llm_interaction" and entry.get("role") == "assistant":
+                 tool_executions = entry.get("tool_executions", [])
+                 for tool_execution in tool_executions:
+                     if tool_execution.get("name") == "speak":
+                         result = tool_execution.get("result")
+                         if result:
+                             assistant_responses.append(result)
+
+         # Add up to 64 most recent assistant responses
+         for i, response in enumerate(assistant_responses[:64]):
+             prompt += f"{i+1}. {response}\n"
+
+         prompt += f"\nUser messages to respond to:\n"
+
+         for msg in messages:
+             prompt += f"{msg['username']}: {msg['text']}\n"
+
+         prompt += "\nYour response (use tools as needed):"
+
+         agent_logger.debug("Sending prompt to LLM")
+
+         # Add detailed context entry for the prompt
+         await self.memory_manager.add_detailed_context_entry(
+             input_messages=messages,
+             prompt=prompt,
+             llm_response="",
+             tool_executions=[],
+             final_response="Prompt sent to LLM",
+             entry_id=processing_entry_id
+         )
+
+         # Generate response using LLM
+         response = await self.llm_client.generate(prompt)
+         agent_logger.debug(f"LLM response received: {response[:100] if response else 'None'}...")
+
+         # Add detailed context entry for the LLM response
+         await self.memory_manager.add_detailed_context_entry(
+             input_messages=messages,
+             prompt=prompt,
+             llm_response=response,
+             tool_executions=[],
+             final_response="LLM response received",
+             entry_id=processing_entry_id
+         )
+
+         # Parse the response to handle tool calls
+         # This is a simplified parser - in a full implementation, you would use a more robust method
+         processing_result = {
+             "input_messages": messages,
+             "llm_response": response,
+             "tool_executions": [],
+             "final_response": ""
+         }
+
+         # Extract tool calls from the response
+         # Look for tool calls in the response
+         lines = response.split('\n') if response else []
+         i = 0
+         json_buffer = ""  # Buffer to accumulate multi-line JSON
+         in_json_block = False  # Flag to track if we're inside a JSON block
+
+         while i < len(lines):
+             line = lines[i].strip()
+             agent_logger.debug(f"Parsing line: {line}")
+
+             # Handle JSON blocks
+             if line.startswith('```json'):
+                 in_json_block = True
+                 json_buffer = line + '\n'
+             elif line == '```' and in_json_block:
+                 # End of JSON block
+                 json_buffer += line
+                 in_json_block = False
+                 # Process the complete JSON block
+                 tool_call = self._parse_tool_call(json_buffer)
+                 if tool_call:
+                     agent_logger.info(f"Executing tool: {tool_call['name']}")
+                     await self._execute_parsed_tool(tool_call, processing_result)
+                     # Update detailed context entry for tool execution
+                     await self.memory_manager.add_detailed_context_entry(
+                         input_messages=messages,
+                         prompt=prompt,
+                         llm_response=response,
+                         tool_executions=processing_result["tool_executions"].copy(),  # Pass a copy of current tool executions
+                         final_response=f"Executed tool: {tool_call['name']}",
+                         entry_id=processing_entry_id
+                     )
+                 else:
+                     agent_logger.warning(f"Failed to parse tool call from JSON block: {json_buffer}")
+             elif in_json_block:
+                 # Accumulate lines for JSON block
+                 json_buffer += line + '\n'
+             else:
+                 # Check if line contains a tool call
+                 if any(line.startswith(prefix) for prefix in ["get_", "create_", "update_", "delete_", "add_", "remove_", "speak("]):
+                     # Parse tool call
+                     tool_call = self._parse_tool_call(line)
+                     if tool_call:
+                         agent_logger.info(f"Executing tool: {tool_call['name']}")
+                         await self._execute_parsed_tool(tool_call, processing_result)
+                         # Update detailed context entry for tool execution
+                         await self.memory_manager.add_detailed_context_entry(
+                             input_messages=messages,
+                             prompt=prompt,
+                             llm_response=response,
+                             tool_executions=processing_result["tool_executions"].copy(),  # Pass a copy of current tool executions
+                             final_response=f"Executed tool: {tool_call['name']}",
+                             entry_id=processing_entry_id
+                         )
+                     else:
+                         agent_logger.warning(f"Failed to parse tool call from line: {line}")
+             i += 1
+
+         # If we're still in a JSON block at the end, process it
+         if in_json_block and json_buffer:
+             tool_call = self._parse_tool_call(json_buffer)
+             if tool_call:
+                 agent_logger.info(f"Executing tool: {tool_call['name']}")
+                 await self._execute_parsed_tool(tool_call, processing_result)
+                 # Update detailed context entry for tool execution
+                 await self.memory_manager.add_detailed_context_entry(
+                     input_messages=messages,
+                     prompt=prompt,
+                     llm_response=response,
+                     tool_executions=processing_result["tool_executions"].copy(),  # Pass a copy of current tool executions
+                     final_response=f"Executed tool: {tool_call['name']}",
+                     entry_id=processing_entry_id
+                 )
+             else:
+                 agent_logger.warning(f"Failed to parse tool call from incomplete JSON block: {json_buffer}")
+
+         # If we have a final response, we don't need to add it to context here
+         # because add_detailed_context_entry will add a short entry for us
+         # if processing_result["final_response"]:
+         #     await self.memory_manager.add_context_entry("assistant", processing_result["final_response"])
+
+         # Update the detailed context entry with final LLM interaction details
+         await self.memory_manager.add_detailed_context_entry(
+             input_messages=messages,
+             prompt=prompt,
+             llm_response=response,
+             tool_executions=processing_result["tool_executions"],
+             final_response=processing_result["final_response"],
+             entry_id=processing_entry_id
+         )
+
+         # Send context update via WebSocket
+         from ..websocket_manager import connection_manager
+         context_messages = await self.memory_manager.get_recent_context()
+         await connection_manager.broadcast({
+             "type": "agent_context",
+             "action": "update",
+             "messages": context_messages
+         })
+
+         agent_logger.info("Message processing completed")
+         return processing_result
+
+     async def _execute_parsed_tool(self, tool_call: Dict[str, Any], processing_result: Dict[str, Any]):
+         """Execute a parsed tool call and update processing result"""
+         # Only prevent duplicate speak tool executions to avoid repeated responses
+         if tool_call["name"] == "speak":
+             for executed_tool in processing_result["tool_executions"]:
+                 if (executed_tool["name"] == "speak" and
+                         executed_tool["params"].get("text") == tool_call["params"].get("text")):
+                     agent_logger.debug(f"Skipping duplicate speak tool execution: {tool_call['params'].get('text')}")
+                     return
+
+         # Execute the tool
+         try:
+             tool_result = await self.execute_tool(tool_call["name"], tool_call["params"])
+             tool_call["result"] = tool_result
+
+             # If this is the speak tool, capture the final response
+             if tool_call["name"] == "speak":
+                 processing_result["final_response"] = tool_call["params"].get("text", "")
+                 agent_logger.info(f"Speak tool executed with text: {processing_result['final_response']}")
+             else:
+                 agent_logger.debug(f"Tool execution result: {tool_result}")
+
+             processing_result["tool_executions"].append(tool_call)
+         except Exception as e:
+             tool_call["error"] = str(e)
+             processing_result["tool_executions"].append(tool_call)
+             agent_logger.error(f"Error executing tool {tool_call['name']}: {e}")
+
+     def _parse_tool_call(self, line: str) -> Optional[Dict[str, Any]]:
+         """Parse a tool call from a line of text"""
+         import re
+         import json
+
+         # First try to parse as JSON if it looks like JSON
+         line = line.strip()
+         if line.startswith('```json'):
+             try:
+                 # Extract JSON content
+                 json_content = line[7:]  # Remove ```json
+                 if json_content.endswith('```'):
+                     json_content = json_content[:-3]  # Remove trailing ```
+                 json_content = json_content.strip()
+
+                 # Parse the JSON
+                 tool_call_data = json.loads(json_content)
+
+                 # Handle different JSON formats
+                 if isinstance(tool_call_data, dict):
+                     # Check if it's a tool_code format
+                     if 'tool_code' in tool_call_data:
+                         # Extract the tool call from tool_code
+                         tool_code = tool_call_data['tool_code']
+                         # Remove any wrapper functions like print()
+                         tool_code = re.sub(r'^\w+\((.*)\)$', r'\1', tool_code)
+                         # Now parse the tool call normally
+                         pattern = r'(\w+)\((.*)\)'
+                         match = re.match(pattern, tool_code)
+                         if match:
+                             tool_name = match.group(1)
+                             params_str = match.group(2)
+
+                             # Parse parameters
+                             params = {}
+                             param_pattern = r'(\w+)\s*=\s*(".*?"|\'.*?\'|[^,]+?)(?:,|$)'
+                             for param_match in re.finditer(param_pattern, params_str):
+                                 key, value = param_match.groups()
+                                 # Remove quotes if present
+                                 if (value.startswith('"') and value.endswith('"')) or \
+                                    (value.startswith("'") and value.endswith("'")):
+                                     value = value[1:-1]
+                                 params[key] = value
+
+                             return {
+                                 "name": tool_name,
+                                 "params": params
+                             }
+                     # Check if it's a name/arguments format
+                     elif 'name' in tool_call_data and 'arguments' in tool_call_data:
+                         return {
+                             "name": tool_call_data['name'],
+                             "params": tool_call_data['arguments']
+                         }
+                 elif isinstance(tool_call_data, list) and len(tool_call_data) > 0:
+                     # Handle array format - take the first item
+                     first_item = tool_call_data[0]
+                     if isinstance(first_item, dict):
+                         if 'tool_code' in first_item:
+                             # Extract the tool call from tool_code
+                             tool_code = first_item['tool_code']
+                             # Remove any wrapper functions like print()
+                             tool_code = re.sub(r'^\w+\((.*)\)$', r'\1', tool_code)
+                             # Now parse the tool call normally
+                             pattern = r'(\w+)\((.*)\)'
+                             match = re.match(pattern, tool_code)
+                             if match:
+                                 tool_name = match.group(1)
+                                 params_str = match.group(2)
+
+                                 # Parse parameters
+                                 params = {}
+                                 param_pattern = r'(\w+)\s*=\s*(".*?"|\'.*?\'|[^,]+?)(?:,|$)'
+                                 for param_match in re.finditer(param_pattern, params_str):
+                                     key, value = param_match.groups()
+                                     # Remove quotes if present
+                                     if (value.startswith('"') and value.endswith('"')) or \
+                                        (value.startswith("'") and value.endswith("'")):
+                                         value = value[1:-1]
+                                     params[key] = value
+
+                                 return {
+                                     "name": tool_name,
+                                     "params": params
+                                 }
+                         elif 'name' in first_item and 'arguments' in first_item:
+                             return {
+                                 "name": first_item['name'],
+                                 "params": first_item['arguments']
+                             }
+
+             except (json.JSONDecodeError, KeyError, IndexError):
+                 pass  # Fall back to regex parsing
+
+         # Handle multi-line JSON that might be split across several lines
+         if line == '```json' or line == '{' or line == '}':
+             # Skip these lines as they're part of JSON structure
+             return None
+
+         # Pattern to match tool_name(param1=value1, param2=value2, ...)
+         pattern = r'(\w+)\((.*)\)'
+         match = re.match(pattern, line)
+
+         if match:
+             tool_name = match.group(1)
+             params_str = match.group(2)
+
+             # Parse parameters more robustly
+             params = {}
+
+             # Handle parameters one by one
+             # This handles quoted strings correctly, including special characters
+             param_pattern = r'(\w+)\s*=\s*(".*?"|\'.*?\'|[^,]+?)(?:,|$)'
+             for param_match in re.finditer(param_pattern, params_str):
+                 key, value = param_match.groups()
+                 # Remove quotes if present
+                 if (value.startswith('"') and value.endswith('"')) or \
+                    (value.startswith("'") and value.endswith("'")):
+                     value = value[1:-1]
+                 params[key] = value
+
+             return {
+                 "name": tool_name,
+                 "params": params
+             }
+
+         return None
+
+     async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Any:
+         """Execute a registered tool"""
+         # Ensure agent is initialized
+         await self.initialize()
+         agent_logger.debug(f"Executing tool: {tool_name} with params: {params}")
+         result = await self.tool_manager.execute_tool(tool_name, params)
+         agent_logger.debug(f"Tool execution result: {result}")
+         return result
+
+ # Function to get agent logs (now uses the shared queue)
+ def get_agent_logs(lines: int = 50) -> List[str]:
+     """Get recent agent logs from the shared queue"""
+     logs_list = list(agent_log_queue)
+     return logs_list[-lines:] if len(logs_list) > lines else logs_list
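
For reference, `_parse_tool_call` above accepts three shapes of tool call: a bare function-style line such as `speak(text="Hello chat!")`, a fenced JSON block carrying a `{"name": ..., "arguments": {...}}` object, and a fenced JSON block carrying `{"tool_code": "speak(...)"}` (a JSON array of either shape is also handled by taking its first element). Below is a minimal, self-contained sketch of the function-style fallback branch, reusing the same regexes as the code above; `parse_function_call` is an illustrative name, not part of the package:

```python
import re

# The same patterns the fallback branch of _parse_tool_call uses.
CALL_PATTERN = r'(\w+)\((.*)\)'
PARAM_PATTERN = r'(\w+)\s*=\s*(".*?"|\'.*?\'|[^,]+?)(?:,|$)'

def parse_function_call(line: str):
    """Illustrative re-implementation of the function-style fallback."""
    match = re.match(CALL_PATTERN, line.strip())
    if not match:
        return None
    tool_name, params_str = match.groups()
    params = {}
    for param_match in re.finditer(PARAM_PATTERN, params_str):
        key, value = param_match.groups()
        # Strip surrounding quotes, as the original does
        if (value.startswith('"') and value.endswith('"')) or \
           (value.startswith("'") and value.endswith("'")):
            value = value[1:-1]
        params[key] = value
    return {"name": tool_name, "params": params}

print(parse_function_call('speak(text="Hello chat!")'))
# -> {'name': 'speak', 'params': {'text': 'Hello chat!'}}
```

Note that all parameter values come back as strings; the tool layer is responsible for any further coercion.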
@@ -0,0 +1,104 @@
+ # agent/llm.py
+ """
+ LLM client for the Neuro Simulator Agent
+ """
+
+ from typing import Optional
+ import os
+ import sys
+
+ # Add project root to path
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+
+ from google import genai
+ from google.genai import types
+ from openai import AsyncOpenAI
+ from ..config import config_manager
+
+ class LLMClient:
+     """A completely independent LLM client for the built-in agent."""
+
+     def __init__(self):
+         self.client = None
+         self.model_name = None
+         self._initialize_client()
+
+     def _initialize_client(self):
+         """Initializes the LLM client based on the 'agent' section of the config."""
+         settings = config_manager.settings
+         provider = settings.agent.agent_provider.lower()
+
+         if provider == "gemini":
+             api_key = settings.api_keys.gemini_api_key
+             if not api_key:
+                 raise ValueError("GEMINI_API_KEY is not set in configuration for the agent.")
+
+             # Use the new client-based API as per the latest documentation
+             self.client = genai.Client(api_key=api_key)
+             self.model_name = settings.agent.agent_model
+             self._generate_func = self._generate_gemini
+
+         elif provider == "openai":
+             api_key = settings.api_keys.openai_api_key
+             if not api_key:
+                 raise ValueError("OPENAI_API_KEY is not set in configuration for the agent.")
+
+             self.model_name = settings.agent.agent_model
+             self.client = AsyncOpenAI(
+                 api_key=api_key,
+                 base_url=settings.api_keys.openai_api_base_url
+             )
+             self._generate_func = self._generate_openai
+         else:
+             raise ValueError(f"Unsupported agent provider in config: {settings.agent.agent_provider}")
+
+         print(f"Agent LLM client initialized. Provider: {provider.upper()}, Model: {self.model_name}")
+
+     async def _generate_gemini(self, prompt: str, max_tokens: int) -> str:
+         """Generates text using the Gemini model with the new SDK."""
+         import asyncio
+
+         generation_config = types.GenerateContentConfig(
+             max_output_tokens=max_tokens,
+             # temperature can be added later if needed from config
+         )
+
+         try:
+             # The new client's generate_content is synchronous, run it in a thread
+             response = await asyncio.to_thread(
+                 self.client.models.generate_content,
+                 model=self.model_name,
+                 contents=prompt,
+                 config=generation_config
+             )
+             return response.text if response and response.text else ""
+         except Exception as e:
+             print(f"Error in _generate_gemini: {e}")
+             return ""
+
+     async def _generate_openai(self, prompt: str, max_tokens: int) -> str:
+         try:
+             response = await self.client.chat.completions.create(
+                 model=self.model_name,
+                 messages=[{"role": "user", "content": prompt}],
+                 max_tokens=max_tokens,
+                 # temperature can be added to config if needed
+             )
+             if response.choices and response.choices[0].message and response.choices[0].message.content:
+                 return response.choices[0].message.content.strip()
+             return ""
+         except Exception as e:
+             print(f"Error in _generate_openai: {e}")
+             return ""
+
+     async def generate(self, prompt: str, max_tokens: int = 1000) -> str:
+         """Generate text using the configured LLM."""
+         if not self.client:
+             raise RuntimeError("LLM Client is not initialized.")
+         try:
+             result = await self._generate_func(prompt, max_tokens)
+             # Ensure we always return a string, even if the result is None
+             return result if result is not None else ""
+         except Exception as e:
+             print(f"Error generating text with Agent LLM: {e}")
+             return "My brain is not working, tell Vedal to check the logs."
@@ -0,0 +1,4 @@
+ # agent/memory/__init__.py
+ """
+ Memory module for the Neuro Simulator Agent
+ """