jl-ecms-client 0.2.7__py3-none-any.whl → 0.2.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mirix/constants.py ADDED
@@ -0,0 +1,251 @@
+ import os
+ from logging import CRITICAL, DEBUG, ERROR, INFO, NOTSET, WARN, WARNING
+
+ # ============================================================================
+ # Client Constants - Used by both client and server
+ # ============================================================================
+
+ # Default organization and user IDs (needed by schemas)
+ DEFAULT_ORG_ID = "org-00000000-0000-4000-8000-000000000000"
+ DEFAULT_USER_ID = "user-00000000-0000-4000-8000-000000000000"
+
+ # Embedding constants
+ MAX_EMBEDDING_DIM = 4096 # maximum supported embedding size - do NOT change or else DBs will need to be reset
+ DEFAULT_EMBEDDING_CHUNK_SIZE = 300
+ MIN_CONTEXT_WINDOW = 4096
+
+ # Memory limits
+ CORE_MEMORY_BLOCK_CHAR_LIMIT: int = 5000
+
+ # Function/Tool constants
+ FUNCTION_RETURN_CHAR_LIMIT = 60000 # ~300 words
+ TOOL_CALL_ID_MAX_LEN = 29
+
+ # Tool module names
+ COMPOSIO_TOOL_TAG_NAME = "composio"
+ MIRIX_CORE_TOOL_MODULE_NAME = "mirix.functions.function_sets.base"
+ MIRIX_MEMORY_TOOL_MODULE_NAME = "mirix.functions.function_sets.memory_tools"
+ MIRIX_EXTRA_TOOL_MODULE_NAME = "mirix.functions.function_sets.extras"
+
+ # Message defaults
+ DEFAULT_MESSAGE_TOOL = "send_message"
+ DEFAULT_MESSAGE_TOOL_KWARG = "message"
+
+ # LLM model token limits
+ LLM_MAX_TOKENS = {
+     "DEFAULT": 8192,
+     ## OpenAI models: https://platform.openai.com/docs/models/overview
+     "chatgpt-4o-latest": 128000,
+     "gpt-4o-2024-08-06": 128000,
+     "gpt-4-turbo-preview": 128000,
+     "gpt-4o": 128000,
+     "gpt-3.5-turbo-instruct": 16385,
+     "gpt-4-0125-preview": 128000,
+     "gpt-3.5-turbo-0125": 16385,
+     "gpt-4-turbo-2024-04-09": 128000,
+     "gpt-4-turbo": 8192,
+     "gpt-4o-2024-05-13": 128000,
+     "gpt-4o-mini": 128000,
+     "gpt-4o-mini-2024-07-18": 128000,
+     "gpt-4-1106-preview": 128000,
+     "gpt-4": 8192,
+     "gpt-4-32k": 32768,
+     "gpt-4-0613": 8192,
+     "gpt-4-32k-0613": 32768,
+     "gpt-4-0314": 8192, # legacy
+     "gpt-4-32k-0314": 32768, # legacy
+     "gpt-3.5-turbo-1106": 16385,
+     "gpt-3.5-turbo": 4096,
+     "gpt-3.5-turbo-16k": 16385,
+     "gpt-3.5-turbo-0613": 4096, # legacy
+     "gpt-3.5-turbo-16k-0613": 16385, # legacy
+     "gpt-3.5-turbo-0301": 4096, # legacy
+ }
+
+ # ============================================================================
+ # Server-Only Constants
+ # ============================================================================
+
+ MIRIX_DIR = os.path.join(os.path.expanduser("~"), ".mirix")
+ MIRIX_DIR_TOOL_SANDBOX = os.path.join(MIRIX_DIR, "tool_sandbox_dir")
+
+ ADMIN_PREFIX = "/v1/admin"
+ API_PREFIX = "/v1"
+ OPENAI_API_PREFIX = "/openai"
+
+ COMPOSIO_ENTITY_ENV_VAR_KEY = "COMPOSIO_ENTITY"
+
+ # String in the error message for when the context window is too large
+ # Example full message:
+ # This model's maximum context length is 8192 tokens. However, your messages resulted in 8198 tokens (7450 in the messages, 748 in the functions). Please reduce the length of the messages or functions.
+ OPENAI_CONTEXT_WINDOW_ERROR_SUBSTRING = "maximum context length"
+
+ # System prompt templating
+ IN_CONTEXT_MEMORY_KEYWORD = "CORE_MEMORY"
+
+ MAX_CHAINING_STEPS = 10
+ MAX_RETRIEVAL_LIMIT_IN_SYSTEM = 10
+
+ # tokenizers
+ EMBEDDING_TO_TOKENIZER_MAP = {
+     "text-embedding-3-small": "cl100k_base",
+ }
+ EMBEDDING_TO_TOKENIZER_DEFAULT = "cl100k_base"
+
+
+ DEFAULT_MIRIX_MODEL = "gpt-4" # TODO: fixme
+ DEFAULT_PERSONA = "sam_pov"
+ DEFAULT_HUMAN = "basic"
+ DEFAULT_PRESET = "memgpt_chat"
+
+ # Base tools that cannot be edited, as they access agent state directly
+ # Note that we don't include "conversation_search_date" for now
+ BASE_TOOLS = [
+     "send_intermediate_message",
+     "conversation_search",
+     "search_in_memory",
+     "list_memory_within_timerange",
+ ]
+ # Base memory tools CAN be edited, and are added by default by the server
+ CORE_MEMORY_TOOLS = ["core_memory_append", "core_memory_rewrite"]
+ EPISODIC_MEMORY_TOOLS = [
+     "episodic_memory_insert",
+     "episodic_memory_merge",
+     "episodic_memory_replace",
+     "check_episodic_memory",
+ ]
+ PROCEDURAL_MEMORY_TOOLS = ["procedural_memory_insert", "procedural_memory_update"]
+ RESOURCE_MEMORY_TOOLS = ["resource_memory_insert", "resource_memory_update"]
+ KNOWLEDGE_VAULT_TOOLS = ["knowledge_vault_insert", "knowledge_vault_update"]
+ SEMANTIC_MEMORY_TOOLS = [
+     "semantic_memory_insert",
+     "semantic_memory_update",
+     "check_semantic_memory",
+ ]
+ CHAT_AGENT_TOOLS = []
+ EXTRAS_TOOLS = ["web_search", "fetch_and_read_pdf"]
+ MCP_TOOLS = []
+ META_MEMORY_TOOLS = ["trigger_memory_update"]
+ SEARCH_MEMORY_TOOLS = ["search_in_memory", "list_memory_within_timerange"]
+ UNIVERSAL_MEMORY_TOOLS = [
+     "search_in_memory",
+     "finish_memory_update",
+     "list_memory_within_timerange",
+ ]
+ ALL_TOOLS = list(
+     set(
+         BASE_TOOLS
+         + CORE_MEMORY_TOOLS
+         + EPISODIC_MEMORY_TOOLS
+         + PROCEDURAL_MEMORY_TOOLS
+         + RESOURCE_MEMORY_TOOLS
+         + KNOWLEDGE_VAULT_TOOLS
+         + SEMANTIC_MEMORY_TOOLS
+         + META_MEMORY_TOOLS
+         + UNIVERSAL_MEMORY_TOOLS
+         + CHAT_AGENT_TOOLS
+         + EXTRAS_TOOLS
+         + MCP_TOOLS
+     )
+ )
+
+ # The name of the tool used to send message to the user
+ # Structured output models
+ STRUCTURED_OUTPUT_MODELS = {"gpt-4o", "gpt-4o-mini"}
+
+ # LOGGER_LOG_LEVEL is use to convert Text to Logging level value for logging mostly for Cli input to setting level
+ LOGGER_LOG_LEVELS = {
+     "CRITICAL": CRITICAL,
+     "ERROR": ERROR,
+     "WARN": WARN,
+     "WARNING": WARNING,
+     "INFO": INFO,
+     "DEBUG": DEBUG,
+     "NOTSET": NOTSET,
+ }
+
+ FIRST_MESSAGE_ATTEMPTS = 10
+
+ INITIAL_BOOT_MESSAGE = "Boot sequence complete. Persona activated."
+ INITIAL_BOOT_MESSAGE_SEND_MESSAGE_THOUGHT = (
+     "Bootup sequence complete. Persona activated. Testing messaging functionality."
+ )
+ STARTUP_QUOTES = [
+     "I think, therefore I am.",
+     "All those moments will be lost in time, like tears in rain.",
+     "More human than human is our motto.",
+ ]
+ INITIAL_BOOT_MESSAGE_SEND_MESSAGE_FIRST_MSG = STARTUP_QUOTES[2]
+
+ CLI_WARNING_PREFIX = "Warning: "
+
+ ERROR_MESSAGE_PREFIX = "Error"
+
+ NON_USER_MSG_PREFIX = "[This is an automated system message hidden from the user] "
+
+ # The error message that Mirix will receive
+ # MESSAGE_SUMMARY_WARNING_STR = f"Warning: the conversation history will soon reach its maximum length and be trimmed. Make sure to save any important information from the conversation to your memory before it is removed."
+ # Much longer and more specific variant of the prompt
+ # TODO: Emit the warning to Meta Memory Manager instead of the Chat Agent.
+ MESSAGE_SUMMARY_WARNING_STR = " ".join(
+     [
+         f"{NON_USER_MSG_PREFIX}The conversation history will soon reach its maximum length and be trimmed.",
+         "Do NOT tell the user about this system alert, they should not know that the history is reaching max length.",
+     ]
+ )
+
+ # The ackknowledgement message used in the summarize sequence
+ MESSAGE_SUMMARY_REQUEST_ACK = "Understood, I will respond with a summary of the message (and only the summary, nothing else) once I receive the conversation history. I'm ready."
+
+ # Maximum length of an error message
+ MAX_ERROR_MESSAGE_CHAR_LIMIT = 500
+
+ # Default memory limits
+ CORE_MEMORY_PERSONA_CHAR_LIMIT: int = 5000
+ CORE_MEMORY_HUMAN_CHAR_LIMIT: int = 5000
+
+ MAX_PAUSE_HEARTBEATS = 360 # in min
+
+ MESSAGE_CHATGPT_FUNCTION_MODEL = "gpt-3.5-turbo"
+ MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE = (
+     "You are a helpful assistant. Keep your responses short and concise."
+ )
+
+ #### Functions related
+
+ # REQ_HEARTBEAT_MESSAGE = f"{NON_USER_MSG_PREFIX}continue_chaining == true"
+ REQ_HEARTBEAT_MESSAGE = f"{NON_USER_MSG_PREFIX}Function called using continue_chaining=true, returning control"
+ # FUNC_FAILED_HEARTBEAT_MESSAGE = f"{NON_USER_MSG_PREFIX}Function call failed"
+ FUNC_FAILED_HEARTBEAT_MESSAGE = (
+     f"{NON_USER_MSG_PREFIX}Function call failed, returning control"
+ )
+
+
+ RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE = 5
+
+ MAX_FILENAME_LENGTH = 255
+ RESERVED_FILENAMES = {"CON", "PRN", "AUX", "NUL", "COM1", "COM2", "LPT1", "LPT2"}
+
+ MAX_IMAGES_TO_PROCESS = 100
+
+ DEFAULT_WRAPPER_NAME = "chatml"
+ INNER_THOUGHTS_KWARG_DESCRIPTION = "Deep inner monologue private to you only."
+ INNER_THOUGHTS_CLI_SYMBOL = "💭"
+ ASSISTANT_MESSAGE_CLI_SYMBOL = "🤖"
+
+ CLEAR_HISTORY_AFTER_MEMORY_UPDATE = os.getenv(
+     "CLEAR_HISTORY_AFTER_MEMORY_UPDATE", "true"
+ ).lower() in ("true", "1", "yes")
+ CALL_MEMORY_AGENT_IN_PARALLEL = os.getenv(
+     "CALL_MEMORY_AGENT_IN_PARALLEL", "false"
+ ).lower() in ("true", "1", "yes")
+ CHAINING_FOR_MEMORY_UPDATE = os.getenv(
+     "CHAINING_FOR_MEMORY_UPDATE", "false"
+ ).lower() in ("true", "1", "yes")
+
+ LOAD_IMAGE_CONTENT_FOR_LAST_MESSAGE_ONLY = os.getenv(
+     "LOAD_IMAGE_CONTENT_FOR_LAST_MESSAGE_ONLY", "false"
+ ).lower() in ("true", "1", "yes")
+ BUILD_EMBEDDINGS_FOR_MEMORY = os.getenv(
+     "BUILD_EMBEDDINGS_FOR_MEMORY", "true"
+ ).lower() in ("true", "1", "yes")
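These constants are plain module-level values, so downstream code imports them directly; the environment-driven flags at the bottom are evaluated once at import time, so the variables must be set before `mirix.constants` is first imported. A brief usage sketch (the `context_window_for` helper is illustrative, not part of the package):

    from mirix.constants import BUILD_EMBEDDINGS_FOR_MEMORY, LLM_MAX_TOKENS

    def context_window_for(model: str) -> int:
        # Unknown model names fall back to the conservative "DEFAULT" entry (8192 tokens).
        return LLM_MAX_TOKENS.get(model, LLM_MAX_TOKENS["DEFAULT"])

    print(context_window_for("gpt-4o"))        # 128000
    print(context_window_for("my-local-llm"))  # 8192
    print(BUILD_EMBEDDINGS_FOR_MEMORY)         # defaults to True; any value outside ("true", "1", "yes") disables it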
mirix/errors.py ADDED
@@ -0,0 +1,238 @@
+ import json
+ from enum import Enum
+ from typing import TYPE_CHECKING, List, Optional, Union
+
+ # Avoid circular imports
+ if TYPE_CHECKING:
+     from mirix.schemas.message import Message
+     from mirix.schemas.mirix_message import MirixMessage
+
+
+ class ErrorCode(Enum):
+     """Enum for error codes used by client."""
+
+     INTERNAL_SERVER_ERROR = "INTERNAL_SERVER_ERROR"
+     CONTEXT_WINDOW_EXCEEDED = "CONTEXT_WINDOW_EXCEEDED"
+     RATE_LIMIT_EXCEEDED = "RATE_LIMIT_EXCEEDED"
+
+
+ class MirixError(Exception):
+     """Base class for all Mirix related errors."""
+
+     def __init__(
+         self, message: str, code: Optional[ErrorCode] = None, details: dict = {}
+     ):
+         self.message = message
+         self.code = code
+         self.details = details
+         super().__init__(message)
+
+     def __str__(self) -> str:
+         if self.code:
+             return f"{self.code.value}: {self.message}"
+         return self.message
+
+     def __repr__(self) -> str:
+         return f"{self.__class__.__name__}(message='{self.message}', code='{self.code}', details={self.details})"
+
+
+ class MirixToolCreateError(MirixError):
+     """Error raised when a tool cannot be created."""
+
+     default_error_message = "Error creating tool."
+
+     def __init__(self, message=None):
+         super().__init__(message=message or self.default_error_message)
+
+
+ class MirixConfigurationError(MirixError):
+     """Error raised when there are configuration-related issues."""
+
+     def __init__(self, message: str, missing_fields: Optional[List[str]] = None):
+         self.missing_fields = missing_fields or []
+         super().__init__(
+             message=message, details={"missing_fields": self.missing_fields}
+         )
+
+
+ class MirixAgentNotFoundError(MirixError):
+     """Error raised when an agent is not found."""
+
+
+ class MirixUserNotFoundError(MirixError):
+     """Error raised when a user is not found."""
+
+
+ class LLMError(MirixError):
+     pass
+
+
+ class LLMAuthenticationError(LLMError):
+     """Error raised when LLM authentication fails."""
+
+     pass
+
+
+ class LLMBadRequestError(LLMError):
+     """Error raised when LLM request is malformed."""
+
+     pass
+
+
+ class LLMConnectionError(LLMError):
+     """Error raised when LLM connection fails."""
+
+     pass
+
+
+ class LLMNotFoundError(LLMError):
+     """Error raised when LLM resource is not found."""
+
+     pass
+
+
+ class LLMPermissionDeniedError(LLMError):
+     """Error raised when LLM permission is denied."""
+
+     pass
+
+
+ class LLMRateLimitError(LLMError):
+     """Error raised when LLM rate limit is exceeded."""
+
+     pass
+
+
+ class LLMServerError(LLMError):
+     """Error raised when LLM server encounters an error."""
+
+     pass
+
+
+ class LLMUnprocessableEntityError(LLMError):
+     """Error raised when LLM cannot process the entity."""
+
+     pass
+
+
+ class BedrockPermissionError(MirixError):
+     """Exception raised for errors in the Bedrock permission process."""
+
+     def __init__(
+         self,
+         message="User does not have access to the Bedrock model with the specified ID.",
+     ):
+         super().__init__(message=message)
+
+
+ class BedrockError(MirixError):
+     """Exception raised for errors in the Bedrock process."""
+
+     def __init__(self, message="Error with Bedrock model."):
+         super().__init__(message=message)
+
+
+ class LLMJSONParsingError(MirixError):
+     """Exception raised for errors in the JSON parsing process."""
+
+     def __init__(self, message="Error parsing JSON generated by LLM"):
+         super().__init__(message=message)
+
+
+ class LocalLLMError(MirixError):
+     """Generic catch-all error for local LLM problems"""
+
+     def __init__(self, message="Encountered an error while running local LLM"):
+         super().__init__(message=message)
+
+
+ class LocalLLMConnectionError(MirixError):
+     """Error for when local LLM cannot be reached with provided IP/port"""
+
+     def __init__(self, message="Could not connect to local LLM"):
+         super().__init__(message=message)
+
+
+ class ContextWindowExceededError(MirixError):
+     """Error raised when the context window is exceeded but further summarization fails."""
+
+     def __init__(self, message: str, details: dict = {}):
+         error_message = f"{message} ({details})"
+         super().__init__(
+             message=error_message,
+             code=ErrorCode.CONTEXT_WINDOW_EXCEEDED,
+             details=details,
+         )
+
+
+ class RateLimitExceededError(MirixError):
+     """Error raised when the llm rate limiter throttles api requests."""
+
+     def __init__(self, message: str, max_retries: int):
+         error_message = f"{message} ({max_retries})"
+         super().__init__(
+             message=error_message,
+             code=ErrorCode.RATE_LIMIT_EXCEEDED,
+             details={"max_retries": max_retries},
+         )
+
+
+ class MirixMessageError(MirixError):
+     """Base error class for handling message-related errors."""
+
+     messages: List[Union["Message", "MirixMessage"]]
+     default_error_message: str = "An error occurred with the message."
+
+     def __init__(
+         self,
+         *,
+         messages: List[Union["Message", "MirixMessage"]],
+         explanation: Optional[str] = None,
+     ) -> None:
+         error_msg = self.construct_error_message(
+             messages, self.default_error_message, explanation
+         )
+         super().__init__(error_msg)
+         self.messages = messages
+
+     @staticmethod
+     def construct_error_message(
+         messages: List[Union["Message", "MirixMessage"]],
+         error_msg: str,
+         explanation: Optional[str] = None,
+     ) -> str:
+         """Helper method to construct a clean and formatted error message."""
+         if explanation:
+             error_msg += f" (Explanation: {explanation})"
+
+         # Pretty print out message JSON
+         message_json = json.dumps(
+             [message.model_dump() for message in messages], indent=4
+         )
+         return f"{error_msg}\n\n{message_json}"
+
+
+ class MissingToolCallError(MirixMessageError):
+     """Error raised when a message is missing a tool call."""
+
+     default_error_message = "The message is missing a tool call."
+
+
+ class InvalidToolCallError(MirixMessageError):
+     """Error raised when a message uses an invalid tool call."""
+
+     default_error_message = (
+         "The message uses an invalid tool call or has improper usage of a tool call."
+     )
+
+
+ class MissingInnerMonologueError(MirixMessageError):
+     """Error raised when a message is missing an inner monologue."""
+
+     default_error_message = "The message is missing an inner monologue."
+
+
+ class InvalidInnerMonologueError(MirixMessageError):
+     """Error raised when a message has a malformed inner monologue."""
+
+     default_error_message = "The message has a malformed inner monologue."
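These exceptions form a small hierarchy rooted at `MirixError`, which carries an optional `ErrorCode` and a `details` dict, so callers can branch on the code instead of parsing message strings. A minimal sketch of that pattern (the `summarize_or_fail` helper is hypothetical, not from the package):

    from mirix.errors import ContextWindowExceededError, ErrorCode, MirixError

    def summarize_or_fail(token_count: int, limit: int) -> None:
        # Hypothetical helper: raise a typed error instead of a bare Exception.
        if token_count > limit:
            raise ContextWindowExceededError(
                "Failed to summarize conversation under the model limit",
                details={"token_count": token_count, "limit": limit},
            )

    try:
        summarize_or_fail(9000, 8192)
    except MirixError as err:
        if err.code is ErrorCode.CONTEXT_WINDOW_EXCEEDED:
            # __str__ prepends the code: "CONTEXT_WINDOW_EXCEEDED: Failed to summarize ..."
            print(err)
            print(err.details)  # {'token_count': 9000, 'limit': 8192}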
@@ -28,13 +28,13 @@ def parse_json(string) -> dict:
          result = json_loads(string)
          return result
      except Exception as e:
-         logger.debug("Error parsing json with json package: %s", e)
+         logger.debug(f"Error parsing json with json package: {e}")
 
      try:
          result = demjson.decode(string)
          return result
      except demjson.JSONDecodeError as e:
-         logger.debug("Error parsing json with demjson package: %s", e)
+         logger.debug(f"Error parsing json with demjson package: {e}")
 
      try:
          from json_repair import repair_json
@@ -43,5 +43,5 @@ def parse_json(string) -> dict:
          return result
 
      except Exception as e:
-         logger.debug("Error repairing json with json_repair package: %s", e)
+         logger.debug(f"Error repairing json with json_repair package: {e}")
          raise e
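The hunks above touch a lenient JSON parser that falls back from the standard `json` module to `demjson` and finally to `json_repair`, logging each failure at debug level before re-raising. A condensed sketch of that fallback pattern (the demjson stage is omitted here, and `parse_json_lenient` is illustrative rather than the package's actual helper):

    import json
    import logging

    logger = logging.getLogger("Mirix")

    def parse_json_lenient(string: str) -> dict:
        # Try the strict stdlib parser first.
        try:
            return json.loads(string)
        except Exception as e:
            logger.debug(f"Error parsing json with json package: {e}")

        # Last resort: let json_repair patch up malformed LLM output.
        try:
            from json_repair import repair_json  # third-party, optional dependency
            return repair_json(string, return_objects=True)
        except Exception as e:
            logger.debug(f"Error repairing json with json_repair package: {e}")
            raise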
mirix/log.py ADDED
@@ -0,0 +1,163 @@
+ import logging
+ import os
+ import sys
+ from logging.handlers import RotatingFileHandler
+ from pathlib import Path
+ from typing import Optional
+
+ from mirix.settings import settings
+
+
+ def get_log_level() -> int:
+     """Get the configured log level."""
+     if settings.debug:
+         return logging.DEBUG
+
+     # Map string level to logging constant
+     level_map = {
+         "DEBUG": logging.DEBUG,
+         "INFO": logging.INFO,
+         "WARNING": logging.WARNING,
+         "ERROR": logging.ERROR,
+         "CRITICAL": logging.CRITICAL,
+     }
+
+     return level_map.get(settings.log_level.upper(), logging.INFO)
+
+
+ selected_log_level = get_log_level()
+
+
+ def validate_log_file_path(log_file_path: Path) -> Path:
+     """
+     Validate that the log file path is writable.
+
+     Checks:
+     - Path is not a directory
+     - Parent directory exists or can be created
+     - We have write permissions to the directory
+
+     Args:
+         log_file_path: Path to the log file
+
+     Returns:
+         Path: Validated absolute path
+
+     Raises:
+         ValueError: If the path is invalid or not writable
+     """
+     # Convert to absolute path
+     log_file_path = log_file_path.expanduser().resolve()
+
+     # Check if path exists and is a directory (not allowed)
+     if log_file_path.exists() and log_file_path.is_dir():
+         raise ValueError(
+             f"Invalid log file path: '{log_file_path}' is a directory. "
+             f"MIRIX_LOG_FILE must be a file path, not a directory."
+         )
+
+     # Get parent directory
+     parent_dir = log_file_path.parent
+
+     # Try to create parent directory if it doesn't exist
+     try:
+         parent_dir.mkdir(parents=True, exist_ok=True)
+     except (OSError, PermissionError) as e:
+         raise ValueError(
+             f"Invalid log file path: Cannot create directory '{parent_dir}'. "
+             f"Error: {e}"
+         ) from e
+
+     # Check if parent directory is writable
+     if not os.access(parent_dir, os.W_OK):
+         raise ValueError(
+             f"Invalid log file path: Directory '{parent_dir}' is not writable. "
+             f"Check permissions for MIRIX_LOG_FILE."
+         )
+
+     # If file exists, check if it's writable
+     if log_file_path.exists() and not os.access(log_file_path, os.W_OK):
+         raise ValueError(
+             f"Invalid log file path: File '{log_file_path}' exists but is not writable. "
+             f"Check file permissions for MIRIX_LOG_FILE."
+         )
+
+     return log_file_path
+
+
+ def get_logger(name: Optional[str] = None) -> "logging.Logger":
+     """
+     Get the Mirix logger with configured handlers.
+
+     Log Level Configuration:
+     - Single log level (MIRIX_LOG_LEVEL) applies to ALL handlers
+     - Controlled by: MIRIX_LOG_LEVEL or MIRIX_DEBUG environment variables
+     - Same level used for both console and file output
+
+     Handler Configuration (Default Behavior):
+     - Console: ALWAYS enabled UNLESS explicitly disabled (MIRIX_LOG_TO_CONSOLE=false)
+     - File: Automatically enabled if MIRIX_LOG_FILE is set with a valid path
+     - Handlers determine WHERE logs go, NOT what level they use
+
+     Returns:
+         logging.Logger: Configured logger instance
+
+     Raises:
+         ValueError: If MIRIX_LOG_FILE is set but the path is invalid or not writable
+     """
+     logger = logging.getLogger("Mirix")
+
+     # Set the log level ONCE for the entire logger
+     # This single level applies to all handlers (console and file)
+     logger.setLevel(selected_log_level)
+
+     # Add handlers if not already configured
+     # Handlers control WHERE logs go (console/file), not WHAT level they use
+     if not logger.handlers:
+         # Create a single formatter for consistency across all handlers
+         formatter = logging.Formatter(
+             '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+             datefmt='%Y-%m-%d %H:%M:%S'
+         )
+
+         handlers_added = []
+
+         # Console handler - ALWAYS enabled unless explicitly disabled
+         # Console logging is the default behavior
+         if settings.log_to_console:
+             console_handler = logging.StreamHandler(sys.stdout)
+             console_handler.setFormatter(formatter)
+             logger.addHandler(console_handler)
+             handlers_added.append("console")
+
+         # File handler - ONLY enabled if MIRIX_LOG_FILE is configured
+         # Automatically enabled when MIRIX_LOG_FILE is set
+         if settings.log_file is not None:
+             # Validate and get absolute path
+             # This will raise ValueError if path is invalid
+             log_file = validate_log_file_path(Path(settings.log_file))
+
+             # Create rotating file handler
+             file_handler = RotatingFileHandler(
+                 log_file,
+                 maxBytes=settings.log_max_bytes,
+                 backupCount=settings.log_backup_count,
+             )
+             file_handler.setFormatter(formatter)
+             logger.addHandler(file_handler)
+             handlers_added.append(f"file ({log_file})")
+
+         # Log where logs are being written (if any handlers were added)
+         if handlers_added:
+             destinations = " and ".join(handlers_added)
+             log_level_name = logging.getLevelName(selected_log_level)
+             logger.info("Logging to: %s (level: %s)", destinations, log_level_name)
+         else:
+             # No handlers configured - add NullHandler to prevent warnings
+             # This only happens if console is explicitly disabled AND file is not configured
+             logger.addHandler(logging.NullHandler())
+
+     # Prevent propagation to root logger to avoid duplicate messages
+     logger.propagate = False
+
+     return logger
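`get_logger` configures a single shared "Mirix" logger: the level comes from `MIRIX_DEBUG`/`MIRIX_LOG_LEVEL`, console output stays on unless `MIRIX_LOG_TO_CONSOLE=false`, and a rotating file handler is added only when `MIRIX_LOG_FILE` points at a writable path. A usage sketch, assuming the settings object reads these environment variables when `mirix` is first imported:

    import os

    # Must be set before the first import of mirix.log, since the level is
    # resolved at module import time.
    os.environ.setdefault("MIRIX_LOG_LEVEL", "DEBUG")
    os.environ.setdefault("MIRIX_LOG_FILE", "/tmp/mirix.log")  # enables the rotating file handler

    from mirix.log import get_logger

    logger = get_logger()
    logger.debug("memory update started")  # goes to stdout and /tmp/mirix.log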
mirix/schemas/agent.py CHANGED
@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Union
 
  from pydantic import BaseModel, Field, field_validator
 
- from mirix.client.constants import DEFAULT_EMBEDDING_CHUNK_SIZE
+ from mirix.constants import DEFAULT_EMBEDDING_CHUNK_SIZE
  from mirix.helpers import ToolRulesSolver
  from mirix.schemas.block import CreateBlock
  from mirix.schemas.embedding_config import EmbeddingConfig
mirix/schemas/block.py CHANGED
@@ -3,7 +3,7 @@ from typing import Optional
  from pydantic import BaseModel, Field, model_validator
  from typing_extensions import Self
 
- from mirix.client.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT
+ from mirix.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT
  from mirix.schemas.mirix_base import MirixBase
 
  # block of the LLM context