specfact-cli 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of specfact-cli might be problematic. Click here for more details.
- specfact_cli/__init__.py +14 -0
- specfact_cli/agents/__init__.py +23 -0
- specfact_cli/agents/analyze_agent.py +392 -0
- specfact_cli/agents/base.py +95 -0
- specfact_cli/agents/plan_agent.py +202 -0
- specfact_cli/agents/registry.py +176 -0
- specfact_cli/agents/sync_agent.py +133 -0
- specfact_cli/analyzers/__init__.py +10 -0
- specfact_cli/analyzers/code_analyzer.py +775 -0
- specfact_cli/cli.py +397 -0
- specfact_cli/commands/__init__.py +7 -0
- specfact_cli/commands/enforce.py +87 -0
- specfact_cli/commands/import_cmd.py +355 -0
- specfact_cli/commands/init.py +119 -0
- specfact_cli/commands/plan.py +1090 -0
- specfact_cli/commands/repro.py +172 -0
- specfact_cli/commands/sync.py +408 -0
- specfact_cli/common/__init__.py +24 -0
- specfact_cli/common/logger_setup.py +673 -0
- specfact_cli/common/logging_utils.py +41 -0
- specfact_cli/common/text_utils.py +52 -0
- specfact_cli/common/utils.py +48 -0
- specfact_cli/comparators/__init__.py +10 -0
- specfact_cli/comparators/plan_comparator.py +391 -0
- specfact_cli/generators/__init__.py +13 -0
- specfact_cli/generators/plan_generator.py +105 -0
- specfact_cli/generators/protocol_generator.py +115 -0
- specfact_cli/generators/report_generator.py +200 -0
- specfact_cli/generators/workflow_generator.py +111 -0
- specfact_cli/importers/__init__.py +6 -0
- specfact_cli/importers/speckit_converter.py +773 -0
- specfact_cli/importers/speckit_scanner.py +704 -0
- specfact_cli/models/__init__.py +32 -0
- specfact_cli/models/deviation.py +105 -0
- specfact_cli/models/enforcement.py +150 -0
- specfact_cli/models/plan.py +97 -0
- specfact_cli/models/protocol.py +28 -0
- specfact_cli/modes/__init__.py +18 -0
- specfact_cli/modes/detector.py +126 -0
- specfact_cli/modes/router.py +153 -0
- specfact_cli/sync/__init__.py +11 -0
- specfact_cli/sync/repository_sync.py +279 -0
- specfact_cli/sync/speckit_sync.py +388 -0
- specfact_cli/utils/__init__.py +57 -0
- specfact_cli/utils/console.py +69 -0
- specfact_cli/utils/feature_keys.py +213 -0
- specfact_cli/utils/git.py +241 -0
- specfact_cli/utils/ide_setup.py +381 -0
- specfact_cli/utils/prompts.py +179 -0
- specfact_cli/utils/structure.py +496 -0
- specfact_cli/utils/yaml_utils.py +200 -0
- specfact_cli/validators/__init__.py +19 -0
- specfact_cli/validators/fsm.py +260 -0
- specfact_cli/validators/repro_checker.py +320 -0
- specfact_cli/validators/schema.py +200 -0
- specfact_cli-0.4.0.dist-info/METADATA +332 -0
- specfact_cli-0.4.0.dist-info/RECORD +60 -0
- specfact_cli-0.4.0.dist-info/WHEEL +4 -0
- specfact_cli-0.4.0.dist-info/entry_points.txt +2 -0
- specfact_cli-0.4.0.dist-info/licenses/LICENSE.md +55 -0
|
@@ -0,0 +1,673 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Logging utility for standardized log setup across all modules
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import atexit
|
|
6
|
+
import logging
|
|
7
|
+
import os
|
|
8
|
+
import re
|
|
9
|
+
import sys
|
|
10
|
+
from logging.handlers import QueueHandler, QueueListener, RotatingFileHandler
|
|
11
|
+
from queue import Queue
|
|
12
|
+
from typing import Any, Literal
|
|
13
|
+
|
|
14
|
+
from beartype import beartype
|
|
15
|
+
from icontract import ensure, require
|
|
16
|
+
|
|
17
|
+
# Add TRACE level (5) - more detailed than DEBUG (10).
# Registered once at import time so "%(levelname)s" renders "TRACE" instead of "Level 5".
logging.addLevelName(5, "TRACE")

# Circular dependency protection flag
# Note: Platform base infrastructure removed for lean CLI
# The logger setup is now standalone without agent-system dependencies
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@beartype
@ensure(lambda result: isinstance(result, str) and len(result) > 0, "Must return non-empty string path")
def get_runtime_logs_dir() -> str:
    """
    Get the path to the centralized runtime logs directory and ensure it exists.

    This function is designed to be safe to call from anywhere, including
    module-level initializers, by guaranteeing the log directory's existence.

    Returns:
        str: Path to the runtime logs directory.
    """
    # Determine the base path based on the environment
    if os.path.exists("/.dockerenv"):
        # Docker container: write to /app/logs
        base_logs_dir = "/app/logs"
    else:
        # Non-Docker (local): repository logs directory, two levels above this module
        # (common/ -> src/ -> repo root).
        current_dir = os.path.dirname(os.path.abspath(__file__))
        src_dir = os.path.dirname(current_dir)
        repo_root = os.path.dirname(src_dir)
        base_logs_dir = os.path.join(repo_root, "logs")

    runtime_logs_dir = os.path.join(base_logs_dir, "runtime")

    # Collapse an accidentally duplicated 'runtime' path segment in one pass.
    # Anchoring the pattern with os.path.sep avoids the false positive of the
    # previous unanchored replace, which could mangle directories whose names
    # merely end in "runtime" (e.g. ".../myruntime/runtime"). The second,
    # sep-anchored normalization subsumed the first, so only it is kept.
    runtime_logs_dir = os.path.abspath(runtime_logs_dir).replace(
        f"{os.path.sep}runtime{os.path.sep}runtime", f"{os.path.sep}runtime"
    )

    # Ensure directory exists. Use 0o777 intentionally for cross-platform writability,
    # especially under container mounts and CI sandboxes. This is an explicitly justified
    # exception to repo rule #7; tests rely on this mode for deterministic behavior.
    mode = 0o777
    try:
        os.makedirs(runtime_logs_dir, mode=mode, exist_ok=True)
    except PermissionError:
        # Try workspace and CWD fallbacks, directly creating the runtime directory.
        # NOTE(review): if every fallback also fails, the last attempted (non-existent)
        # path is still returned — callers' handlers will raise on first write.
        for fallback_root in [os.environ.get("WORKSPACE", "/workspace"), os.getcwd()]:
            try:
                runtime_logs_dir = os.path.join(fallback_root, "logs", "runtime")
                os.makedirs(runtime_logs_dir, mode=0o777, exist_ok=True)
                break
            except PermissionError:
                continue

    return runtime_logs_dir
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class MessageFlowFormatter(logging.Formatter):
    """
    Custom formatter that recognizes message flow patterns and formats them accordingly.

    Messages of the form ``sender => receiver | message`` are rendered as a "flow"
    line attributed to the receiver; all other messages become standard
    agent-prefixed lines. Lines that already carry a timestamp prefix are passed
    through untouched so a record is never double-formatted.
    """

    # Pattern to match "sender => receiver | message" format
    FLOW_PATTERN = re.compile(r"^(\w+) => (\w+) \| (.*)$")

    # Pattern to match already formatted messages (both standard and flow formats)
    # This includes timestamp pattern \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}
    # and agent | timestamp format
    ALREADY_FORMATTED_PATTERN = re.compile(
        r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}|^\w+ \| \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})"
    )

    @beartype
    @require(
        lambda agent_name: isinstance(agent_name, str) and len(agent_name) > 0, "Agent name must be non-empty string"
    )
    def __init__(
        self,
        agent_name: str,
        fmt: str | None = None,
        datefmt: str | None = None,
        style: Literal["%", "{", "$"] = "%",
        session_id: str | None = None,
        preserve_newlines: bool = True,
    ) -> None:
        """
        Initialize the formatter with the agent name.

        Args:
            agent_name: Name of the agent (used when no flow information is in the message)
            fmt: Format string passed through to ``logging.Formatter``
            datefmt: Date format string passed through to ``logging.Formatter``
            style: Style of format string ("%", "{", or "$")
            session_id: Optional unique session ID to include in log messages
            preserve_newlines: Whether to preserve newlines in the original message
        """
        super().__init__(fmt, datefmt, style)
        # Prefix used for non-flow messages.
        self.agent_name = agent_name
        # When set, inserted between the timestamp and the level name.
        self.session_id = session_id
        # When True, only the first line of a multiline message gets the prefix.
        self.preserve_newlines = preserve_newlines

    @beartype
    @require(lambda record: isinstance(record, logging.LogRecord), "Record must be LogRecord instance")
    @ensure(lambda result: isinstance(result, str), "Must return string")
    def format(self, record: logging.LogRecord) -> str:
        """
        Format the log record according to message flow patterns.

        Args:
            record: The log record to format

        Returns:
            Formatted log string
        """
        # Extract the fully-interpolated message (applies record.args).
        original_message = record.getMessage()

        # Special case for test summary format (always preserve exact format).
        # NOTE: this matches ANY message containing "===", not only summaries.
        if "Test Summary:" in original_message or "===" in original_message:
            # Special case for test analyzer compatibility - don't prepend anything
            return original_message

        # Guard against already formatted messages to prevent recursive formatting.
        # Check for timestamp pattern to identify already formatted messages.
        if self.ALREADY_FORMATTED_PATTERN.search(original_message):
            # Log message is already formatted, return as is
            return original_message

        # Check if this is a message flow log ("sender => receiver | message").
        flow_match = self.FLOW_PATTERN.match(original_message)
        if flow_match:
            sender, receiver, message = flow_match.groups()

            # Format the timestamp
            timestamp = self.formatTime(record, self.datefmt)

            # Format the message with flow information and session ID if available.
            # Flow lines are attributed to the RECEIVER, not self.agent_name.
            if self.session_id:
                formatted_message = (
                    f"{receiver} | {timestamp} | {self.session_id} | "
                    f"{record.levelname} | {sender} => {receiver} | {message}"
                )
            else:
                formatted_message = (
                    f"{receiver} | {timestamp} | {record.levelname} | {sender} => {receiver} | {message}"
                )

            # Override the message in the record so any later handler that formats
            # this same record hits the ALREADY_FORMATTED_PATTERN fast path above.
            record.msg = formatted_message
            record.args = ()

            # Return the formatted message directly
            return formatted_message
        # Standard formatting for non-flow messages
        timestamp = self.formatTime(record, self.datefmt)

        # Handle multiline messages: prefix only the first line, keep the rest verbatim.
        if self.preserve_newlines and "\n" in original_message:
            lines = original_message.split("\n")
            # Format the first line with the timestamp
            if self.session_id:
                first_line = f"{self.agent_name} | {timestamp} | {self.session_id} | {record.levelname} | {lines[0]}"
            else:
                first_line = f"{self.agent_name} | {timestamp} | {record.levelname} | {lines[0]}"

            # Return the first line and the rest as is.
            # NOTE(review): this branch does not rewrite record.msg, unlike the others.
            return first_line + "\n" + "\n".join(lines[1:])
        # Regular single-line message
        if self.session_id:
            formatted_message = (
                f"{self.agent_name} | {timestamp} | {self.session_id} | {record.levelname} | {original_message}"
            )
        else:
            formatted_message = f"{self.agent_name} | {timestamp} | {record.levelname} | {original_message}"

        # Override the message in the record (see flow branch for rationale).
        record.msg = formatted_message
        record.args = ()

        # Return the formatted message
        return formatted_message
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
class LoggerSetup:
    """
    Utility class for standardized logging setup across all agents.

    Every logger created here is queue-backed: the logger itself only holds a
    ``QueueHandler``, while a per-logger ``QueueListener`` thread drains the queue
    into the real (file/console) handlers. Class-level dicts cache the loggers,
    queues, and listeners so repeat calls reuse them.
    """

    # Keep the old format for backward compatibility
    LEGACY_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    DEFAULT_LOG_LEVEL = "INFO"

    # Store active loggers for management (keyed by logger name)
    _active_loggers: dict[str, logging.Logger] = {}
    # Queues feeding each logger's QueueListener
    _log_queues: dict[str, Queue] = {}
    # Running QueueListener threads, one per logger
    _log_listeners: dict[str, QueueListener] = {}

    @classmethod
    def shutdown_listeners(cls) -> None:
        """Shuts down all active queue listeners and closes their loggers' handlers."""
        for listener in cls._log_listeners.values():
            try:
                listener.stop()
            except Exception:
                # Ignore errors during interpreter shutdown
                pass
        cls._log_listeners.clear()
        # Also clear active loggers to avoid handler accumulation across test sessions
        for logger in cls._active_loggers.values():
            try:
                for handler in list(logger.handlers):
                    try:
                        handler.close()
                    except Exception:
                        pass
                    logger.removeHandler(handler)
            except Exception:
                pass
        cls._active_loggers.clear()

    @classmethod
    @beartype
    @ensure(lambda result: isinstance(result, logging.Logger), "Must return Logger instance")
    def create_agent_flow_logger(cls, session_id: str | None = None) -> logging.Logger:
        """
        Creates a dedicated logger for inter-agent message flow.

        This logger uses a queue for thread-safe and process-safe logging.
        In test mode (TEST_MODE=true), a NullHandler is used behind the listener
        so no file is created.
        """
        logger_name = "agent_flow"
        # Return the cached instance if it was already built.
        if logger_name in cls._active_loggers:
            return cls._active_loggers[logger_name]

        # Check if we're in test mode
        test_mode = os.environ.get("TEST_MODE", "").lower() == "true"

        log_queue = Queue(-1)
        cls._log_queues[logger_name] = log_queue

        formatter = MessageFlowFormatter(agent_name="inter_agent_comm", session_id=session_id)

        if test_mode:
            # In test mode, use a null handler that discards messages, but still use a QueueListener
            # so tests can assert on listener/QueueHandler presence without writing files.
            null_handler = logging.NullHandler()
            null_handler.setFormatter(formatter)
            null_handler.setLevel(logging.INFO)

            listener = QueueListener(log_queue, null_handler, respect_handler_level=True)
        else:
            # In production mode, use file handler
            runtime_logs_dir = get_runtime_logs_dir()
            log_file = os.path.join(runtime_logs_dir, "agent_flow.log")

            file_handler = RotatingFileHandler(log_file, maxBytes=10 * 1024 * 1024, backupCount=5)
            file_handler.setFormatter(formatter)
            file_handler.setLevel(logging.INFO)
            # Also stream to console so run_local.sh can colorize per agent
            console_handler = logging.StreamHandler(sys.stdout)
            console_handler.setFormatter(formatter)
            console_handler.setLevel(logging.INFO)

            listener = QueueListener(log_queue, file_handler, console_handler, respect_handler_level=True)

        listener.start()
        cls._log_listeners[logger_name] = listener

        logger = logging.getLogger(logger_name)
        logger.setLevel(logging.INFO)
        # Prevent records from also reaching the root logger's handlers.
        logger.propagate = False

        # Drop any handlers a previous process/session attached to this name.
        if logger.handlers:
            for handler in logger.handlers:
                handler.close()
            logger.handlers.clear()

        queue_handler = QueueHandler(log_queue)
        logger.addHandler(queue_handler)

        # Add trace method to logger instance for convenience (TRACE == level 5)
        logger.trace = lambda message, *args, **kwargs: logger.log(5, message, *args, **kwargs)

        cls._active_loggers[logger_name] = logger

        return logger

    @classmethod
    @beartype
    @require(lambda name: isinstance(name, str) and len(name) > 0, "Name must be non-empty string")
    @require(
        lambda log_level: log_level is None or (isinstance(log_level, str) and len(log_level) > 0),
        "Log level must be None or non-empty string",
    )
    @ensure(lambda result: isinstance(result, logging.Logger), "Must return Logger instance")
    def create_logger(
        cls,
        name: str,
        log_file: str | None = None,
        agent_name: str | None = None,
        log_level: str | None = None,
        session_id: str | None = None,
        use_rotating_file: bool = True,
        append_mode: bool = True,
        preserve_test_format: bool = False,
    ) -> logging.Logger:
        """
        Creates a new logger or returns an existing one with the specified configuration.

        This method is process-safe and suitable for multi-agent environments.

        Args:
            name: Logger name (also the cache key).
            log_file: Optional log file; relative paths resolve under the runtime logs dir.
            agent_name: Prefix for formatted lines; defaults to ``name``.
            log_level: Level name; falls back to $LOG_LEVEL, then DEFAULT_LOG_LEVEL.
            session_id: Optional session ID embedded in every line.
            use_rotating_file: Use RotatingFileHandler (10 MB x 5) instead of FileHandler.
            append_mode: Open the log file in append ("a") vs truncate ("w") mode.
            preserve_test_format: When True, multiline messages are NOT specially handled.

        Returns:
            Configured logger instance.
        """
        logger_name = name
        if logger_name in cls._active_loggers:
            existing_logger = cls._active_loggers[logger_name]
            # If a file log was requested now but the existing logger was created without one,
            # rebuild the logger with file backing to ensure per-agent files are created.
            if log_file:
                # Stop and discard any existing listener
                try:
                    existing_listener = cls._log_listeners.pop(logger_name, None)
                    if existing_listener:
                        try:
                            existing_listener.stop()
                        except Exception:
                            pass
                except Exception:
                    pass

                # Remove all handlers from the existing logger
                try:
                    for handler in list(existing_logger.handlers):
                        try:
                            handler.close()
                        except Exception:
                            pass
                        existing_logger.removeHandler(handler)
                except Exception:
                    pass

                # Remove from cache and proceed to full (re)creation below
                try:
                    cls._active_loggers.pop(logger_name, None)
                except Exception:
                    pass
            else:
                # No file requested: just ensure level is updated and reuse existing logger.
                # NOTE(review): getLevelName returns the string "Level X" for unknown
                # names, in which case the comparison is against a str — confirm inputs
                # are always valid level names.
                if log_level and existing_logger.level != logging.getLevelName(log_level.upper()):
                    existing_logger.setLevel(log_level.upper())
                return existing_logger

        # Determine log level
        log_level_str = (log_level or os.environ.get("LOG_LEVEL", cls.DEFAULT_LOG_LEVEL)).upper()
        # Strip inline comments (e.g. "INFO  # default" from an env file)
        log_level_clean = log_level_str.split("#")[0].strip()

        # For a valid name this yields the numeric level; unknown names yield "Level X".
        level = logging.getLevelName(log_level_clean)

        # Create logger
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        logger.propagate = False  # Prevent duplicate logs in parent loggers

        # Clear existing handlers to prevent duplication
        if logger.hasHandlers():
            for handler in logger.handlers:
                handler.close()
                logger.removeHandler(handler)

        # Prepare formatter
        log_format = MessageFlowFormatter(
            agent_name=agent_name or name,
            session_id=session_id,
            preserve_newlines=not preserve_test_format,
        )

        # Create a queue and listener for this logger if a file is specified
        if log_file:
            log_queue = Queue(-1)
            cls._log_queues[logger_name] = log_queue

            log_file_path = log_file
            if not os.path.isabs(log_file):
                logs_dir = get_runtime_logs_dir()
                log_file_path = os.path.join(logs_dir, log_file)

            # Ensure the directory for the log file exists
            log_file_dir = os.path.dirname(log_file_path)
            os.makedirs(log_file_dir, mode=0o777, exist_ok=True)
            # Proactively create/touch the file so it exists even before first write
            try:
                with open(log_file_path, "a", encoding="utf-8"):
                    pass
            except Exception:
                # Non-fatal; handler will attempt to open the file next
                pass

            try:
                if use_rotating_file:
                    handler: logging.Handler = RotatingFileHandler(
                        log_file_path,
                        maxBytes=10 * 1024 * 1024,
                        backupCount=5,
                        mode="a" if append_mode else "w",
                    )
                else:
                    handler = logging.FileHandler(log_file_path, mode="a" if append_mode else "w")
            except (FileNotFoundError, OSError):
                # Fallback for test environments where makedirs is mocked or paths are not writable
                fallback_dir = os.getcwd()
                fallback_path = os.path.join(fallback_dir, os.path.basename(log_file_path))
                if use_rotating_file:
                    handler = RotatingFileHandler(
                        fallback_path,
                        maxBytes=10 * 1024 * 1024,
                        backupCount=5,
                        mode="a" if append_mode else "w",
                    )
                else:
                    handler = logging.FileHandler(fallback_path, mode="a" if append_mode else "w")

            handler.setFormatter(log_format)
            handler.setLevel(level)

            listener = QueueListener(log_queue, handler, respect_handler_level=True)
            listener.start()
            cls._log_listeners[logger_name] = listener

            queue_handler = QueueHandler(log_queue)
            logger.addHandler(queue_handler)

            # Emit a one-time initialization line so users can see where logs go
            try:
                logger.info("[LoggerSetup] File logger initialized: %s", log_file_path)
            except Exception:
                pass
        else:
            # If no log file is specified, set up a listener with a console handler
            log_queue = Queue(-1)
            cls._log_queues[logger_name] = log_queue

            console_handler = logging.StreamHandler(sys.stdout)
            console_handler.setFormatter(log_format)
            console_handler.setLevel(level)

            listener = QueueListener(log_queue, console_handler, respect_handler_level=True)
            listener.start()
            cls._log_listeners[logger_name] = listener

            queue_handler = QueueHandler(log_queue)
            logger.addHandler(queue_handler)

        # Add a console handler for non-test environments or when no file is specified.
        # QueueHandler does not subclass StreamHandler, so the any() check reflects
        # only direct StreamHandlers previously attached to this logger.
        if "pytest" not in sys.modules and not any(isinstance(h, logging.StreamHandler) for h in logger.handlers):
            console_handler = logging.StreamHandler(sys.stdout)
            console_handler.setFormatter(log_format)
            console_handler.setLevel(level)
            logger.addHandler(console_handler)

        # Add trace method to logger instance for convenience (TRACE == level 5)
        logger.trace = lambda message, *args, **kwargs: logger.log(5, message, *args, **kwargs)

        cls._active_loggers[logger_name] = logger
        return logger

    @classmethod
    def flush_all_loggers(cls) -> None:
        """
        Flush all active loggers to ensure their output is written.

        Retained for API compatibility; see the inline note on why this is a no-op.
        """
        for logger_name, logger in cls._active_loggers.items():
            # With QueueListener, flushing the logger's handlers (QueueHandler)
            # doesn't guarantee the message is written. The listener thread handles it.
            # Stopping the listener flushes the queue, but that's for shutdown.
            # This method is now effectively a no-op for queued logs.
            pass

    @classmethod
    @beartype
    @require(lambda name: isinstance(name, str) and len(name) > 0, "Name must be non-empty string")
    @ensure(lambda result: isinstance(result, bool), "Must return boolean")
    def flush_logger(cls, name: str) -> bool:
        """
        Flush a specific logger by name.

        Args:
            name: Name of the logger to flush

        Returns:
            True if logger was found and flushed, False otherwise
        """
        # See flush_all_loggers comment. This is now a no-op.
        return name in cls._active_loggers

    @classmethod
    @beartype
    @require(lambda logger: isinstance(logger, logging.Logger), "Logger must be Logger instance")
    @require(lambda summary: isinstance(summary, dict), "Summary must be dictionary")
    def write_test_summary(cls, logger: logging.Logger, summary: dict[str, Any]) -> None:
        """
        Write test summary in a format that log_analyzer.py can understand.

        The logger's listener is stopped first so queued records land in the file
        before the summary, then restarted afterwards.

        Args:
            logger: The logger to use
            summary: Dictionary with test summary information (expects keys such as
                "passed", "failed", "skipped", "duration", "failed_tests",
                "failed_modules" — TODO confirm the full producer-side schema)
        """
        listener = cls._log_listeners.get(logger.name)
        if not listener:
            # Fallback for non-queued loggers, though all should be queued now
            for handler in logger.handlers:
                handler.flush()
            logger.info("=" * 15 + " test session starts " + "=" * 15)
            # ... rest of the original implementation
            return

        # Find the file handler to get its path; without one there is nowhere to write.
        file_handler = next(
            (h for h in listener.handlers if isinstance(h, (logging.FileHandler, RotatingFileHandler))),
            None,
        )
        if not file_handler:
            return

        # Stop the listener to ensure the queue is flushed before we write the summary
        listener.stop()

        # Write summary directly to the file to ensure it's synchronous
        log_file_path = file_handler.baseFilename
        passed = summary.get("passed", 0)
        failed = summary.get("failed", 0)
        skipped = summary.get("skipped", 0)
        duration = summary.get("duration", 0)
        summary_lines = [
            "=" * 15 + " test session starts " + "=" * 15,
            f"{passed} passed, {failed} failed, {skipped} skipped in {duration:.2f}s",
            f"Test Summary: {passed} passed, {failed} failed, {skipped} skipped",
            f"Status: {'PASSED' if failed == 0 else 'FAILED'}",
            f"Duration: {duration:.2f} seconds",
        ]

        if summary.get("failed_tests"):
            summary_lines.append("Failed tests by module:")
            for module, tests in summary.get("failed_modules", {}).items():
                summary_lines.append(f"Module: {module} - {len(tests)} failed tests")
                for test in tests:
                    summary_lines.append(f"- {test}")

        summary_lines.append("=" * 50)

        with open(log_file_path, "a", encoding="utf-8") as f:
            f.write("\n".join(summary_lines) + "\n")

        # Restart the listener for any subsequent logging
        listener.start()

    @classmethod
    @beartype
    @require(lambda name: isinstance(name, str) and len(name) > 0, "Name must be non-empty string")
    @ensure(lambda result: result is None or isinstance(result, logging.Logger), "Must return None or Logger instance")
    def get_logger(cls, name: str) -> logging.Logger | None:
        """
        Get a logger by name.

        Args:
            name: Name of the logger

        Returns:
            Configured logger instance or None if logger doesn't exist
        """
        return cls._active_loggers.get(name)

    @staticmethod
    @beartype
    @require(lambda logger: isinstance(logger, logging.Logger), "Logger must be Logger instance")
    @require(lambda message: isinstance(message, str), "Message must be string")
    def trace(logger: logging.Logger, message: str, *args: Any, **kwargs: Any) -> None:
        """
        Log a message at TRACE level (5).

        Args:
            logger: Logger instance
            message: Log message
            *args: Additional arguments for string formatting
            **kwargs: Additional keyword arguments for logging
        """
        logger.log(5, message, *args, **kwargs)

    @staticmethod
    @beartype
    @ensure(lambda result: result is not None, "Must return object")
    def redact_secrets(obj: Any) -> Any:
        """
        Recursively mask sensitive values (API keys, tokens, passwords, secrets) in dicts/lists/strings.

        Returns a sanitized copy of the object suitable for logging. Dict values
        whose key contains one of SENSITIVE_KEYS are masked (keeping the last 4
        characters of long strings as a hint); other values are recursed into.
        """
        SENSITIVE_KEYS = ["key", "token", "password", "secret"]
        if isinstance(obj, dict):
            redacted = {}
            for k, v in obj.items():
                if any(s in k.lower() for s in SENSITIVE_KEYS):
                    if isinstance(v, str) and len(v) > 4:
                        # Keep a short suffix so the value remains identifiable in logs.
                        redacted[k] = f"*** MASKED (ends with '{v[-4:]}') ***"
                    elif v:
                        redacted[k] = "*** MASKED ***"
                    else:
                        # Falsy sensitive values (None, "", 0) are normalized to None.
                        redacted[k] = None
                else:
                    redacted[k] = LoggerSetup.redact_secrets(v)
            return redacted
        if isinstance(obj, list):
            return [LoggerSetup.redact_secrets(item) for item in obj]
        if isinstance(obj, str):
            # Optionally, mask API key patterns in strings (e.g., sk-...)
            # Example: OpenAI key pattern
            obj = re.sub(r"sk-[a-zA-Z0-9_-]{20,}", "*** MASKED API KEY ***", obj)
            return obj
        return obj
|
|
636
|
+
|
|
637
|
+
|
|
638
|
+
@beartype
@require(lambda agent_name: isinstance(agent_name, str) and len(agent_name) > 0, "Agent name must be non-empty string")
@require(lambda log_level: isinstance(log_level, str) and len(log_level) > 0, "Log level must be non-empty string")
@ensure(lambda result: isinstance(result, logging.Logger), "Must return Logger instance")
def setup_logger(
    agent_name: str,
    log_level: str = "INFO",
    session_id: str | None = None,
    log_file: str | None = None,
    use_rotating_file: bool = True,
) -> logging.Logger:
    """
    Set up a logger with the given name and log level.

    Thin convenience wrapper: all work is delegated to LoggerSetup.create_logger
    so every module shares the same queue-backed configuration path.

    Args:
        agent_name: Name of the agent
        log_level: Log level (default: INFO)
        session_id: Optional unique session ID to include in all log messages
        log_file: Optional file path for logging
        use_rotating_file: Whether to use rotating file handler (default: True)

    Returns:
        Configured logger
    """
    # Collect keyword options once, then hand off to the shared factory.
    options = {
        "log_file": log_file,
        "agent_name": agent_name,
        "log_level": log_level,
        "session_id": session_id,
        "use_rotating_file": use_rotating_file,
    }
    return LoggerSetup.create_logger(agent_name, **options)
|
|
671
|
+
|
|
672
|
+
|
|
673
|
+
# Stop all queue listeners at interpreter exit so queued records are drained
# and handlers are closed before the process terminates.
atexit.register(LoggerSetup.shutdown_listeners)
|